code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy
import cv2 as cv
from matplotlib import pyplot as plt
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QImage
import win32gui
import sys
import time
import win32api
import win32print
import win32con
import os
import keyboard
import win32com.client
import pythoncom
# Directory containing this script; used to locate the template-image folders.
base_dir = os.path.dirname(os.path.abspath(__file__))
# A QApplication instance must exist before QScreen.grabWindow can be used.
app = QApplication(sys.argv)
def second_key_sort(k):
    """Sort-key helper: order items by their second element (y coordinate)."""
    second = k[1]
    return second
def cutscreen(hwnd):
    """Grab window *hwnd* and return its contents as a grayscale ndarray."""
    screen = QApplication.primaryScreen()
    qimg = screen.grabWindow(hwnd).toImage().convertToFormat(
        QImage.Format.Format_RGBA8888)
    w = qimg.width()
    h = qimg.height()
    buf = qimg.bits()
    # The sip voidptr needs an explicit size before numpy can read from it.
    buf.setsize(h * w * 4)
    rgba = numpy.frombuffer(buf, numpy.uint8).reshape((h, w, 4))
    # COLOR_RGB2GRAY accepts 4-channel input; the alpha channel is ignored.
    return cv.cvtColor(rgba, cv.COLOR_RGB2GRAY)
def GetWindowCorner(hwnd):
    """Return the [x, y] offset between the physical window rect and the grab.

    GetWindowRect reports physical pixels while grabWindow returns logical
    pixels, so the physical coordinates are divided by the DPI scale factor.
    """
    grab = QApplication.primaryScreen().grabWindow(hwnd).toImage().convertToFormat(
        QImage.Format.Format_RGBA8888)
    left, top, right, bottom = win32gui.GetWindowRect(hwnd)
    # DPI scale = physical desktop width / logical desktop width.
    desktop_dc = win32gui.GetDC(0)
    phys_width = win32print.GetDeviceCaps(desktop_dc, win32con.DESKTOPHORZRES)
    scale = phys_width / win32api.GetSystemMetrics(0)
    x_off = int(right / scale) - grab.width()
    y_off = int(bottom / scale) - grab.height()
    return [x_off, y_off]
def clk(pos, hwnd):
    """Left-click at window-relative position *pos* inside window *hwnd*."""
    time.sleep(0.1)
    dx, dy = GetWindowCorner(hwnd)
    screen_pos = (pos[0] + dx, pos[1] + dy)
    win32api.SetCursorPos(screen_pos)
    time.sleep(0.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
    time.sleep(0.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
# Cached window height used by GetWindowHeight (0 = not yet measured).
# NOTE: a `global` statement at module scope is a no-op, so the previous
# `global window_height` line was removed; the plain assignment suffices.
window_height = 0
def GetWindowHeight():
    """Return the client height of the Unity game window, caching the result."""
    global window_height
    if window_height:
        return window_height
    hwnd = win32gui.FindWindow('UnityWndClass', None)
    grab = QApplication.primaryScreen().grabWindow(hwnd).toImage().convertToFormat(
        QImage.Format.Format_RGBA8888)
    window_height = grab.height()
    return window_height
def LocatePic(target, picname):
    """Template-match *picname* against the grayscale screenshot *target*.

    Loads pic_<height>p/<picname>.png from the script directory and returns
    a list of [x, y] match centres sorted by y coordinate.  A normalized
    squared difference below 0.01 counts as a match; candidate points within
    5 px of an already accepted point are suppressed.
    """
    matchResult = []
    # os.path.join instead of hard-coded '\\' separators keeps the template
    # path portable and readable.
    path = os.path.join(base_dir, 'pic_{0}p'.format(GetWindowHeight()),
                        '{0}.png'.format(picname))
    template = cv.imread(path, 0)
    theight, twidth = template.shape[:2]
    result = cv.matchTemplate(target, template, cv.TM_SQDIFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
    temp_loc = min_loc
    if min_val < 0.01:
        matchResult.append([int(min_loc[0] + twidth / 2),
                            int(min_loc[1] + theight / 2)])
        loc = numpy.where(result < 0.01)
        for other_loc in zip(*loc[::-1]):
            # Accept only points farther than 5 px from the previous hit ...
            if (temp_loc[0] - other_loc[0]) ** 2 > 25 \
                    or (temp_loc[1] - other_loc[1]) ** 2 > 25:
                # ... and farther than 5 px from the global best match.
                if (other_loc[0] - min_loc[0]) ** 2 > 25 \
                        or (other_loc[1] - min_loc[1]) ** 2 > 25:
                    temp_loc = other_loc
                    matchResult.append([int(other_loc[0] + twidth / 2),
                                        int(other_loc[1] + theight / 2)])
    matchResult.sort(key=second_key_sort)
    return matchResult
def OpenMap(hwnd):
    """Press 'm' to open the map unless its close button is already visible.

    Returns 0 when the keypress was sent, 1 when the map was already open.
    """
    already_open = LocatePic(cutscreen(hwnd), 'close_btn')
    if already_open:
        return 1
    keyboard.send('m')
    return 0
def SetBtnON(hwnd):
    """Click the 'off' toggle to switch it on; 0 on success, 1 if not found."""
    hits = LocatePic(cutscreen(hwnd), 'off_btn')
    if not hits:
        return 1
    clk(hits[0], hwnd)
    return 0
def SetBtnOFF(hwnd):
    """Click the 'on' toggle to switch it off; 0 on success, 1 if not found."""
    hits = LocatePic(cutscreen(hwnd), 'on_btn')
    print(hits)
    if not hits:
        return 1
    clk(hits[0], hwnd)
    return 0
def ClickBtn(hwnd):
    """Click the confirm button; 0 on success, 1 if not found."""
    hits = LocatePic(cutscreen(hwnd), 'confirm_btn')
    if not hits:
        return 1
    clk(hits[0], hwnd)
    return 0
def ClickDel(hwnd):
    """Click the delete button; 0 on success, 1 if not found."""
    hits = LocatePic(cutscreen(hwnd), 'del')
    if not hits:
        return 1
    print(hits)
    clk(hits[0], hwnd)
    return 0
def ClickMarkList(hwnd):
    """Click the first visible mark-list entry (either template variant).

    Returns 0 when an entry was clicked, 1 when neither variant is on screen.
    """
    hits0 = LocatePic(cutscreen(hwnd), 'marklist0')
    hits1 = LocatePic(cutscreen(hwnd), 'marklist1')
    for hits in (hits0, hits1):
        if hits:
            clk(hits[0], hwnd)
            print(hits)
            return 0
    return 1
def DeleteAllMark(hwnd, name):
    """Delete every map marker matching template *name*.

    Repeatedly clicks a found marker and then its delete button (falling back
    to re-opening the mark list when the delete button does not appear) until
    no marker remains or 10 seconds have elapsed.
    """
    poslist = LocatePic(cutscreen(hwnd), name)
    # BUG FIX: time.clock() was removed in Python 3.8; time.perf_counter()
    # is the documented replacement for interval timing.
    t_start = time.perf_counter()
    while len(poslist):
        time.sleep(0.2)
        clk(poslist[0], hwnd)
        time.sleep(0.1)
        flag = 0
        while flag == 0:
            for i in range(5):
                if ClickDel(hwnd) == 0:
                    flag = 1
                    break
            if flag == 0:
                # Delete button not found; try re-opening the mark list.
                for i in range(5):
                    if ClickMarkList(hwnd) == 0:
                        break
        time.sleep(0.2)
        poslist = LocatePic(cutscreen(hwnd), name)
        # Give up after 10 seconds to avoid looping forever on a stuck UI.
        if time.perf_counter() - t_start > 10:
            break
    return
def DeleteAllMarks(hwnd):
    """Remove markers of every type (mark0 .. mark6) from the map."""
    for mark_id in range(7):
        DeleteAllMark(hwnd, 'mark{0}'.format(mark_id))
if __name__ == '__main__':
    hwnd = win32gui.FindWindow('UnityWndClass', None)
    # COM must be initialised on this thread before using WScript.Shell.
    pythoncom.CoInitialize()
    shell = win32com.client.Dispatch("WScript.Shell")
    # Sending a bare ALT keypress first works around the Windows restriction
    # on SetForegroundWindow being called from a background process.
    shell.SendKeys('%')
    win32gui.SetForegroundWindow(hwnd)
    time.sleep(0.2)
    DeleteAllMarks(hwnd)
#OpenMap(hwnd) | [
"PyQt5.QtWidgets.QApplication.primaryScreen",
"win32api.SetCursorPos",
"PyQt5.QtWidgets.QApplication",
"cv2.minMaxLoc",
"win32gui.SetForegroundWindow",
"win32api.mouse_event",
"cv2.matchTemplate",
"win32api.GetSystemMetrics",
"os.path.abspath",
"cv2.cvtColor",
"time.clock",
"pythoncom.CoInitia... | [((354, 376), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (366, 376), False, 'from PyQt5.QtWidgets import QApplication\n'), ((321, 346), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (336, 346), False, 'import os\n'), ((770, 805), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_RGB2GRAY'], {}), '(img, cv.COLOR_RGB2GRAY)\n', (781, 805), True, 'import cv2 as cv\n'), ((866, 894), 'PyQt5.QtWidgets.QApplication.primaryScreen', 'QApplication.primaryScreen', ([], {}), '()\n', (892, 894), False, 'from PyQt5.QtWidgets import QApplication\n'), ((997, 1025), 'win32gui.GetWindowRect', 'win32gui.GetWindowRect', (['hwnd'], {}), '(hwnd)\n', (1019, 1025), False, 'import win32gui\n'), ((1036, 1053), 'win32gui.GetDC', 'win32gui.GetDC', (['(0)'], {}), '(0)\n', (1050, 1053), False, 'import win32gui\n'), ((1062, 1116), 'win32print.GetDeviceCaps', 'win32print.GetDeviceCaps', (['hDC', 'win32con.DESKTOPHORZRES'], {}), '(hDC, win32con.DESKTOPHORZRES)\n', (1086, 1116), False, 'import win32print\n'), ((1286, 1301), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1296, 1301), False, 'import time\n'), ((1386, 1412), 'win32api.SetCursorPos', 'win32api.SetCursorPos', (['pos'], {}), '(pos)\n', (1407, 1412), False, 'import win32api\n'), ((1417, 1432), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1427, 1432), False, 'import time\n'), ((1437, 1500), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTDOWN', '(0)', '(0)', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)\n', (1457, 1500), False, 'import win32api\n'), ((1505, 1520), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1515, 1520), False, 'import time\n'), ((1525, 1586), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTUP', '(0)', '(0)', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)\n', (1545, 1586), False, 'import win32api\n'), ((1737, 
1779), 'win32gui.FindWindow', 'win32gui.FindWindow', (['"""UnityWndClass"""', 'None'], {}), "('UnityWndClass', None)\n", (1756, 1779), False, 'import win32gui\n'), ((1793, 1821), 'PyQt5.QtWidgets.QApplication.primaryScreen', 'QApplication.primaryScreen', ([], {}), '()\n', (1819, 1821), False, 'from PyQt5.QtWidgets import QApplication\n'), ((2168, 2223), 'cv2.matchTemplate', 'cv.matchTemplate', (['target', 'template', 'cv.TM_SQDIFF_NORMED'], {}), '(target, template, cv.TM_SQDIFF_NORMED)\n', (2184, 2223), True, 'import cv2 as cv\n'), ((2263, 2283), 'cv2.minMaxLoc', 'cv.minMaxLoc', (['result'], {}), '(result)\n', (2275, 2283), True, 'import cv2 as cv\n'), ((2419, 2445), 'numpy.where', 'numpy.where', (['(result < 0.01)'], {}), '(result < 0.01)\n', (2430, 2445), False, 'import numpy\n'), ((3937, 3949), 'time.clock', 'time.clock', ([], {}), '()\n', (3947, 3949), False, 'import time\n'), ((4605, 4647), 'win32gui.FindWindow', 'win32gui.FindWindow', (['"""UnityWndClass"""', 'None'], {}), "('UnityWndClass', None)\n", (4624, 4647), False, 'import win32gui\n'), ((4652, 4676), 'pythoncom.CoInitialize', 'pythoncom.CoInitialize', ([], {}), '()\n', (4674, 4676), False, 'import pythoncom\n'), ((4759, 4793), 'win32gui.SetForegroundWindow', 'win32gui.SetForegroundWindow', (['hwnd'], {}), '(hwnd)\n', (4787, 4793), False, 'import win32gui\n'), ((4798, 4813), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4808, 4813), False, 'import time\n'), ((1132, 1160), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['(0)'], {}), '(0)\n', (1157, 1160), False, 'import win32api\n'), ((2932, 2950), 'keyboard.send', 'keyboard.send', (['"""m"""'], {}), "('m')\n", (2945, 2950), False, 'import keyboard\n'), ((3982, 3997), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3992, 3997), False, 'import time\n'), ((4035, 4050), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4045, 4050), False, 'import time\n'), ((4330, 4345), 'time.sleep', 'time.sleep', (['(0.2)'], {}), 
'(0.2)\n', (4340, 4345), False, 'import time\n'), ((4405, 4417), 'time.clock', 'time.clock', ([], {}), '()\n', (4415, 4417), False, 'import time\n'), ((692, 726), 'numpy.frombuffer', 'numpy.frombuffer', (['ptr', 'numpy.uint8'], {}), '(ptr, numpy.uint8)\n', (708, 726), False, 'import numpy\n'), ((452, 480), 'PyQt5.QtWidgets.QApplication.primaryScreen', 'QApplication.primaryScreen', ([], {}), '()\n', (478, 480), False, 'from PyQt5.QtWidgets import QApplication\n')] |
import bmw
import numpy as np
# Greedy scheduler: assign cars from a constellation to test slots (10 slots
# per time step) and record tests whose car quota cannot be filled.
problem = bmw.Problem.parse(filepath='../../data/3-refined')
dat = np.load('../2-prod/test-0.npz')
constellation = dat['constellation']
constellation_type_indices = dat['constellation_type_indices']

# Process tests in order of their test-group id.
test_indices = np.argsort(problem.test_groups)
for tindex2, tindex1 in enumerate(test_indices):
    expression = problem.test_set.expressions[tindex1]
    count = problem.test_set.counts[tindex1]
    passes = [index for index, state in enumerate(constellation)
              if expression.evaluate(state)]

# Schedule tables: -1 marks an empty slot.
test_array = np.zeros((10, 100))
test_array[...] = -1
car_array = np.zeros((10, 100))
car_array[...] = -1
time_index = 0
slot_index = 0
unscheduled_tests = np.full(10, -1)
# BUG FIX: this cursor must persist across iterations; it was previously
# reset to 0 inside the loop, so only unscheduled_tests[0] was ever written.
index_unscheduled = 0
for test_index in test_indices:
    print('test_index')
    print(test_index)
    expression = problem.test_set.expressions[test_index]
    count = problem.test_set.counts[test_index]
    print('count')
    print(count)
    car_candidates = [index for index, state in enumerate(constellation)
                      if expression.evaluate(state)]
    ncar = 0
    for car_index in car_candidates:
        print('car_index in car_candidates')
        print(car_index)
        # A car may be used at most once per time step.
        if car_index in car_array[time_index, :slot_index]:
            continue
        print('slot_index = %d' % slot_index)
        print('time_index = %d' % time_index)
        test_array[slot_index, time_index] = test_index
        car_array[slot_index, time_index] = car_index
        ncar += 1
        if ncar == count:
            break
    slot_index += 1
    if slot_index == 9:
        slot_index = 0
        time_index += 1
    print('final number of cars')
    print(ncar)
    print(car_array[:, :7])
    print(car_candidates)
    print('')
    print('')
    if ncar < count:
        # Record the unsatisfiable test instead of aborting the whole run.
        unscheduled_tests[index_unscheduled] = test_index
        print('unscheduled_tests')
        print(unscheduled_tests)
        index_unscheduled += 1
| [
"numpy.full",
"numpy.load",
"numpy.zeros",
"numpy.argsort",
"bmw.Problem.parse"
] | [((41, 91), 'bmw.Problem.parse', 'bmw.Problem.parse', ([], {'filepath': '"""../../data/3-refined"""'}), "(filepath='../../data/3-refined')\n", (58, 91), False, 'import bmw\n'), ((99, 130), 'numpy.load', 'np.load', (['"""../2-prod/test-0.npz"""'], {}), "('../2-prod/test-0.npz')\n", (106, 130), True, 'import numpy as np\n'), ((247, 278), 'numpy.argsort', 'np.argsort', (['problem.test_groups'], {}), '(problem.test_groups)\n', (257, 278), True, 'import numpy as np\n'), ((716, 735), 'numpy.zeros', 'np.zeros', (['(10, 100)'], {}), '((10, 100))\n', (724, 735), True, 'import numpy as np\n'), ((770, 789), 'numpy.zeros', 'np.zeros', (['(10, 100)'], {}), '((10, 100))\n', (778, 789), True, 'import numpy as np\n'), ((862, 877), 'numpy.full', 'np.full', (['(10)', '(-1)'], {}), '(10, -1)\n', (869, 877), True, 'import numpy as np\n')] |
# standard library imports
import ctypes
from enum import IntEnum
import os
import queue
import re
import warnings
# 3rd party library imports
import numpy as np
# Local imports
from ..config import glymur_config
# The error messages queue
# FIFO queue into which the libtiff error callback pushes formatted messages;
# check_error drains it to raise LibTIFFError.
EQ = queue.Queue()
# Platform-appropriate shared-library loader (not referenced in this chunk).
loader = ctypes.windll.LoadLibrary if os.name == 'nt' else ctypes.CDLL
# ctypes handles to the libtiff and C runtime shared libraries.
_LIBTIFF = glymur_config('tiff')
_LIBC = glymur_config('c')
class LibTIFFError(RuntimeError):
    """Raised when libtiff reports an error through its error handler."""
class Compression(IntEnum):
    """TIFF Compression tag values: scheme used to encode the image data.

    Duplicate values (e.g. CCITT_T4 / CCITTFAX3) are intentional enum
    aliases for the TIFF 6 names.

    See Also
    --------
    Photometric : The color space of the image data.
    """

    NONE = 1
    CCITTRLE = 2            # CCITT modified Huffman RLE
    CCITTFAX3 = 3           # CCITT Group 3 fax encoding
    CCITT_T4 = 3            # CCITT T.4 (TIFF 6 name)
    CCITTFAX4 = 4           # CCITT Group 4 fax encoding
    CCITT_T6 = 4            # CCITT T.6 (TIFF 6 name)
    LZW = 5                 # Lempel-Ziv & Welch
    OJPEG = 6               # 6.0 JPEG
    JPEG = 7                # JPEG DCT compression
    T85 = 9                 # TIFF/FX T.85 JBIG compression
    T43 = 10                # TIFF/FX T.43 colour by layered JBIG compression
    NEXT = 32766            # NeXT 2-bit RLE
    CCITTRLEW = 32771       # #1 w/ word alignment
    PACKBITS = 32773        # Macintosh RLE
    THUNDERSCAN = 32809     # ThunderScan RLE
    PIXARFILM = 32908       # companded 10bit LZW
    PIXARLOG = 32909        # companded 11bit ZIP
    DEFLATE = 32946         # compression
    ADOBE_DEFLATE = 8       # compression, as recognized by Adobe
    DCS = 32947             # DCS encoding
    JBIG = 34661            # JBIG
    SGILOG = 34676          # Log Luminance RLE
    SGILOG24 = 34677        # Log 24-bit packed
    JP2000 = 34712          # JPEG2000
    LZMA = 34925            # LZMA2
class InkSet(IntEnum):
    """TIFF InkSet tag: inks used in a separated image (Photometric=5)."""

    CMYK = 1
    MULTIINK = 2
class JPEGColorMode(IntEnum):
    """Pseudo-tag controlling JPEG/YCbCr color conversion on write.

    When writing images with Photometric YCbCr and Compression JPEG, set
    this to RGB unless the pixel values are already YCbCr.

    See Also
    --------
    Photometric : The color space of the image data.
    """

    RAW = 0
    RGB = 1
class PlanarConfig(IntEnum):
    """TIFF PlanarConfiguration tag: how pixel components are stored.

    Writing images with PlanarConfig.SEPARATE is not currently supported.
    """

    CONTIG = 1      # single image plane
    SEPARATE = 2    # separate planes of data
class Orientation(IntEnum):
    """TIFF Orientation tag: image orientation w.r.t. rows and columns."""

    TOPLEFT = 1     # row 0 top, col 0 lhs
    TOPRIGHT = 2    # row 0 top, col 0 rhs
    BOTRIGHT = 3    # row 0 bottom, col 0 rhs
    BOTLEFT = 4     # row 0 bottom, col 0 lhs
    LEFTTOP = 5     # row 0 lhs, col 0 top
    RIGHTTOP = 6    # row 0 rhs, col 0 top
    RIGHTBOT = 7    # row 0 rhs, col 0 bottom
    LEFTBOT = 8     # row 0 lhs, col 0 bottom
class Photometric(IntEnum):
    """TIFF PhotometricInterpretation tag: the color space of the image data.

    Typical write-side usage pairs YCBCR with Compression.JPEG and
    JPEGColorMode.RGB so RGB pixels are converted on the fly.

    See Also
    --------
    Compression : The compression scheme used on the image data.
    """

    MINISWHITE = 0      # min value is white
    MINISBLACK = 1      # min value is black
    RGB = 2             # RGB color model
    PALETTE = 3         # color map indexed
    MASK = 4            # holdout mask
    SEPARATED = 5       # color separations
    YCBCR = 6           # CCIR 601
    CIELAB = 8          # 1976 CIE L*a*b*
    ICCLAB = 9          # L*a*b* [Adobe TIFF Technote 4]
    ITULAB = 10         # L*a*b*
    CFA = 32803         # color filter array
    LOGL = 32844        # Log2(L)
    LOGLUV = 32845      # Log2(L) (u',v')
class SampleFormat(IntEnum):
    """TIFF SampleFormat tag: how each data sample in a pixel is interpreted."""

    UINT = 1
    INT = 2
    IEEEFP = 3
    VOID = 4
    COMPLEXINT = 5
    # NOTE(review): libtiff spells this SAMPLEFORMAT_COMPLEXIEEEFP; the name
    # is kept as-is because callers may already reference it.
    COMPLEXIEEEP = 6
def _handle_error(module, fmt, ap):
    """libtiff error callback: format the message and queue it on EQ."""
    # Let the C runtime's vsprintf expand the printf-style format string
    # against the va_list handed to us by libtiff.
    buf = ctypes.create_string_buffer(1000)
    _LIBC.vsprintf.argtypes = [
        ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p
    ]
    _LIBC.vsprintf.restype = ctypes.c_int32
    _LIBC.vsprintf(buf, fmt, ap)
    message = '{0}: {1}'.format(module.decode('utf-8'),
                                buf.value.decode('utf-8'))
    EQ.put(message)
    return None
def _handle_warning(module, fmt, ap):
    """libtiff warning callback: format the message and issue a warning."""
    # Let the C runtime's vsprintf expand the printf-style format string
    # against the va_list handed to us by libtiff.
    buf = ctypes.create_string_buffer(1000)
    _LIBC.vsprintf.argtypes = [
        ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p
    ]
    _LIBC.vsprintf.restype = ctypes.c_int32
    _LIBC.vsprintf(buf, fmt, ap)
    message = '{0}: {1}'.format(module.decode('utf-8'),
                                buf.value.decode('utf-8'))
    warnings.warn(message)
# Set the function types for the warning handler.
_WFUNCTYPE = ctypes.CFUNCTYPE(
    ctypes.c_void_p, # return type of warning handler, void *
    ctypes.c_char_p, # module
    ctypes.c_char_p, # fmt
    ctypes.c_void_p # va_list
)
# Module-level references keep the wrapped callbacks alive while they are
# registered with libtiff (ctypes does not hold its own reference).
_ERROR_HANDLER = _WFUNCTYPE(_handle_error)
_WARNING_HANDLER = _WFUNCTYPE(_handle_warning)
def _set_error_warning_handlers():
    """Install this module's default libtiff handlers.

    Returns
    -------
    tuple
        (old_error_handler, old_warning_handler) so the caller can restore
        them afterwards.
    """
    previous_warning = setWarningHandler()
    previous_error = setErrorHandler()
    return previous_error, previous_warning
def _reset_error_warning_handlers(old_error_handler, old_warning_handler):
    """Restore the previously installed libtiff error/warning handlers."""
    setWarningHandler(old_warning_handler)
    setErrorHandler(old_error_handler)
def close(fp):
    """Close a TIFF file handle (wraps TIFFClose)."""
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFClose.argtypes = [ctypes.c_void_p]
    _LIBTIFF.TIFFClose.restype = None
    _LIBTIFF.TIFFClose(fp)
    _reset_error_warning_handlers(*handlers)
def computeStrip(fp, row, sample):
    """Return the strip index containing (row, sample) (wraps TIFFComputeStrip)."""
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFComputeStrip.argtypes = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_uint16
    ]
    _LIBTIFF.TIFFComputeStrip.restype = ctypes.c_uint32
    strip_index = _LIBTIFF.TIFFComputeStrip(fp, row, sample)
    _reset_error_warning_handlers(*handlers)
    return strip_index
def computeTile(fp, x, y, z, sample):
    """Return the tile index containing (x, y, z, sample) (wraps TIFFComputeTile)."""
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFComputeTile.argtypes = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_uint32,
        ctypes.c_uint32, ctypes.c_uint16
    ]
    _LIBTIFF.TIFFComputeTile.restype = ctypes.c_uint32
    tile_index = _LIBTIFF.TIFFComputeTile(fp, x, y, z, sample)
    _reset_error_warning_handlers(*handlers)
    return tile_index
def isTiled(fp):
    """Return True if the image data is tiled rather than stripped.

    Wraps TIFFIsTiled.  The raw int status is converted to bool for
    consistency with RGBAImageOK; bool is an int subclass, so callers
    comparing against 0/1 are unaffected.
    """
    err_handler, warn_handler = _set_error_warning_handlers()
    _LIBTIFF.TIFFIsTiled.argtypes = [ctypes.c_void_p]
    _LIBTIFF.TIFFIsTiled.restype = ctypes.c_int
    status = _LIBTIFF.TIFFIsTiled(fp)
    _reset_error_warning_handlers(err_handler, warn_handler)
    return bool(status)
def numberOfStrips(fp):
    """Return the number of strips in the image (wraps TIFFNumberOfStrips)."""
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFNumberOfStrips.argtypes = [ctypes.c_void_p]
    _LIBTIFF.TIFFNumberOfStrips.restype = ctypes.c_uint32
    count = _LIBTIFF.TIFFNumberOfStrips(fp)
    _reset_error_warning_handlers(*handlers)
    return count
def numberOfTiles(fp):
    """Return the number of tiles in the image (wraps TIFFNumberOfTiles)."""
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFNumberOfTiles.argtypes = [ctypes.c_void_p]
    _LIBTIFF.TIFFNumberOfTiles.restype = ctypes.c_uint32
    count = _LIBTIFF.TIFFNumberOfTiles(fp)
    _reset_error_warning_handlers(*handlers)
    return count
def readEncodedStrip(fp, stripnum, strip, size=-1):
    """Decode strip *stripnum* into the ndarray *strip* (wraps TIFFReadEncodedStrip).

    Parameters
    ----------
    fp : ctypes void pointer
        Open TIFF handle.
    stripnum : int
        Zero-based strip index.
    strip : numpy.ndarray
        Pre-allocated output buffer, returned for convenience.
    size : int, optional
        Bytes to decode; -1 means the whole buffer.
    """
    handlers = _set_error_warning_handlers()
    nbytes = strip.nbytes if size == -1 else size
    _LIBTIFF.TIFFReadEncodedStrip.argtypes = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p, ctypes.c_int32
    ]
    _LIBTIFF.TIFFReadEncodedStrip.restype = check_error
    _LIBTIFF.TIFFReadEncodedStrip(
        fp, stripnum, strip.ctypes.data_as(ctypes.c_void_p), nbytes
    )
    _reset_error_warning_handlers(*handlers)
    return strip
def readEncodedTile(fp, tilenum, tile, size=-1):
    """Decode tile *tilenum* into the ndarray *tile* (wraps TIFFReadEncodedTile).

    Parameters
    ----------
    fp : ctypes void pointer
        Open TIFF handle.
    tilenum : int
        Zero-based tile index.
    tile : numpy.ndarray
        Pre-allocated output buffer, returned for convenience.
    size : int, optional
        Bytes to decode; -1 means the whole buffer.
    """
    err_handler, warn_handler = _set_error_warning_handlers()
    if size == -1:
        size = tile.nbytes
    ARGTYPES = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p, ctypes.c_int32
    ]
    _LIBTIFF.TIFFReadEncodedTile.argtypes = ARGTYPES
    _LIBTIFF.TIFFReadEncodedTile.restype = check_error
    # BUG FIX: the computed size was previously discarded and a literal -1
    # passed unconditionally; pass the byte count as readEncodedStrip does.
    _LIBTIFF.TIFFReadEncodedTile(
        fp, tilenum, tile.ctypes.data_as(ctypes.c_void_p), size
    )
    _reset_error_warning_handlers(err_handler, warn_handler)
    return tile
def readRGBAStrip(fp, row, strip):
    """Read the strip containing *row* as RGBA into *strip* (wraps TIFFReadRGBAStrip)."""
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFReadRGBAStrip.argtypes = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p
    ]
    _LIBTIFF.TIFFReadRGBAStrip.restype = check_error
    _LIBTIFF.TIFFReadRGBAStrip(
        fp, row, strip.ctypes.data_as(ctypes.c_void_p)
    )
    _reset_error_warning_handlers(*handlers)
    return strip
def readRGBATile(fp, x, y, tile):
    """Read the tile containing (x, y) as RGBA into *tile* (wraps TIFFReadRGBATile)."""
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFReadRGBATile.argtypes = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_void_p
    ]
    _LIBTIFF.TIFFReadRGBATile.restype = check_error
    _LIBTIFF.TIFFReadRGBATile(
        fp, x, y, tile.ctypes.data_as(ctypes.c_void_p)
    )
    _reset_error_warning_handlers(*handlers)
    return tile
def readRGBAImageOriented(fp, width=None, height=None,
                          orientation=Orientation.TOPLEFT):
    """Read the current directory as an RGBA image.

    This function corresponds to the TIFFReadRGBAImageOriented function in
    the libtiff library.

    Parameters
    ----------
    fp : ctypes void pointer
        File pointer returned by libtiff.
    width, height : int
        Output dimensions; defaulted from the ImageWidth/ImageLength tags.
    orientation : int
        The raster origin position.

    Returns
    -------
    numpy.ndarray
        (height, width, 4) uint8 array.

    See Also
    --------
    Orientation
    """
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFReadRGBAImageOriented.argtypes = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_uint32,
        ctypes.POINTER(ctypes.c_uint32), ctypes.c_int32, ctypes.c_int32
    ]
    _LIBTIFF.TIFFReadRGBAImageOriented.restype = check_error
    if width is None:
        width = getFieldDefaulted(fp, 'ImageWidth')
    if height is None:
        height = getFieldDefaulted(fp, 'ImageLength')
    img = np.zeros((height, width, 4), dtype=np.uint8)
    raster = img.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
    # Trailing 0: presumably libtiff's stopOnError flag disabled — confirm
    # against the libtiff man page.
    _LIBTIFF.TIFFReadRGBAImageOriented(fp, width, height, raster,
                                       orientation, 0)
    _reset_error_warning_handlers(*handlers)
    return img
def writeEncodedStrip(fp, stripnum, stripdata, size=-1):
    """Encode and write *stripdata* as strip *stripnum* (wraps TIFFWriteEncodedStrip).

    *size* is the byte count to write; -1 means the whole array.
    """
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFWriteEncodedStrip.argtypes = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p, ctypes.c_uint32
    ]
    _LIBTIFF.TIFFWriteEncodedStrip.restype = check_error
    nbytes = stripdata.nbytes if size == -1 else size
    _LIBTIFF.TIFFWriteEncodedStrip(
        fp, stripnum, stripdata.ctypes.data_as(ctypes.c_void_p), nbytes
    )
    _reset_error_warning_handlers(*handlers)
def writeEncodedTile(fp, tilenum, tiledata, size=-1):
    """Encode and write *tiledata* as tile *tilenum* (wraps TIFFWriteEncodedTile).

    *size* is the byte count to write; -1 means the whole array.
    """
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFWriteEncodedTile.argtypes = [
        ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p, ctypes.c_uint32
    ]
    _LIBTIFF.TIFFWriteEncodedTile.restype = check_error
    nbytes = tiledata.nbytes if size == -1 else size
    _LIBTIFF.TIFFWriteEncodedTile(
        fp, tilenum, tiledata.ctypes.data_as(ctypes.c_void_p), nbytes
    )
    _reset_error_warning_handlers(*handlers)
def RGBAImageOK(fp):
    """Return True if the image can be read via the RGBA interface.

    Wraps TIFFRGBAImageOK.  Any explanatory message libtiff writes into
    *emsg* is currently discarded.
    """
    err_handler, warn_handler = _set_error_warning_handlers()
    emsg = ctypes.create_string_buffer(1024)
    _LIBTIFF.TIFFRGBAImageOK.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    _LIBTIFF.TIFFRGBAImageOK.restype = ctypes.c_int
    ok = _LIBTIFF.TIFFRGBAImageOK(fp, emsg)
    _reset_error_warning_handlers(err_handler, warn_handler)
    # bool(ok) replaces the original four-line if/else; behavior is identical.
    return bool(ok)
def getFieldDefaulted(fp, tag):
    """Return the (possibly defaulted) value of *tag* (wraps TIFFGetFieldDefaulted).

    Looks up the tag number and ctypes type in TAGS, passes a pointer of
    that type to libtiff, and returns the filled-in Python value.
    """
    handlers = _set_error_warning_handlers()
    tag_info = TAGS[tag]
    tag_type = tag_info['type']
    _LIBTIFF.TIFFGetFieldDefaulted.argtypes = [
        ctypes.c_void_p, ctypes.c_int32, ctypes.POINTER(tag_type)
    ]
    _LIBTIFF.TIFFGetFieldDefaulted.restype = check_error
    # Instantiate the tag value and let libtiff fill it in.
    item = tag_type()
    _LIBTIFF.TIFFGetFieldDefaulted(fp, tag_info['number'], ctypes.byref(item))
    _reset_error_warning_handlers(*handlers)
    return item.value
def getVersion():
    """Return the libtiff version string, e.g. '4.3.0'.

    Returns '0.0.0' when libtiff is not installed or the version banner
    cannot be parsed.
    """
    try:
        _LIBTIFF.TIFFGetVersion.restype = ctypes.c_char_p
    except AttributeError:
        # libtiff not installed
        return '0.0.0'
    v = _LIBTIFF.TIFFGetVersion().decode('utf-8')
    # The banner looks like:
    #
    #     LIBTIFF, Version 4.3.0
    #     Copyright (c) ...
    #
    # All we want is the dotted version number.
    m = re.search(r'(?P<version>\d+\.\d+\.\d+)', v)
    if m is None:
        # Unrecognized banner; fall back instead of raising AttributeError
        # on m.group().
        return '0.0.0'
    return m.group('version')
def open(filename, mode='r'):
    """Open a TIFF file and return the libtiff handle (wraps TIFFOpen).

    Parameters
    ----------
    filename : path or str
        Path to TIFF
    mode : str
        libtiff open mode, e.g. 'r', 'w', 'w8'.
    """
    handlers = _set_error_warning_handlers()
    _LIBTIFF.TIFFOpen.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
    _LIBTIFF.TIFFOpen.restype = ctypes.c_void_p
    fp = _LIBTIFF.TIFFOpen(ctypes.c_char_p(str(filename).encode()),
                           ctypes.c_char_p(mode.encode()))
    _reset_error_warning_handlers(*handlers)
    return fp
def setErrorHandler(func=_ERROR_HANDLER):
    """Register *func* as libtiff's error handler; return the previous one.

    The handler signature is (const char *module, const char *fmt,
    va_list ap) returning void *.
    """
    _LIBTIFF.TIFFSetErrorHandler.argtypes = [_WFUNCTYPE]
    _LIBTIFF.TIFFSetErrorHandler.restype = _WFUNCTYPE
    return _LIBTIFF.TIFFSetErrorHandler(func)
def setField(fp, tag, value):
    """Set *tag* to *value* on the TIFF handle (wraps TIFFSetField).

    The tag number and ctypes argument type are looked up in TAGS.
    """
    handlers = _set_error_warning_handlers()
    tag_info = TAGS[tag]
    _LIBTIFF.TIFFSetField.argtypes = [
        ctypes.c_void_p, ctypes.c_int32, tag_info['type']
    ]
    _LIBTIFF.TIFFSetField.restype = check_error
    _LIBTIFF.TIFFSetField(fp, tag_info['number'], value)
    _reset_error_warning_handlers(*handlers)
def setWarningHandler(func=_WARNING_HANDLER):
    """Register *func* as libtiff's warning handler; return the previous one.

    The handler signature is (const char *module, const char *fmt,
    va_list ap) returning void *.
    """
    _LIBTIFF.TIFFSetWarningHandler.argtypes = [_WFUNCTYPE]
    _LIBTIFF.TIFFSetWarningHandler.restype = _WFUNCTYPE
    return _LIBTIFF.TIFFSetWarningHandler(func)
def check_error(status):
    """restype hook for libtiff calls that return an int status.

    Raises LibTIFFError with any message(s) queued by the error handler;
    otherwise raises RuntimeError when *status* is 0.

    The whole queue is drained before raising (the original raised on the
    first message, leaving any remaining messages queued to poison later,
    unrelated calls).
    """
    messages = []
    while not EQ.empty():
        messages.append(EQ.get())
    if messages:
        raise LibTIFFError('\n'.join(messages))
    if status == 0:
        raise RuntimeError('failed')
TAGS = {
'SubFileType': {
'number': 254,
'type': ctypes.c_uint16,
},
'OSubFileType': {
'number': 255,
'type': ctypes.c_uint16,
},
'ImageWidth': {
'number': 256,
'type': ctypes.c_uint32,
},
'ImageLength': {
'number': 257,
'type': ctypes.c_uint32,
},
'BitsPerSample': {
'number': 258,
'type': ctypes.c_uint16,
},
'Compression': {
'number': 259,
'type': ctypes.c_uint16,
},
'Photometric': {
'number': 262,
'type': ctypes.c_uint16,
},
'Threshholding': {
'number': 263,
'type': ctypes.c_uint16,
},
'CellWidth': {
'number': 264,
'type': ctypes.c_uint16,
},
'CellLength': {
'number': 265,
'type': ctypes.c_uint16,
},
'FillOrder': {
'number': 266,
'type': ctypes.c_uint16,
},
'DocumentName': {
'number': 269,
'type': ctypes.c_char_p,
},
'ImageDescription': {
'number': 270,
'type': ctypes.c_char_p,
},
'Make': {
'number': 271,
'type': ctypes.c_char_p,
},
'Model': {
'number': 272,
'type': ctypes.c_char_p,
},
'StripOffsets': {
'number': 273,
'type': (ctypes.c_uint32, ctypes.c_uint64),
},
'Orientation': {
'number': 274,
'type': ctypes.c_uint16,
},
'SamplesPerPixel': {
'number': 277,
'type': ctypes.c_uint16,
},
'RowsPerStrip': {
'number': 278,
'type': ctypes.c_uint16,
},
'StripByteCounts': {
'number': 279,
'type': None,
},
'MinSampleValue': {
'number': 280,
'type': ctypes.c_uint16,
},
'MaxSampleValue': {
'number': 281,
'type': ctypes.c_uint16,
},
'XResolution': {
'number': 282,
'type': ctypes.c_double,
},
'YResolution': {
'number': 283,
'type': ctypes.c_double,
},
'PlanarConfig': {
'number': 284,
'type': ctypes.c_uint16,
},
'PageName': {
'number': 285,
'type': ctypes.c_char_p,
},
'XPosition': {
'number': 286,
'type': ctypes.c_double,
},
'YPosition': {
'number': 287,
'type': ctypes.c_double,
},
'FreeOffsets': {
'number': 288,
'type': ctypes.c_uint32,
},
'FreeByteCounts': {
'number': 289,
'type': ctypes.c_uint32,
},
'GrayResponseUnit': {
'number': 290,
'type': ctypes.c_uint16,
},
'GrayResponseCurve': {
'number': 291,
'type': None,
},
'T4Options': {
'number': 292,
'type': None,
},
'T6Options': {
'number': 293,
'type': None,
},
'ResolutionUnit': {
'number': 296,
'type': ctypes.c_uint16,
},
'PageNumber': {
'number': 297,
'type': (ctypes.c_uint16, ctypes.c_uint16),
},
'TransferFunction': {
'number': 301,
'type': None,
},
'Software': {
'number': 305,
'type': ctypes.c_char_p,
},
'Datetime': {
'number': 306,
'type': ctypes.c_char_p,
},
'Artist': {
'number': 315,
'type': ctypes.c_char_p,
},
'HostComputer': {
'number': 316,
'type': ctypes.c_char_p,
},
'Predictor': {
'number': 317,
'type': ctypes.c_uint16,
},
'WhitePoint': {
'number': 318,
'type': ctypes.c_double,
},
'PrimaryChromaticities': {
'number': 319,
'type': None,
},
'ColorMap': {
'number': 320,
'type': (ctypes.c_uint16, ctypes.c_uint16, ctypes.c_uint16),
},
'HalfToneHints': {
'number': 321,
'type': ctypes.c_uint16,
},
'TileWidth': {
'number': 322,
'type': ctypes.c_uint32,
},
'TileLength': {
'number': 323,
'type': ctypes.c_uint32,
},
'TileOffsets': {
'number': 324,
'type': None,
},
'TileByteCounts': {
'number': 325,
'type': None,
},
'BadFaxLines': {
'number': 326,
'type': None,
},
'CleanFaxData': {
'number': 327,
'type': None,
},
'ConsecutiveBadFaxLines': {
'number': 328,
'type': None,
},
'SubIFDs': {
'number': 330,
'type': None,
},
'InkSet': {
'number': 332,
'type': ctypes.c_uint16,
},
'InkNames': {
'number': 333,
'type': ctypes.c_char_p,
},
'NumberOfInks': {
'number': 334,
'type': ctypes.c_uint16,
},
'DotRange': {
'number': 336,
'type': None,
},
'TargetPrinter': {
'number': 337,
'type': ctypes.c_uint16,
},
'ExtraSamples': {
'number': 338,
'type': ctypes.c_uint16,
},
'SampleFormat': {
'number': 339,
'type': ctypes.c_uint16,
},
'SMinSampleValue': {
'number': 340,
'type': ctypes.c_double,
},
'SMaxSampleValue': {
'number': 341,
'type': ctypes.c_double,
},
'TransferRange': {
'number': 342,
'type': None,
},
'ClipPath': {
'number': 343,
'type': None,
},
'XClipPathUnits': {
'number': 344,
'type': None,
},
'YClipPathUnits': {
'number': 345,
'type': None,
},
'Indexed': {
'number': 346,
'type': None,
},
'JPEGTables': {
'number': 347,
'type': None,
},
'OPIProxy': {
'number': 351,
'type': None,
},
'GlobalParametersIFD': {
'number': 400,
'type': None,
},
'ProfileType': {
'number': 401,
'type': None,
},
'FaxProfile': {
'number': 402,
'type': ctypes.c_uint8,
},
'CodingMethods': {
'number': 403,
'type': None,
},
'VersionYear': {
'number': 404,
'type': None,
},
'ModeNumber': {
'number': 405,
'type': None,
},
'Decode': {
'number': 433,
'type': None,
},
'DefaultImageColor': {
'number': 434,
'type': None,
},
'JPEGProc': {
'number': 512,
'type': None,
},
'JPEGInterchangeFormat': {
'number': 513,
'type': None,
},
'JPEGInterchangeFormatLength': {
'number': 514,
'type': None,
},
'JPEGRestartInterval': {
'number': 515,
'type': None,
},
'JPEGLosslessPredictors': {
'number': 517,
'type': None,
},
'JPEGPointTransforms': {
'number': 518,
'type': None,
},
'JPEGQTables': {
'number': 519,
'type': None,
},
'JPEGDCTables': {
'number': 520,
'type': None,
},
'JPEGACTables': {
'number': 521,
'type': None,
},
'YCbCrCoefficients': {
'number': 529,
'type': (ctypes.c_float, ctypes.c_float, ctypes.c_float),
},
'YCbCrSubsampling': {
'number': 530,
'type': (ctypes.c_uint16, ctypes.c_uint16),
},
'YCbCrPositioning': {
'number': 531,
'type': ctypes.c_uint16,
},
'ReferenceBlackWhite': {
'number': 532,
'type': (ctypes.c_float, ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_float, ctypes.c_float),
},
'StripRowCounts': {
'number': 559,
'type': None,
},
'XMP': {
'number': 700,
'type': ctypes.c_uint8,
},
'ImageID': {
'number': 32781,
'type': None,
},
'Datatype': {
'number': 32996,
'type': None,
},
'WANGAnnotation': {
'number': 32932,
'type': None,
},
'ImageDepth': {
'number': 32997,
'type': None,
},
'TileDepth': {
'number': 32998,
'type': None,
},
'Copyright': {
'number': 33432,
'type': ctypes.c_char_p,
},
'ExposureTime': {
'number': 33434,
'type': ctypes.c_double,
},
'FNumber': {
'number': 33437,
'type': ctypes.c_double,
},
'MDFile': {
'number': 33445,
'type': None,
},
'MDScalePixel': {
'number': 33446,
'type': None,
},
'MDColorTable': {
'number': 33447,
'type': None,
},
'MDLabName': {
'number': 33448,
'type': None,
},
'MDSampleInfo': {
'number': 33449,
'type': None,
},
'MdPrepDate': {
'number': 33450,
'type': None,
},
'MDPrepTime': {
'number': 33451,
'type': None,
},
'MDFileUnits': {
'number': 33452,
'type': None,
},
'ModelPixelScale': {
'number': 33550,
'type': None,
},
'IPTC': {
'number': 33723,
'type': None,
},
'INGRPacketData': {
'number': 33918,
'type': None,
},
'INGRFlagRegisters': {
'number': 33919,
'type': None,
},
'IRASbTransformationMatrix': {
'number': 33920,
'type': None,
},
'ModelTiePoint': {
'number': 33922,
'type': None,
},
'ModelTransformation': {
'number': 34264,
'type': None,
},
'Photoshop': {
'number': 34377,
'type': None,
},
'ExifIFD': {
'number': 34665,
'type': ctypes.c_int32,
},
'ICCProfile': {
'number': 34675,
'type': None,
},
'ImageLayer': {
'number': 34732,
'type': None,
},
'GeoKeyDirectory': {
'number': 34735,
'type': None,
},
'GeoDoubleParams': {
'number': 34736,
'type': None,
},
'GeoASCIIParams': {
'number': 34737,
'type': None,
},
'ExposureProgram': {
'number': 34850,
'type': ctypes.c_uint16,
},
'GPSIFD': {
'number': 34853,
'type': None,
},
'ISOSpeedRatings': {
'number': 34855,
'type': ctypes.c_uint16,
},
'HYLAFAXRecvParams': {
'number': 34908,
'type': None,
},
'HYLAFAXSubAddress': {
'number': 34909,
'type': None,
},
'HYLAFAXRecvTime': {
'number': 34910,
'type': None,
},
'ExifVersion': {
'number': 36864,
'type': ctypes.c_uint8,
},
'CompressedBitsPerPixel': {
'number': 37122,
'type': ctypes.c_uint8,
},
'ShutterSpeedValue': {
'number': 37377,
'type': ctypes.c_double,
},
'ApertureValue': {
'number': 37378,
'type': ctypes.c_double,
},
'BrightnessValue': {
'number': 37379,
'type': ctypes.c_double,
},
'ExposureBiasValue': {
'number': 37380,
'type': ctypes.c_double,
},
'MaxApertureValue': {
'number': 37381,
'type': ctypes.c_double,
},
'SubjectDistance': {
'number': 37382,
'type': ctypes.c_double,
},
'MeteringMode': {
'number': 37383,
'type': ctypes.c_uint16,
},
'LightSource': {
'number': 37384,
'type': ctypes.c_uint16,
},
'Flash': {
'number': 37385,
'type': ctypes.c_uint16,
},
'FocalLength': {
'number': 37386,
'type': ctypes.c_double,
},
'ImageSourceData': {
'number': 37724,
'type': None,
},
'ColorSpace': {
'number': 40961,
'type': ctypes.c_uint16,
},
'PixelXDimension': {
'number': 40962,
'type': ctypes.c_uint64,
},
'PixelYDimension': {
'number': 40963,
'type': ctypes.c_uint64,
},
'InteroperabilityIFD': {
'number': 40965,
'type': None,
},
'FocalPlaneXResolution': {
'number': 41486,
'type': ctypes.c_double,
},
'FocalPlaneYResolution': {
'number': 41487,
'type': ctypes.c_double,
},
'FocalPlaneResolutionUnit': {
'number': 41488,
'type': ctypes.c_uint16,
},
'ExposureIndex': {
'number': 41493,
'type': ctypes.c_double,
},
'SensingMethod': {
'number': 41495,
'type': ctypes.c_uint16,
},
'FileSource': {
'number': 41728,
'type': ctypes.c_uint8,
},
'SceneType': {
'number': 41729,
'type': ctypes.c_uint8,
},
'ExposureMode': {
'number': 41986,
'type': ctypes.c_uint16,
},
'WhiteBalance': {
'number': 41987,
'type': ctypes.c_uint16,
},
'DigitalZoomRatio': {
'number': 41988,
'type': ctypes.c_double,
},
'FocalLengthIn35mmFilm': {
'number': 41989,
'type': ctypes.c_uint16,
},
'SceneCaptureType': {
'number': 41990,
'type': ctypes.c_uint16,
},
'GainControl': {
'number': 41991,
'type': ctypes.c_uint16,
},
'Contrast': {
'number': 41992,
'type': ctypes.c_uint16,
},
'Saturation': {
'number': 41993,
'type': ctypes.c_uint16,
},
'Sharpness': {
'number': 41994,
'type': ctypes.c_uint16,
},
'SubjectDistanceRange': {
'number': 41996,
'type': ctypes.c_uint16,
},
'GDAL_Metadata': {
'number': 42112,
'type': None,
},
'GDAL_NoData': {
'number': 42113,
'type': None,
},
'OCEScanJobDescription': {
'number': 50215,
'type': None,
},
'OCEApplicationSelector': {
'number': 50216,
'type': None,
},
'OCEIdentificationNumber': {
'number': 50217,
'type': None,
},
'OCEImageLogicCharacteristics': {
'number': 50218,
'type': None,
},
'DNGVersion': {
'number': 50706,
'type': None,
},
'DNGBackwardVersion': {
'number': 50707,
'type': None,
},
'UniqueCameraModel': {
'number': 50708,
'type': None,
},
'LocalizedCameraModel': {
'number': 50709,
'type': None,
},
'CFAPlaneColor': {
'number': 50710,
'type': None,
},
'CFALayout': {
'number': 50711,
'type': None,
},
'LinearizationTable': {
'number': 50712,
'type': None,
},
'BlackLevelRepeatDim': {
'number': 50713,
'type': None,
},
'BlackLevel': {
'number': 50714,
'type': None,
},
'BlackLevelDeltaH': {
'number': 50715,
'type': None,
},
'BlackLevelDeltaV': {
'number': 50716,
'type': None,
},
'WhiteLevel': {
'number': 50717,
'type': None,
},
'DefaultScale': {
'number': 50718,
'type': None,
},
'DefaultCropOrigin': {
'number': 50719,
'type': None,
},
'DefaultCropSize': {
'number': 50720,
'type': None,
},
'ColorMatrix1': {
'number': 50721,
'type': None,
},
'ColorMatrix2': {
'number': 50722,
'type': None,
},
'CameraCalibration1': {
'number': 50723,
'type': None,
},
'CameraCalibration2': {
'number': 50724,
'type': None,
},
'ReductionMatrix1': {
'number': 50725,
'type': None,
},
'ReductionMatrix2': {
'number': 50726,
'type': None,
},
'AnalogBalance': {
'number': 50727,
'type': None,
},
'AsShotNeutral': {
'number': 50728,
'type': None,
},
'AsShotWhiteXY': {
'number': 50729,
'type': None,
},
'BaselineExposure': {
'number': 50730,
'type': None,
},
'BaselineNoise': {
'number': 50731,
'type': None,
},
'BaselineSharpness': {
'number': 50732,
'type': None,
},
'BayerGreenSplit': {
'number': 50733,
'type': None,
},
'LinearResponseLimit': {
'number': 50734,
'type': None,
},
'CameraSerialNumber': {
'number': 50735,
'type': None,
},
'LensInfo': {
'number': 50736,
'type': None,
},
'ChromaBlurRadius': {
'number': 50737,
'type': None,
},
'AntiAliasStrength': {
'number': 50738,
'type': None,
},
'DNGPrivateData': {
'number': 50740,
'type': None,
},
'MakerNoteSafety': {
'number': 50741,
'type': None,
},
'CalibrationIllumintant1': {
'number': 50778,
'type': None,
},
'CalibrationIllumintant2': {
'number': 50779,
'type': None,
},
'BestQualityScale': {
'number': 50780,
'type': None,
},
'AliasLayerMetadata': {
'number': 50784,
'type': None,
},
'TIFF_RSID': {
'number': 50908,
'type': None,
},
'GEO_Metadata': {
'number': 50909,
'type': None,
},
'JPEGQuality': {
'number': 65537,
'type': ctypes.c_int32,
},
'JPEGColorMode': {
'number': 65538,
'type': ctypes.c_int32,
},
}
# We need the reverse mapping as well.
tagnum2name = {value['number']: key for key, value in TAGS.items()}
| [
"ctypes.byref",
"numpy.zeros",
"ctypes.create_string_buffer",
"ctypes.CFUNCTYPE",
"warnings.warn",
"re.search",
"queue.Queue",
"ctypes.POINTER"
] | [((248, 261), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (259, 261), False, 'import queue\n'), ((6475, 6564), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['ctypes.c_void_p', 'ctypes.c_char_p', 'ctypes.c_char_p', 'ctypes.c_void_p'], {}), '(ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.\n c_void_p)\n', (6491, 6564), False, 'import ctypes\n'), ((5454, 5487), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['(1000)'], {}), '(1000)\n', (5481, 5487), False, 'import ctypes\n'), ((6036, 6069), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['(1000)'], {}), '(1000)\n', (6063, 6069), False, 'import ctypes\n'), ((6387, 6409), 'warnings.warn', 'warnings.warn', (['message'], {}), '(message)\n', (6400, 6409), False, 'import warnings\n'), ((13022, 13066), 'numpy.zeros', 'np.zeros', (['(height, width, 4)'], {'dtype': 'np.uint8'}), '((height, width, 4), dtype=np.uint8)\n', (13030, 13066), True, 'import numpy as np\n'), ((14716, 14749), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['(1024)'], {}), '(1024)\n', (14743, 14749), False, 'import ctypes\n'), ((16284, 16331), 're.search', 're.search', (['"""(?P<version>\\\\d+\\\\.\\\\d+\\\\.\\\\d+)"""', 'v'], {}), "('(?P<version>\\\\d+\\\\.\\\\d+\\\\.\\\\d+)', v)\n", (16293, 16331), False, 'import re\n'), ((12668, 12699), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (12682, 12699), False, 'import ctypes\n'), ((13099, 13130), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (13113, 13130), False, 'import ctypes\n'), ((15432, 15465), 'ctypes.POINTER', 'ctypes.POINTER', (["TAGS[tag]['type']"], {}), "(TAGS[tag]['type'])\n", (15446, 15465), False, 'import ctypes\n'), ((15683, 15701), 'ctypes.byref', 'ctypes.byref', (['item'], {}), '(item)\n', (15695, 15701), False, 'import ctypes\n')] |
import sys
sys.path.append('../')
from pathlib import Path
import shutil
import numpy as np
import pickle
from py_diff_pd.common.common import ndarray, create_folder
from py_diff_pd.common.common import print_info, print_ok, print_error
from py_diff_pd.common.hex_mesh import hex2obj_with_textures, filter_hex
from py_diff_pd.common.grad_check import check_gradients
from py_diff_pd.core.py_diff_pd_core import HexMesh3d, HexDeformable, StdRealVector
from py_diff_pd.env.quadruped_env_3d import QuadrupedEnv3d
if __name__ == '__main__':
seed = 42
folder = Path('quadruped_3d')
refinement = 3
act_max = 1.0
youngs_modulus = 1e6
poissons_ratio = 0.49
leg_z_length = 2
body_x_length = 4
body_y_length = 4
body_z_length = 1
env = QuadrupedEnv3d(seed, folder, { 'refinement': refinement,
'youngs_modulus': youngs_modulus,
'poissons_ratio': poissons_ratio,
'leg_z_length': leg_z_length,
'body_x_length': body_x_length,
'body_y_length': body_y_length,
'body_z_length': body_z_length,
'spp': 64 })
deformable = env.deformable()
leg_indices = env.leg_indices()
act_indices = env.act_indices()
# Optimization parameters.
thread_ct = 8
opt = { 'max_pd_iter': 500, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': thread_ct,
'use_bfgs': 1, 'bfgs_history_size': 10 }
method = 'pd_eigen'
dt = 1e-2
# We optimize quadruped for 100 frames but we can use larger frames for verifying the open-loop controller.
frame_num = 400
# Load results.
folder = Path('quadruped_3d')
thread_ct = 8
data_file = folder / 'data_{:04d}_threads.bin'.format(thread_ct)
data = pickle.load(open(data_file, 'rb'))
# Compute the initial state.
dofs = deformable.dofs()
act_dofs = deformable.act_dofs()
q0 = env.default_init_position()
init_offset = ndarray([0, 0, 0.025])
q0 = (q0.reshape((-1, 3)) + init_offset).ravel()
v0 = np.zeros(dofs)
f0 = [np.zeros(dofs) for _ in range(frame_num)]
def variable_to_acts(x):
A_f, A_b, w = x
a = [np.zeros(act_dofs) for _ in range(frame_num)]
for i in range(frame_num):
for key, indcs in leg_indices.items():
if key[-1] == 'F':
for idx in indcs:
if key[0] == 'F':
a[i][idx] = act_max * (1 + A_f * np.sin(w * i)) / 2
else:
a[i][idx] = act_max * (1 + A_b * np.sin(w * i)) / 2
else:
for idx in indcs:
if key[0] =='F':
a[i][idx] = act_max * (1 - A_f * np.sin(w * i)) / 2
else:
a[i][idx] = act_max * (1 - A_b * np.sin(w * i)) / 2
return a
def simulate(x, vis_folder):
a = variable_to_acts(x)
env.simulate(dt, frame_num, method, opt, q0, v0, a, f0, require_grad=False, vis_folder=vis_folder)
# Initial guess and final results.
x_init = data[method][0]['x']
x_final = data[method][-1]['x']
simulate(x_init, 'init')
simulate(x_final, 'final')
# Assemble muscles.
muscle_idx = act_indices
not_muscle_idx = []
all_idx = np.zeros(env.element_num())
for idx in muscle_idx:
all_idx[idx] = 1
for idx in range(env.element_num()):
if all_idx[idx] == 0:
not_muscle_idx.append(idx)
# Reconstruct muscle groups.
muscle_groups = {}
for key, val in leg_indices.items():
muscle_groups[key] = [act_indices[v] for v in val]
def gather_act(act):
reduced_act = {}
for key, val in leg_indices.items():
reduced_act[key] = act[val[0]]
for v in val:
assert act[v] == act[val[0]]
return reduced_act
def generate_mesh(vis_folder, mesh_folder, x_var):
create_folder(folder / mesh_folder)
act = variable_to_acts(x_var)
color_num = 11
duv = 1 / color_num
for i in range(frame_num):
frame_folder = folder / mesh_folder / '{:d}'.format(i)
create_folder(frame_folder)
# Generate body.bin.
input_bin_file = folder / vis_folder / '{:04d}.bin'.format(i)
shutil.copyfile(input_bin_file, frame_folder / 'body.bin')
# Generate body.obj.
mesh = HexMesh3d()
mesh.Initialize(str(frame_folder / 'body.bin'))
hex2obj_with_textures(mesh, obj_file_name=frame_folder / 'body.obj')
# Generate action.npy.
frame_act = gather_act(act[i])
frame_act_flattened = []
for _, a in frame_act.items():
frame_act_flattened.append(a)
np.save(frame_folder / 'action.npy', ndarray(frame_act_flattened))
# Generate muscle/
create_folder(frame_folder / 'muscle')
cnt = 0
for key, group in muscle_groups.items():
sub_mesh = filter_hex(mesh, group)
a = frame_act[key]
v = a / act_max
assert 0 <= v <= 1
v_lower = np.floor(v / duv)
if v_lower == color_num: v_lower -= 1
v_upper = v_lower + 1
texture_map = ((0, v_lower * duv), (1, v_lower * duv), (1, v_upper * duv), (0, v_upper * duv))
hex2obj_with_textures(sub_mesh, obj_file_name=frame_folder / 'muscle' / '{:d}.obj'.format(cnt),
texture_map=texture_map)
cnt += 1
# Generate not_muscle.obj.
sub_mesh = filter_hex(mesh, not_muscle_idx)
hex2obj_with_textures(sub_mesh, obj_file_name=frame_folder / 'not_muscle.obj')
generate_mesh('init', 'init_mesh', x_init)
generate_mesh('final', 'final_mesh', x_final) | [
"sys.path.append",
"py_diff_pd.common.hex_mesh.hex2obj_with_textures",
"py_diff_pd.common.hex_mesh.filter_hex",
"py_diff_pd.common.common.create_folder",
"numpy.floor",
"numpy.zeros",
"py_diff_pd.common.common.ndarray",
"pathlib.Path",
"py_diff_pd.core.py_diff_pd_core.HexMesh3d",
"numpy.sin",
"s... | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((567, 587), 'pathlib.Path', 'Path', (['"""quadruped_3d"""'], {}), "('quadruped_3d')\n", (571, 587), False, 'from pathlib import Path\n'), ((773, 1046), 'py_diff_pd.env.quadruped_env_3d.QuadrupedEnv3d', 'QuadrupedEnv3d', (['seed', 'folder', "{'refinement': refinement, 'youngs_modulus': youngs_modulus,\n 'poissons_ratio': poissons_ratio, 'leg_z_length': leg_z_length,\n 'body_x_length': body_x_length, 'body_y_length': body_y_length,\n 'body_z_length': body_z_length, 'spp': 64}"], {}), "(seed, folder, {'refinement': refinement, 'youngs_modulus':\n youngs_modulus, 'poissons_ratio': poissons_ratio, 'leg_z_length':\n leg_z_length, 'body_x_length': body_x_length, 'body_y_length':\n body_y_length, 'body_z_length': body_z_length, 'spp': 64})\n", (787, 1046), False, 'from py_diff_pd.env.quadruped_env_3d import QuadrupedEnv3d\n'), ((1626, 1646), 'pathlib.Path', 'Path', (['"""quadruped_3d"""'], {}), "('quadruped_3d')\n", (1630, 1646), False, 'from pathlib import Path\n'), ((1935, 1957), 'py_diff_pd.common.common.ndarray', 'ndarray', (['[0, 0, 0.025]'], {}), '([0, 0, 0.025])\n', (1942, 1957), False, 'from py_diff_pd.common.common import ndarray, create_folder\n'), ((2020, 2034), 'numpy.zeros', 'np.zeros', (['dofs'], {}), '(dofs)\n', (2028, 2034), True, 'import numpy as np\n'), ((2045, 2059), 'numpy.zeros', 'np.zeros', (['dofs'], {}), '(dofs)\n', (2053, 2059), True, 'import numpy as np\n'), ((3983, 4018), 'py_diff_pd.common.common.create_folder', 'create_folder', (['(folder / mesh_folder)'], {}), '(folder / mesh_folder)\n', (3996, 4018), False, 'from py_diff_pd.common.common import ndarray, create_folder\n'), ((2154, 2172), 'numpy.zeros', 'np.zeros', (['act_dofs'], {}), '(act_dofs)\n', (2162, 2172), True, 'import numpy as np\n'), ((4222, 4249), 'py_diff_pd.common.common.create_folder', 'create_folder', (['frame_folder'], {}), '(frame_folder)\n', (4235, 
4249), False, 'from py_diff_pd.common.common import ndarray, create_folder\n'), ((4370, 4428), 'shutil.copyfile', 'shutil.copyfile', (['input_bin_file', "(frame_folder / 'body.bin')"], {}), "(input_bin_file, frame_folder / 'body.bin')\n", (4385, 4428), False, 'import shutil\n'), ((4482, 4493), 'py_diff_pd.core.py_diff_pd_core.HexMesh3d', 'HexMesh3d', ([], {}), '()\n', (4491, 4493), False, 'from py_diff_pd.core.py_diff_pd_core import HexMesh3d, HexDeformable, StdRealVector\n'), ((4566, 4634), 'py_diff_pd.common.hex_mesh.hex2obj_with_textures', 'hex2obj_with_textures', (['mesh'], {'obj_file_name': "(frame_folder / 'body.obj')"}), "(mesh, obj_file_name=frame_folder / 'body.obj')\n", (4587, 4634), False, 'from py_diff_pd.common.hex_mesh import hex2obj_with_textures, filter_hex\n'), ((4963, 5001), 'py_diff_pd.common.common.create_folder', 'create_folder', (["(frame_folder / 'muscle')"], {}), "(frame_folder / 'muscle')\n", (4976, 5001), False, 'from py_diff_pd.common.common import ndarray, create_folder\n'), ((5720, 5752), 'py_diff_pd.common.hex_mesh.filter_hex', 'filter_hex', (['mesh', 'not_muscle_idx'], {}), '(mesh, not_muscle_idx)\n', (5730, 5752), False, 'from py_diff_pd.common.hex_mesh import hex2obj_with_textures, filter_hex\n'), ((5765, 5843), 'py_diff_pd.common.hex_mesh.hex2obj_with_textures', 'hex2obj_with_textures', (['sub_mesh'], {'obj_file_name': "(frame_folder / 'not_muscle.obj')"}), "(sub_mesh, obj_file_name=frame_folder / 'not_muscle.obj')\n", (5786, 5843), False, 'from py_diff_pd.common.hex_mesh import hex2obj_with_textures, filter_hex\n'), ((4889, 4917), 'py_diff_pd.common.common.ndarray', 'ndarray', (['frame_act_flattened'], {}), '(frame_act_flattened)\n', (4896, 4917), False, 'from py_diff_pd.common.common import ndarray, create_folder\n'), ((5102, 5125), 'py_diff_pd.common.hex_mesh.filter_hex', 'filter_hex', (['mesh', 'group'], {}), '(mesh, group)\n', (5112, 5125), False, 'from py_diff_pd.common.hex_mesh import hex2obj_with_textures, filter_hex\n'), 
((5254, 5271), 'numpy.floor', 'np.floor', (['(v / duv)'], {}), '(v / duv)\n', (5262, 5271), True, 'import numpy as np\n'), ((2462, 2475), 'numpy.sin', 'np.sin', (['(w * i)'], {}), '(w * i)\n', (2468, 2475), True, 'import numpy as np\n'), ((2572, 2585), 'numpy.sin', 'np.sin', (['(w * i)'], {}), '(w * i)\n', (2578, 2585), True, 'import numpy as np\n'), ((2753, 2766), 'numpy.sin', 'np.sin', (['(w * i)'], {}), '(w * i)\n', (2759, 2766), True, 'import numpy as np\n'), ((2863, 2876), 'numpy.sin', 'np.sin', (['(w * i)'], {}), '(w * i)\n', (2869, 2876), True, 'import numpy as np\n')] |
from segmentation_models import Unet, Nestnet, Xnet
from data.load_data import load_data
import numpy as np
from PIL import Image
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
NCLASSES = 2
HEIGHT = 544
WIDTH = 544
def generate_arrays_from_file(lines, batch_size):
# 获取总长度
n = len(lines)
i = 0
while 1:
X_train = []
Y_train = []
# 获取一个batch_size大小的数据
for _ in range(batch_size):
if i == 0:
np.random.shuffle(lines)
name = lines[i].split(';')[0]
# 从文件中读取图像
img = Image.open("C:\\Users\\admin\\dongwei\\workspace\\dataset\\defeat_seg\\jpg" + '\\' + name)
img = img.resize((WIDTH, HEIGHT))
img = np.array(img)
img = img / 255
X_train.append(img)
name = (lines[i].split(';')[1]).replace("\n", "")
# 从文件中读取图像
img = Image.open("C:\\Users\\admin\\dongwei\\workspace\\dataset\\defeat_seg\\png" + '\\' + name)
img = img.resize((WIDTH, HEIGHT))
img = np.array(img)
seg_labels = np.zeros((WIDTH, HEIGHT, NCLASSES))
for c in range(NCLASSES):
seg_labels[:, :, c] = (img[:, :, 0] == c).astype(int)
# seg_labels = np.reshape(seg_labels, (-1, NCLASSES))
Y_train.append(seg_labels)
# 读完一个周期后重新开始
i = (i + 1) % n
yield (np.array(X_train), np.array(Y_train))
# prepare data
dataset_path = 'C:\\Users\\admin\\dongwei\\workspace\\dataset\\defeat_seg'
# range in [0,1], the network expects input channels of 3
# x, y = load_data(root_dir=dataset_path, contents=['jpg', 'png'])
# prepare model
# build UNet++
model = Xnet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose', classes=NCLASSES)
# model = Unet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose') # build U-Net
# model = NestNet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose') # build DLA
model.compile('Adam', 'binary_crossentropy', ['binary_accuracy'])
# train model
# model.fit(x, y)
batch_size = 2
with open("C:\\Users\\admin\\dongwei\\workspace\\dataset\\defeat_seg\\train.txt", "r") as f:
lines = f.readlines()
# 90%用于训练,10%用于估计。
num_val = int(len(lines) * 0.1)
num_train = len(lines) - num_val
log_dir = "logs/"
checkpoint_period = ModelCheckpoint(
log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss',
save_weights_only=True,
save_best_only=True,
period=1)
reduce_lr = ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=3,
verbose=1)
early_stopping = EarlyStopping(
monitor='val_loss',
min_delta=0,
patience=10,
verbose=1)
model.fit_generator(generate_arrays_from_file(lines[:num_train], batch_size),
steps_per_epoch=max(1, num_train // batch_size),
validation_data=generate_arrays_from_file(lines[num_train:], batch_size),
validation_steps=max(1, num_val // batch_size),
epochs=50,
initial_epoch=0,
callbacks=[checkpoint_period, reduce_lr, early_stopping])
| [
"keras.callbacks.ModelCheckpoint",
"numpy.zeros",
"PIL.Image.open",
"segmentation_models.Xnet",
"keras.callbacks.EarlyStopping",
"numpy.array",
"keras.callbacks.ReduceLROnPlateau",
"numpy.random.shuffle"
] | [((1763, 1875), 'segmentation_models.Xnet', 'Xnet', ([], {'backbone_name': '"""resnet50"""', 'encoder_weights': '"""imagenet"""', 'decoder_block_type': '"""transpose"""', 'classes': 'NCLASSES'}), "(backbone_name='resnet50', encoder_weights='imagenet',\n decoder_block_type='transpose', classes=NCLASSES)\n", (1767, 1875), False, 'from segmentation_models import Unet, Nestnet, Xnet\n'), ((2459, 2626), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5')"], {'monitor': '"""val_loss"""', 'save_weights_only': '(True)', 'save_best_only': '(True)', 'period': '(1)'}), "(log_dir +\n 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', monitor=\n 'val_loss', save_weights_only=True, save_best_only=True, period=1)\n", (2474, 2626), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2651, 2723), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(3)', 'verbose': '(1)'}), "(monitor='val_loss', factor=0.5, patience=3, verbose=1)\n", (2668, 2723), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2758, 2828), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)'}), "(monitor='val_loss', min_delta=0, patience=10, verbose=1)\n", (2771, 2828), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((622, 716), 'PIL.Image.open', 'Image.open', (["('C:\\\\Users\\\\admin\\\\dongwei\\\\workspace\\\\dataset\\\\defeat_seg\\\\jpg' + '\\\\' + name\n )"], {}), "('C:\\\\Users\\\\admin\\\\dongwei\\\\workspace\\\\dataset\\\\defeat_seg\\\\jpg' +\n '\\\\' + name)\n", (632, 716), False, 'from PIL import Image\n'), ((777, 790), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (785, 790), True, 'import 
numpy as np\n'), ((955, 1049), 'PIL.Image.open', 'Image.open', (["('C:\\\\Users\\\\admin\\\\dongwei\\\\workspace\\\\dataset\\\\defeat_seg\\\\png' + '\\\\' + name\n )"], {}), "('C:\\\\Users\\\\admin\\\\dongwei\\\\workspace\\\\dataset\\\\defeat_seg\\\\png' +\n '\\\\' + name)\n", (965, 1049), False, 'from PIL import Image\n'), ((1110, 1123), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1118, 1123), True, 'import numpy as np\n'), ((1149, 1184), 'numpy.zeros', 'np.zeros', (['(WIDTH, HEIGHT, NCLASSES)'], {}), '((WIDTH, HEIGHT, NCLASSES))\n', (1157, 1184), True, 'import numpy as np\n'), ((514, 538), 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), '(lines)\n', (531, 538), True, 'import numpy as np\n'), ((1468, 1485), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (1476, 1485), True, 'import numpy as np\n'), ((1487, 1504), 'numpy.array', 'np.array', (['Y_train'], {}), '(Y_train)\n', (1495, 1504), True, 'import numpy as np\n')] |
"""Flashed images perturbed with checkerboard"""
import array
import io
import math
# import matplotlib.pyplot as plt
import numpy as np
import os
import scipy
import scipy.interpolate
import scipy.ndimage
import tempfile
import tqdm
# from mpl_toolkits.axes_grid1 import make_axes_locatable
from PIL.Image import fromarray
from PIL.Image import open as open_image
from urllib.parse import urlparse
from urllib.request import urlopen, urlretrieve
from pystim.io.bin import open_file as open_bin_file
from pystim.io.vec import open_file as open_vec_file
from pystim.io.csv import open_file as open_csv_file
from pystim.utils import handle_arguments_and_configurations
from pystim.utils import get_grey_frame
name = 'fipwc'
default_configuration = {
'path': os.path.join(tempfile.gettempdir(), "pystim", name),
'reference_images': { # a selection of promising images
0: ('reference', 0), # i.e. grey
1: ('<NAME>', 5),
2: ('<NAME>', 31),
3: ('<NAME>', 46),
# 4: ('<NAME>', 39),
},
'perturbations': {
# 'pattern_indices': list(range(0, 2)),
# 'pattern_indices': list(range(0, 18)), # TODO
'pattern_indices': list(range(0, 9)), # TODO
# 'amplitudes': [float(a) / float(256) for a in [10, 28]], # TODO remove this line?
# 'amplitudes': [float(a) / float(256) for a in [-28, +28]],
# 'amplitudes': [float(a) / float(256) for a in [2, 4, 7, 10, 14, 18, 23, 28]], # TODO
'amplitudes': [float(a) / float(256) for a in [-30, -20, -15, -10, +10, +15, +20, +30]],
# 'nb_horizontal_checks': 60,
# 'nb_vertical_checks': 60,
# 'nb_horizontal_checks': 57,
# 'nb_vertical_checks': 57,
'nb_horizontal_checks': 56,
'nb_vertical_checks': 56,
# 'resolution': 50.0, # µm / pixel
'resolution': float(15) * 3.5, # µm / pixel
'with_random_patterns': True,
},
# 'display_rate': 50.0, # Hz # TODO
'display_rate': 40.0, # Hz
'frame': {
'width': 864,
'height': 864,
'duration': 0.3, # s
'resolution': 3.5, # µm / pixel # fixed by the setup
},
# image_resolution = 3.3 # µm / pixel # fixed by the monkey eye
# 'image_resolution': 0.8, # µm / pixel # fixed by the salamander eye
'image_resolution': 3.5, # µm / pixel # fixed by hand # TODO correct.
'background_luminance': 0.5, # arb. unit
'mean_luminance': 0.5, # arb. unit
# 'std_luminance': 0.06, # arb. unit
'std_luminance': 0.2, # arb. unit
'nb_repetitions': 10,
# 'nb_repetitions': 2,
}
def load_resource(url):
with urlopen(url) as handle:
resource = handle.read()
return resource
def get_palmer_resource_locator(path, index):
assert 1 <= index <= 6
frame_index = 1200 * (index - 1) + 600
filename = "frame{i:04d}.png".format(i=frame_index)
path = os.path.join(path, filename)
url = "file://localhost" + path
return url
def get_palmer_resource_locators(path=None, indices=None):
if indices is None:
indices = range(1, 7)
urls = [
get_palmer_resource_locator(path, index)
for index in indices
]
return urls
def convert_palmer_resource_to_image(resource):
data = io.BytesIO(resource)
image = open_image(data)
data = image.getdata()
data = np.array(data, dtype=np.uint8)
image = data.reshape(image.size)
image = image.astype(np.float)
image = image / (2.0 ** 8)
return image
def get_van_hateren_resource_locator(index, format_='iml', scheme='file', path=None, mirror='Lies'):
filename = "imk{:05d}.{}".format(index, format_)
if scheme == 'file':
assert path is not None
netloc = "localhost"
path = os.path.join(path, filename)
url = "{}://{}/{}".format(scheme, netloc, path)
elif scheme == 'http':
if mirror == 'Lies':
assert 1 <= index <= 4212
assert format_ in ['iml', 'imc']
url_string = "http://cin-11.medizin.uni-tuebingen.de:61280/vanhateren/{f}/imk{i:05d}.{f}"
url = url_string.format(i=index, f=format_)
elif mirror == 'Ivanov':
assert 1 <= index <= 99
if format_ == 'iml':
url = "http://pirsquared.org/research/vhatdb/imk{i:05d}.{f}".format(i=index, f=format_)
elif format_ == 'imc':
url = "http://pirsquared.org/research/vhatdb/{f}/imk{i:05d}.{f}".format(i=index, f=format_)
else:
raise ValueError("unexpected format value: {}".format(format_))
else:
raise ValueError("unexpected mirror value: {}".format(mirror))
else:
raise ValueError("unexpected scheme value: {}".format(scheme))
return url
def get_van_hateren_resource_locators(path=None, indices=None, format_='iml', mirror='Lies'):
assert format_ in ['iml', 'imc']
if indices is None:
if mirror == 'Lies':
indices = range(0, 4213)
elif mirror == 'Ivanov':
indices = range(1, 100)
else:
raise ValueError("unexpected mirror value: {}".format(mirror))
urls = [
get_van_hateren_resource_locator(index, format_=format_, path=path, mirror=mirror)
for index in indices
]
return urls
def convert_van_hateren_resource_to_image(resource):
data = array.array('H', resource)
data.byteswap()
data = np.array(data, dtype=np.uint16)
image = data.reshape(1024, 1536)
image = image.astype(np.float)
image = image / (2.0 ** (12 + 1))
return image
def get_checkerboard_locator(index, scheme='file', path=None):
filename = "checkerboard{:05d}.png".format(index)
if scheme == 'file':
netloc = "localhost"
path = os.path.join(path, filename)
url = "{}://{}/{}".format(scheme, netloc, path)
else:
raise ValueError("unexpected scheme value: {}".format(scheme))
return url
def get_reference_locator(index, scheme='file', path=None):
filename = "reference_{:05d}.png".format(index)
if scheme == 'file':
netloc = "localhost"
path = os.path.join(path, filename)
url = "{}://{}/{}".format(scheme, netloc, path)
else:
raise ValueError("unexpected scheme value: {}".format(scheme))
return url
def get_resource_locator(index, dataset='van Hateren', **kwargs):
    """Dispatch to the locator function matching `dataset`.

    Extra keyword arguments are forwarded unchanged to the dataset-specific
    locator. Raises ValueError for an unknown dataset.
    """
    if dataset == 'van Hateren':
        return get_van_hateren_resource_locator(index, **kwargs)
    if dataset == 'Palmer':
        return get_palmer_resource_locator(index, **kwargs)
    if dataset == 'checkerboard':
        return get_checkerboard_locator(index, **kwargs)
    if dataset == 'reference':
        return get_reference_locator(index, **kwargs)
    raise ValueError("unexpected dataset value: {}".format(dataset))
def is_resource(url):
    """Tell whether `url` points to an existing resource.

    A 'file' URL is checked against the local filesystem; a 'http' URL is
    assumed to be available without probing the network.
    """
    parsed = urlparse(url)
    if parsed.scheme == 'http':
        return True
    if parsed.scheme == 'file':
        local_path = parsed.path[1:]  # drop the leading '/'
        exists = os.path.isfile(local_path)
        print("{} is resource: {}".format(local_path, exists))
        return exists
    raise ValueError("unexpected url scheme: {}".format(parsed.scheme))
def is_available_online(dataset):
    """Tell whether `dataset` can be fetched from a remote mirror."""
    online_datasets = ('van Hateren', 'Palmer')
    return dataset in online_datasets
def generate_reference_image(reference_index, dataset='reference', index=0, path=None):
    """Generate a synthetic reference image and save it as a .png file.

    Only `index == 0` is implemented: a uniform mid-grey 864 x 864 image
    (sized similarly to the van Hateren images). Other indices raise
    NotImplementedError.
    """
    url = urlparse(get_resource_locator(reference_index, dataset=dataset, scheme='file', path=path))
    local_path = url.path[1:]
    if index != 0:
        raise NotImplementedError()
    # Mid-grey level for an unsigned 8 bit image.
    dinfo = np.iinfo(np.uint8)
    grey_level = (dinfo.max - dinfo.min + 1) // 2
    pixels = grey_level * np.ones((864, 864), dtype=np.uint8)
    fromarray(pixels).save(local_path)
    return
def collect_reference_image(reference_index, dataset='van Hateren', index=0, config=None, **kwargs):
    """Ensure the processed reference image `reference_index` exists on disk.

    If the local file is missing and `dataset` is available online, the raw
    image `index` is downloaded, converted into a normalized reference frame
    and saved; otherwise a synthetic reference image is generated.

    NOTE(review): `kwargs` is expected to contain `path` — it is forwarded
    to the locator helpers and also read explicitly for the raw image load.
    """
    local_url = get_resource_locator(reference_index, dataset='reference', scheme='file', **kwargs)
    if not is_resource(local_url):
        if is_available_online(dataset):
            # Retrieve image.
            remote_url = get_resource_locator(index, dataset=dataset, scheme='http', **kwargs)
            local_url = get_resource_locator(reference_index, dataset=dataset, scheme='file', **kwargs)
            local_url = urlparse(local_url)
            local_path = local_url.path[1:]
            urlretrieve(remote_url, local_path)
            # Process image: resample onto the frame grid and quantize.
            image = load_reference_image_old(reference_index, path=kwargs['path'])
            image = get_reference_frame(image, config)
            image = float_frame_to_uint8_frame(image)
            local_url = get_resource_locator(reference_index, dataset='reference', scheme='file', **kwargs)
            local_url = urlparse(local_url)
            local_path = local_url.path[1:]
            save_frame(local_path, image)
        else:
            generate_reference_image(reference_index, dataset=dataset, index=index, **kwargs)
    return
def generate_perturbation_pattern(index, nb_horizontal_checks=10, nb_vertical_checks=10, path=None):
    """Draw a random binary checkerboard and save it as a .png file.

    The NumPy global generator is seeded with `index` so each pattern is
    reproducible. Checks take the extreme uint8 values (0 or 255).
    """
    assert path is not None
    np.random.seed(seed=index)  # make pattern `index` reproducible
    dinfo = np.iinfo(np.uint8)
    levels = np.array([dinfo.min, dinfo.max], dtype=np.uint8)
    shape = (nb_vertical_checks, nb_horizontal_checks)
    checks = np.random.choice(a=levels, size=shape)
    fromarray(checks).save(path)
    return
def collect_perturbation_pattern(index, nb_horizontal_checks=10, nb_vertical_checks=10, path=None):
    """Generate checkerboard pattern `index` unless it already exists on disk."""
    assert path is not None
    url = get_resource_locator(index, dataset='checkerboard', scheme='file', path=path)
    if is_resource(url):
        return
    local_path = urlparse(url).path[1:]
    generate_perturbation_pattern(index, nb_horizontal_checks=nb_horizontal_checks,
                                  nb_vertical_checks=nb_vertical_checks, path=local_path)
    return
def load_perturbation_pattern(index, path):
    """Load perturbation pattern `index` from disk, normalized to [0, 1].

    Arguments:
        index: integer
            Pattern identifier (selects the checkerboard .png file).
        path: string
            Directory containing the pattern files.

    Return:
        data: numpy.ndarray
            Pattern of shape (height, width) with values in [0, 1].
    """
    url = get_resource_locator(index, dataset='checkerboard', scheme='file', path=path)
    url = urlparse(url)
    path = url.path[1:]
    image = open_image(path)
    data = image.getdata()
    data = np.array(data, dtype=np.uint8)
    width, height = image.size
    data = data.reshape(height, width)
    # `np.float` was removed in NumPy 1.24; use the explicit 64-bit type.
    data = data.astype(np.float64)
    data = data / np.iinfo(np.uint8).max
    return data
def load_reference_image_old(index, path):
    """Load a raw van Hateren image from disk, normalized to [0, 1].

    The file is read as big-endian 16-bit unsigned integers forming a
    1024 x 1536 image (assumes a little-endian host for the byteswap).

    Arguments:
        index: integer
            Image identifier (selects the raw image file).
        path: string
            Directory containing the raw image files.

    Return:
        data: numpy.ndarray
            Image of shape (1024, 1536) with values in [0, 1].
    """
    dtype = np.uint16
    height = 1024
    width = 1536
    url = get_resource_locator(index, dataset='van Hateren', scheme='file', path=path)
    url = urlparse(url)
    path = url.path[1:]
    with open(path, mode='rb') as handle:
        data_bytes = handle.read()
    data = array.array('H', data_bytes)
    data.byteswap()
    data = np.array(data, dtype=dtype)
    data = data.reshape(height, width)
    # `np.float` was removed in NumPy 1.24; use the explicit 64-bit type.
    data = data.astype(np.float64)
    data = data / np.iinfo(dtype).max
    return data
def load_reference_image(index, path):
    """Load the processed reference image `index`, normalized to [0, 1).

    Arguments:
        index: integer
            Reference image identifier (selects the .png file).
        path: string
            Directory containing the reference image files.

    Return:
        data: numpy.ndarray
            Image of shape (height, width) with values in [0, 1).
    """
    url = get_resource_locator(index, dataset='reference', scheme='file', path=path)
    url = urlparse(url)
    path = url.path[1:]
    image = open_image(path)
    data = image.getdata()
    data = np.array(data, dtype=np.uint8)
    width, height = image.size
    data = data.reshape(height, width)
    # `np.float` was removed in NumPy 1.24; use the explicit 64-bit type.
    data = data.astype(np.float64)
    info = np.iinfo(np.uint8)
    data = (data - float(info.min)) / float(info.max - info.min + 1)
    return data
def get_frame(image, config):
    """Resample `image` onto the stimulus frame described by `config`.

    The image is linearly interpolated onto the frame grid; when the frame
    resolution is coarser than the image resolution, the image is low-pass
    filtered first to avoid aliasing. Frame pixels not covered by the image
    are filled with the background luminance.

    The two code paths of the original implementation were identical except
    for the optional pre-filtering, so the common part is shared here.

    Arguments:
        image: numpy.ndarray
            Input image (2D).
        config: dict
            Must provide 'image_resolution', 'background_luminance' and a
            'frame' sub-dict with 'width', 'height' and 'resolution'.

    Returns:
        frame: numpy.ndarray
            Resampled frame of shape (frame height, frame width).
        limits: tuple
            (i_min, i_max, j_min, j_max), half-open index bounds of the
            frame region covered by the image.
    """
    image_height, image_width = image.shape
    image_resolution = config['image_resolution']
    frame_width = config['frame']['width']
    frame_height = config['frame']['height']
    frame_shape = frame_height, frame_width
    frame_resolution = config['frame']['resolution']
    background_luminance = config['background_luminance']
    if frame_resolution <= image_resolution:
        # Up-sampling: interpolate the image directly.
        image_z = image
    else:
        # Down-sampling: low-pass filter the image first (decimation).
        image_frequency = 1.0 / image_resolution
        frame_frequency = 1.0 / frame_resolution
        cutoff = frame_frequency / image_frequency
        # See https://en.wikipedia.org/wiki/Gaussian_filter for a
        # justification of this formula.
        sigma = math.sqrt(2.0 * math.log(2.0)) / (2.0 * math.pi * cutoff)
        image_z = scipy.ndimage.gaussian_filter(image, sigma=sigma)
    # Centered physical coordinates of the image samples.
    image_x = image_resolution * np.arange(0, image_height)
    image_x = image_x - np.mean(image_x)
    image_y = image_resolution * np.arange(0, image_width)
    image_y = image_y - np.mean(image_y)
    spline = scipy.interpolate.RectBivariateSpline(image_x, image_y, image_z, kx=1, ky=1)
    # Centered physical coordinates of the frame samples, restricted to the
    # area covered by the image (plus half a pixel on each side).
    frame_x = frame_resolution * np.arange(0, frame_height)
    frame_x = frame_x - np.mean(frame_x)
    mask_x = np.logical_and(
        np.min(image_x) - 0.5 * image_resolution <= frame_x,
        frame_x <= np.max(image_x) + 0.5 * image_resolution
    )
    frame_y = frame_resolution * np.arange(0, frame_width)
    frame_y = frame_y - np.mean(frame_y)
    mask_y = np.logical_and(
        np.min(image_y) - 0.5 * image_resolution <= frame_y,
        frame_y <= np.max(image_y) + 0.5 * image_resolution
    )
    frame_z = spline(frame_x[mask_x], frame_y[mask_y])
    frame_i_min = np.min(np.nonzero(mask_x))
    frame_i_max = np.max(np.nonzero(mask_x)) + 1
    frame_j_min = np.min(np.nonzero(mask_y))
    frame_j_max = np.max(np.nonzero(mask_y)) + 1
    # `np.float` was removed in NumPy 1.24; use the explicit 64-bit type.
    frame = background_luminance * np.ones(frame_shape, dtype=np.float64)
    frame[frame_i_min:frame_i_max, frame_j_min:frame_j_max] = frame_z
    limits = frame_i_min, frame_i_max, frame_j_min, frame_j_max
    return frame, limits
def get_reference_frame(reference_image, config):
    """Resample the reference image and normalize its luminance statistics.

    The region of the frame covered by the image is centered, rescaled to
    the configured standard deviation (when non-degenerate) and shifted to
    the configured mean luminance.
    """
    frame, limits = get_frame(reference_image, config)
    i_min, i_max, j_min, j_max = limits
    roi = frame[i_min:i_max, j_min:j_max]
    roi = roi - np.mean(roi)
    roi_std = np.std(roi)
    if roi_std > 0.0:
        roi = roi / roi_std
    roi = roi * config['std_luminance']
    roi = roi + config['mean_luminance']
    frame[i_min:i_max, j_min:j_max] = roi
    return frame
def get_perturbation_frame(perturbation_image, config, verbose=False):
    """Resample a perturbation pattern onto the stimulus frame.

    The pattern is mapped with nearest-neighbour interpolation (each check
    keeps sharp edges) onto the frame grid, then rescaled from [0, 1] to
    [-1, +1]. Frame pixels outside the pattern area are left at 0.

    Arguments:
        perturbation_image: numpy.ndarray
            Pattern (2D) with values in [0, 1].
        config: dict
            Must provide a 'frame' sub-dict ('width', 'height',
            'resolution') and a 'perturbations' sub-dict ('resolution').
        verbose: boolean
            Print luminance statistics before and after rescaling.

    Return:
        frame: numpy.ndarray
            Perturbation frame of shape (frame height, frame width).
    """
    def _print_stats(roi):
        # Report luminance statistics of the region of interest.
        print("mean (luminance): {}".format(np.mean(roi)))
        print("std (luminance): {}".format(np.std(roi)))
        print("min (luminance): {}".format(np.min(roi)))
        print("max (luminance): {}".format(np.max(roi)))
    frame_width = config['frame']['width']
    frame_height = config['frame']['height']
    frame_shape = (frame_height, frame_width)
    frame_resolution = config['frame']['resolution']
    # Centered physical coordinates of the frame samples.
    frame_x = frame_resolution * np.arange(0, frame_height)
    frame_x = frame_x - np.mean(frame_x)
    frame_y = frame_resolution * np.arange(0, frame_width)
    frame_y = frame_y - np.mean(frame_y)
    perturbation_resolution = config['perturbations']['resolution']
    perturbation = perturbation_image
    perturbation_height, perturbation_width = perturbation.shape
    # Centered physical coordinates of the pattern checks.
    perturbation_x = perturbation_resolution * np.arange(0, perturbation_height)
    perturbation_x = perturbation_x - np.mean(perturbation_x)
    perturbation_y = perturbation_resolution * np.arange(0, perturbation_width)
    perturbation_y = perturbation_y - np.mean(perturbation_y)
    perturbation_z = perturbation
    perturbation_x_, perturbation_y_ = np.meshgrid(perturbation_x, perturbation_y)
    perturbation_points = np.stack((perturbation_x_.flatten(), perturbation_y_.flatten()), axis=-1)
    perturbation_data = perturbation_z.flatten()
    interpolate = scipy.interpolate.NearestNDInterpolator(perturbation_points, perturbation_data)
    # Frame samples covered by the pattern (plus half a check on each side).
    mask_x = np.logical_and(
        np.min(perturbation_x) - 0.5 * perturbation_resolution <= frame_x,
        frame_x <= np.max(perturbation_x) + 0.5 * perturbation_resolution
    )
    mask_y = np.logical_and(
        np.min(perturbation_y) - 0.5 * perturbation_resolution <= frame_y,
        frame_y <= np.max(perturbation_y) + 0.5 * perturbation_resolution
    )
    frame_x_, frame_y_ = np.meshgrid(frame_x[mask_x], frame_y[mask_y])
    frame_x_ = frame_x_.transpose().flatten()
    frame_y_ = frame_y_.transpose().flatten()
    frame_points_ = np.stack((frame_x_, frame_y_), axis=-1)
    frame_data_ = interpolate(frame_points_)
    frame_z_ = np.reshape(frame_data_, (frame_x[mask_x].size, frame_y[mask_y].size))
    i_min = np.min(np.nonzero(mask_x))
    i_max = np.max(np.nonzero(mask_x)) + 1
    j_min = np.min(np.nonzero(mask_y))
    j_max = np.max(np.nonzero(mask_y)) + 1
    # `np.float` was removed in NumPy 1.24; use the explicit 64-bit type.
    frame = np.zeros(frame_shape, dtype=np.float64)
    frame[i_min:i_max, j_min:j_max] = frame_z_
    frame_roi = frame[i_min:i_max, j_min:j_max]
    if verbose:
        _print_stats(frame_roi)
    # Map [0, 1] pattern values to [-1, +1] perturbation values.
    frame_roi = -1.0 + 2.0 * frame_roi
    if verbose:
        _print_stats(frame_roi)
    frame[i_min:i_max, j_min:j_max] = frame_roi
    return frame
def float_frame_to_uint8_frame(float_frame):
    """Convert a float frame in [0, 1) to an unsigned 8 bit frame.

    Values are scaled by 256 and saturated into [0, 255] before the type
    conversion. The input array is left unmodified.
    """
    dinfo = np.iinfo(np.uint8)
    nb_levels = dinfo.max - dinfo.min + 1
    scaled = float(nb_levels) * float_frame
    # Saturate under- and overflowing pixels.
    scaled[scaled < float(dinfo.min)] = float(dinfo.min)
    scaled[float(dinfo.max + 1) <= scaled] = float(dinfo.max)
    return scaled.astype(np.uint8)
def get_perturbed_frame(reference_image, perturbation_pattern, perturbation_amplitude, config):
    """Superimpose a scaled perturbation frame onto the reference frame."""
    base = get_reference_frame(reference_image, config)
    delta = get_perturbation_frame(perturbation_pattern, config)
    return base + perturbation_amplitude * delta
def get_combinations(reference_images_indices, perturbation_patterns_indices, perturbation_amplitudes_indices):
    """Map frame identifiers to (reference, pattern, amplitude) triplets.

    Identifier 0 is reserved for the inter-stimulus (grey) frame. The
    unperturbed reference frames come first — their pattern and amplitude
    fields are `np.nan` — followed by every perturbed combination.
    """
    combinations = {}
    index = 1  # 0 is reserved for the inter-stimulus frame
    for ref in reference_images_indices:
        combinations[index] = (ref, np.nan, np.nan)  # TODO check if `np.nan` are valid.
        index += 1
    for ref in reference_images_indices:
        for pat in perturbation_patterns_indices:
            for amp in perturbation_amplitudes_indices:
                combinations[index] = (ref, pat, amp)
                index += 1
    return combinations
def get_random_combinations(reference_images_indices, random_perturbation_patterns_indices, nb_combinations):
    """Map frame identifiers to (reference, random pattern, nan) triplets.

    Identifiers start right after the `nb_combinations` deterministic ones.
    The pattern index varies in the outer loop so consecutive identifiers
    cycle through the reference images for one pattern at a time.
    """
    combinations = {}
    index = nb_combinations  # skip the deterministic combinations
    for pat in random_perturbation_patterns_indices:  # pattern first
        for ref in reference_images_indices:
            combinations[index] = (ref, pat, np.nan)
            index += 1
    return combinations
def save_frame(path, frame):
    """Save `frame` (a numpy array) as an image file at `path`."""
    fromarray(frame).save(path)
    return
def get_permutations(indices, nb_repetitions=1, seed=42):
    """Return one random permutation of `indices` per repetition.

    The NumPy global generator is seeded once so the sequence of
    permutations is reproducible from one run to the next.
    """
    np.random.seed(seed)
    permutations = {}
    for repetition in range(0, nb_repetitions):
        permutations[repetition] = np.random.permutation(indices)
    return permutations
def get_random_combination_groups(random_combination_indices, nb_repetitions=1):
    """Split the random combination identifiers into equal consecutive groups.

    The number of identifiers must be an exact multiple of the number of
    repetitions; one group is returned per repetition.
    """
    nb_combinations = len(random_combination_indices)
    assert nb_combinations % nb_repetitions == 0, "not nb_combinations ({}) % nb_repetitions ({}) == 0".format(nb_combinations, nb_repetitions)
    group_size = nb_combinations // nb_repetitions
    groups = {}
    for repetition in range(0, nb_repetitions):
        start = repetition * group_size
        groups[repetition] = random_combination_indices[start:start + group_size]
    return groups
def generate(args):
    """Generate the stimulus: frames (.bin), display sequence (.vec) and metadata (.csv).

    The stimulus interleaves a grey inter-stimulus frame with reference
    images and checkerboard-perturbed versions of them. Steps:
    1. collect reference images and perturbation patterns on disk;
    2. enumerate every (reference, pattern, amplitude) combination
       (plus optional random-pattern combinations);
    3. write every frame into a .bin file (with .png copies);
    4. write the per-display sequence (.vec) and trial metadata (.csv).

    NOTE(review): `name` is read from module scope — confirm it is defined
    near the top of the file.
    """
    config = handle_arguments_and_configurations(name, args)
    # Create the output directory tree.
    path = config['path']
    if not os.path.isdir(path):
        os.makedirs(path)
    print(path)
    reference_images_path = os.path.join(path, "reference_images")
    if not os.path.isdir(reference_images_path):
        os.makedirs(reference_images_path)
    perturbation_patterns_path = os.path.join(path, "perturbation_patterns")
    if not os.path.isdir(perturbation_patterns_path):
        os.makedirs(perturbation_patterns_path)
    frames_path = os.path.join(path, "frames")
    if not os.path.isdir(frames_path):
        os.makedirs(frames_path)
    # Get configuration parameters.
    reference_images = config['reference_images']
    nb_horizontal_checks = config['perturbations']['nb_horizontal_checks']
    nb_vertical_checks = config['perturbations']['nb_vertical_checks']
    assert nb_horizontal_checks % 2 == 0, "number of checks should be even (horizontally): {}".format(nb_horizontal_checks)
    assert nb_vertical_checks % 2 == 0, "number of checks should be even (vertically): {}".format(nb_vertical_checks)
    with_random_patterns = config['perturbations']['with_random_patterns']
    perturbation_patterns_indices = config['perturbations']['pattern_indices']
    perturbation_amplitudes = config['perturbations']['amplitudes']
    perturbation_amplitudes_indices = [k for k, _ in enumerate(perturbation_amplitudes)]
    display_rate = config['display_rate']
    frame_width_in_px = config['frame']['width']
    frame_height_in_px = config['frame']['height']
    frame_duration = config['frame']['duration']
    nb_repetitions = config['nb_repetitions']
    # Collect reference images.
    reference_indices = [
        int(key)
        for key in reference_images.keys()
    ]
    for reference_index in reference_indices:
        dataset, index = reference_images[str(reference_index)]
        collect_reference_image(reference_index, dataset=dataset, index=index,
                                path=reference_images_path, config=config)
    # Create .csv file for reference_image.
    csv_filename = "{}_reference_images.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = ['reference_image_path']
    csv_file = open_csv_file(csv_path, columns=columns)
    for index in reference_indices:
        reference_image_path = os.path.join("reference_images", "reference_{:05d}.png".format(index))
        csv_file.append(reference_image_path=reference_image_path)
    csv_file.close()
    # Prepare perturbation pattern indices.
    if with_random_patterns:
        # TODO check the following lines!
        # Compute the number of random patterns.
        nb_perturbation_patterns = len(perturbation_patterns_indices)
        nb_perturbation_amplitudes = len(perturbation_amplitudes_indices)
        nb_perturbations = nb_perturbation_patterns * nb_perturbation_amplitudes
        nb_random_patterns = nb_perturbations * nb_repetitions
        print("number of random patterns: {}".format(nb_random_patterns))  # TODO remove this line.
        # Choose the indices of the random patterns (they follow the frozen ones).
        random_patterns_indices = nb_perturbation_patterns + np.arange(0, nb_random_patterns)
        # Define pattern indices.
        all_patterns_indices = np.concatenate((perturbation_patterns_indices, random_patterns_indices))
    else:
        nb_random_patterns = 0
        random_patterns_indices = None
        all_patterns_indices = perturbation_patterns_indices
    print("Start collecting perturbation patterns...")
    # TODO remove the following commented lines?
    # for index in perturbation_patterns_indices:
    #     collect_perturbation_pattern(index, nb_horizontal_checks=nb_horizontal_checks,
    #                                  nb_vertical_checks=nb_vertical_checks, path=perturbation_patterns_path)
    for index in tqdm.tqdm(all_patterns_indices):
        collect_perturbation_pattern(index, nb_horizontal_checks=nb_horizontal_checks,
                                     nb_vertical_checks=nb_vertical_checks, path=perturbation_patterns_path)
    print("End collecting perturbation patterns.")
    # Create .csv file for perturbation pattern.
    csv_filename = "{}_perturbation_patterns.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = ['perturbation_pattern_path']
    csv_file = open_csv_file(csv_path, columns=columns)
    # TODO remove the following commented line?
    # for index in perturbation_patterns_indices:
    for index in all_patterns_indices:
        perturbation_pattern_path = os.path.join("perturbation_patterns", "checkerboard{:05d}.png".format(index))
        csv_file.append(perturbation_pattern_path=perturbation_pattern_path)
    csv_file.close()
    # Create .csv file for perturbation amplitudes.
    csv_filename = "{}_perturbation_amplitudes.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = ['perturbation_amplitude']
    csv_file = open_csv_file(csv_path, columns=columns)
    for perturbation_amplitude in perturbation_amplitudes:
        csv_file.append(perturbation_amplitude=perturbation_amplitude)
    csv_file.close()
    # Compute the number of images (1 grey + unperturbed refs + perturbed refs).
    nb_reference_images = len(reference_indices)
    nb_perturbation_patterns = len(perturbation_patterns_indices)
    nb_perturbation_amplitudes = len(perturbation_amplitudes_indices)
    nb_images = 1 + nb_reference_images * (1 + nb_perturbation_patterns * nb_perturbation_amplitudes)
    if with_random_patterns:
        nb_images = nb_images + nb_reference_images * nb_random_patterns  # TODO check this line!
    combinations = get_combinations(reference_indices, perturbation_patterns_indices,
                                    perturbation_amplitudes_indices)
    if with_random_patterns:
        nb_deterministic_combinations = len(combinations) + 1  # +1 to take inter-stimulus frame into account
        random_combinations = get_random_combinations(reference_indices, random_patterns_indices, nb_deterministic_combinations)
    else:
        random_combinations = None
    # Create .csv file.
    csv_filename = "{}_combinations.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = ['reference_id', 'perturbation_pattern_id', 'perturbation_amplitude_id']
    csv_file = open_csv_file(csv_path, columns=columns, dtype='Int64')
    for combination_index in combinations:
        combination = combinations[combination_index]
        # TODO remove the following commented lines?
        # kwargs = {
        #     'reference_id': reference_indices[combination[0]],
        #     'perturbation_pattern_id': perturbation_patterns_indices[combination[1]],
        #     'perturbation_amplitude_id': perturbation_amplitudes_indices[combination[2]],
        # }
        reference_id, perturbation_pattern_id, perturbation_amplitude_id = combination
        kwargs = {
            'reference_id': reference_id,
            'perturbation_pattern_id': perturbation_pattern_id,
            'perturbation_amplitude_id': perturbation_amplitude_id,
        }
        csv_file.append(**kwargs)
    # TODO check the following lines!
    # Add random pattern (if necessary).
    if with_random_patterns:
        for combination_index in random_combinations:
            combination = random_combinations[combination_index]
            reference_id, pattern_id, amplitude_id = combination
            kwargs = {
                'reference_id': reference_id,
                'perturbation_pattern_id': pattern_id,
                'perturbation_amplitude_id': amplitude_id,
            }
            csv_file.append(**kwargs)
    csv_file.close()
    # TODO fix the permutations?
    # Compute the total number of displays (one adaptation period, then one
    # trial + one intertrial per combination and repetition).
    nb_combinations = len(combinations)
    nb_frame_displays = int(display_rate * frame_duration)
    assert display_rate * frame_duration == float(nb_frame_displays)
    nb_displays = nb_frame_displays + nb_repetitions * nb_combinations * (2 * nb_frame_displays)
    if with_random_patterns:
        nb_random_combinations = len(random_combinations)
        nb_displays = nb_displays + nb_random_combinations * (2 * nb_frame_displays)
    display_time = float(nb_displays) / display_rate
    print("display time: {} s ({} min)".format(display_time, display_time / 60.0))
    # TODO improve feedback.
    combination_indices = list(combinations)
    permutations = get_permutations(combination_indices, nb_repetitions=nb_repetitions)
    if with_random_patterns:
        random_combination_indices = list(random_combinations)
        random_combination_groups = get_random_combination_groups(random_combination_indices, nb_repetitions=nb_repetitions)
    else:
        random_combination_groups = None
    print("Start creating .bin file...")
    # Create .bin file.
    bin_filename = "fipwc.bin"
    bin_path = os.path.join(path, bin_filename)
    bin_file = open_bin_file(bin_path, nb_images, frame_width=frame_width_in_px, frame_height=frame_height_in_px)
    # Save grey frame.
    grey_frame = get_grey_frame(frame_width_in_px, frame_height_in_px, luminance=0.5)
    grey_frame = float_frame_to_uint8_frame(grey_frame)
    # # Save frame in .bin file.
    bin_file.append(grey_frame)
    # # Save frame as .png file.
    grey_frame_filename = "grey.png"
    grey_frame_path = os.path.join(frames_path, grey_frame_filename)
    save_frame(grey_frame_path, grey_frame)
    # Save reference frames.
    for reference_index in reference_indices:
        # Get reference frame.
        reference_image = load_reference_image(reference_index, reference_images_path)
        reference_frame = float_frame_to_uint8_frame(reference_image)
        # Save frame in .bin file.
        bin_file.append(reference_frame)
        # Save frame as .png file.
        reference_frame_filename = "reference_{:05d}.png".format(reference_index)
        reference_frame_path = os.path.join(frames_path, reference_frame_filename)
        save_frame(reference_frame_path, reference_frame)
    # Save perturbed frames.
    for reference_index in tqdm.tqdm(reference_indices):
        reference_image = load_reference_image(reference_index, reference_images_path)
        for perturbation_pattern_index in perturbation_patterns_indices:
            perturbation_pattern = load_perturbation_pattern(perturbation_pattern_index, perturbation_patterns_path)
            for perturbation_amplitude_index in perturbation_amplitudes_indices:
                # Get perturbed frame.
                perturbation_amplitude = perturbation_amplitudes[perturbation_amplitude_index]
                perturbed_frame = get_perturbed_frame(reference_image, perturbation_pattern, perturbation_amplitude,
                                                      config)
                perturbed_frame = float_frame_to_uint8_frame(perturbed_frame)
                # Save frame in .bin file.
                bin_file.append(perturbed_frame)
                # Save frame as .png file.
                perturbed_frame_filename = "perturbed_r{:05d}_p{:05d}_a{:05d}.png".format(reference_index,
                                                                                          perturbation_pattern_index,
                                                                                          perturbation_amplitude_index)
                perturbed_frame_path = os.path.join(frames_path, perturbed_frame_filename)
                save_frame(perturbed_frame_path, perturbed_frame)
    # TODO check the following lines!
    # Save randomly perturbed frames (if necessary).
    if with_random_patterns:
        for reference_index in tqdm.tqdm(reference_indices):
            reference_image = load_reference_image(reference_index, reference_images_path)
            for perturbation_pattern_index in random_patterns_indices:
                pattern = load_perturbation_pattern(perturbation_pattern_index, perturbation_patterns_path)
                # Get perturbed frame.
                amplitude = float(15) / float(256)  # TODO change this value?
                frame = get_perturbed_frame(reference_image, pattern, amplitude, config)
                frame = float_frame_to_uint8_frame(frame)
                # Save frame in .bin file.
                bin_file.append(frame)
                # Save frame as .png file (if necessary).
                if perturbation_pattern_index < 100:
                    perturbed_frame_filename = "perturbed_r{:05d}_p{:05d}.png".format(reference_index,
                                                                                      perturbation_pattern_index)
                    perturbed_frame_path = os.path.join(frames_path, perturbed_frame_filename)
                    save_frame(perturbed_frame_path, frame)
    bin_file.close()
    print("End creating .bin file.")
    print("Start creating .vec and .csv files...")
    # Create .vec and .csv files.
    vec_filename = "{}.vec".format(name)
    vec_path = os.path.join(path, vec_filename)
    vec_file = open_vec_file(vec_path, nb_displays=nb_displays)
    csv_filename = "{}.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    csv_file = open_csv_file(csv_path, columns=['k_min', 'k_max', 'combination_id'])
    # Append adaptation.
    grey_frame_id = 0
    for _ in range(0, nb_frame_displays):
        vec_file.append(grey_frame_id)
    # For each repetition...
    for repetition_index in range(0, nb_repetitions):
        # Add frozen patterns.
        combination_indices = permutations[repetition_index]
        for combination_index in combination_indices:
            combination_frame_id = combination_index
            k_min = vec_file.get_display_index() + 1
            # Append trial.
            for _ in range(0, nb_frame_displays):
                vec_file.append(combination_frame_id)
            k_max = vec_file.get_display_index()
            csv_file.append(k_min=k_min, k_max=k_max, combination_id=combination_index)
            # Append intertrial.
            for _ in range(0, nb_frame_displays):
                vec_file.append(grey_frame_id)
        # TODO add a random pattern.
        # Add random patterns (if necessary).
        if with_random_patterns:
            random_combination_indices = random_combination_groups[repetition_index]
            for combination_index in random_combination_indices:
                combination_frame_id = combination_index
                k_min = vec_file.get_display_index() + 1
                # Append trial.
                for _ in range(0, nb_frame_displays):
                    vec_file.append(combination_frame_id)
                k_max = vec_file.get_display_index()
                csv_file.append(k_min=k_min, k_max=k_max, combination_id=combination_index)
                # Append intertrial.
                for _ in range(0, nb_frame_displays):
                    vec_file.append(grey_frame_id)
    csv_file.close()
    vec_file.close()
    print("End creating .vec and .csv files.")
    return
| [
"numpy.random.seed",
"numpy.iinfo",
"numpy.ones",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"pystim.utils.handle_arguments_and_configurations",
"os.path.join",
"urllib.parse.urlparse",
"numpy.meshgrid",
"numpy.std",
"scipy.ndimage.gaussian_filter",
"pystim.utils.get_grey_frame",
"url... | [((2915, 2943), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (2927, 2943), False, 'import os\n'), ((3289, 3309), 'io.BytesIO', 'io.BytesIO', (['resource'], {}), '(resource)\n', (3299, 3309), False, 'import io\n'), ((3322, 3338), 'PIL.Image.open', 'open_image', (['data'], {}), '(data)\n', (3332, 3338), True, 'from PIL.Image import open as open_image\n'), ((3377, 3407), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (3385, 3407), True, 'import numpy as np\n'), ((5411, 5437), 'array.array', 'array.array', (['"""H"""', 'resource'], {}), "('H', resource)\n", (5422, 5437), False, 'import array\n'), ((5469, 5500), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.uint16'}), '(data, dtype=np.uint16)\n', (5477, 5500), True, 'import numpy as np\n'), ((6935, 6948), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (6943, 6948), False, 'from urllib.parse import urlparse\n'), ((7521, 7534), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (7529, 7534), False, 'from urllib.parse import urlparse\n'), ((9387, 9413), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'index'}), '(seed=index)\n', (9401, 9413), True, 'import numpy as np\n'), ((9446, 9461), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (9454, 9461), True, 'import numpy as np\n'), ((9470, 9513), 'numpy.array', 'np.array', (['[info.min, info.max]'], {'dtype': 'dtype'}), '([info.min, info.max], dtype=dtype)\n', (9478, 9513), True, 'import numpy as np\n'), ((9621, 9654), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'a', 'size': 'shape'}), '(a=a, size=shape)\n', (9637, 9654), True, 'import numpy as np\n'), ((9667, 9685), 'PIL.Image.fromarray', 'fromarray', (['pattern'], {}), '(pattern)\n', (9676, 9685), False, 'from PIL.Image import fromarray\n'), ((10357, 10370), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (10365, 10370), False, 'from urllib.parse import 
urlparse\n'), ((10408, 10424), 'PIL.Image.open', 'open_image', (['path'], {}), '(path)\n', (10418, 10424), True, 'from PIL.Image import open as open_image\n'), ((10463, 10493), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (10471, 10493), True, 'import numpy as np\n'), ((10857, 10870), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (10865, 10870), False, 'from urllib.parse import urlparse\n'), ((10984, 11012), 'array.array', 'array.array', (['"""H"""', 'data_bytes'], {}), "('H', data_bytes)\n", (10995, 11012), False, 'import array\n'), ((11044, 11071), 'numpy.array', 'np.array', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (11052, 11071), True, 'import numpy as np\n'), ((11337, 11350), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (11345, 11350), False, 'from urllib.parse import urlparse\n'), ((11388, 11404), 'PIL.Image.open', 'open_image', (['path'], {}), '(path)\n', (11398, 11404), True, 'from PIL.Image import open as open_image\n'), ((11443, 11473), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (11451, 11473), True, 'import numpy as np\n'), ((11589, 11607), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (11597, 11607), True, 'import numpy as np\n'), ((16892, 16935), 'numpy.meshgrid', 'np.meshgrid', (['perturbation_x', 'perturbation_y'], {}), '(perturbation_x, perturbation_y)\n', (16903, 16935), True, 'import numpy as np\n'), ((17103, 17182), 'scipy.interpolate.NearestNDInterpolator', 'scipy.interpolate.NearestNDInterpolator', (['perturbation_points', 'perturbation_data'], {}), '(perturbation_points, perturbation_data)\n', (17142, 17182), False, 'import scipy\n'), ((17576, 17621), 'numpy.meshgrid', 'np.meshgrid', (['frame_x[mask_x]', 'frame_y[mask_y]'], {}), '(frame_x[mask_x], frame_y[mask_y])\n', (17587, 17621), True, 'import numpy as np\n'), ((17734, 17773), 'numpy.stack', 'np.stack', (['(frame_x_, frame_y_)'], 
{'axis': '(-1)'}), '((frame_x_, frame_y_), axis=-1)\n', (17742, 17773), True, 'import numpy as np\n'), ((17834, 17903), 'numpy.reshape', 'np.reshape', (['frame_data_', '(frame_x[mask_x].size, frame_y[mask_y].size)'], {}), '(frame_data_, (frame_x[mask_x].size, frame_y[mask_y].size))\n', (17844, 17903), True, 'import numpy as np\n'), ((18080, 18117), 'numpy.zeros', 'np.zeros', (['frame_shape'], {'dtype': 'np.float'}), '(frame_shape, dtype=np.float)\n', (18088, 18117), True, 'import numpy as np\n'), ((18968, 18983), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (18976, 18983), True, 'import numpy as np\n'), ((20738, 20754), 'PIL.Image.fromarray', 'fromarray', (['frame'], {}), '(frame)\n', (20747, 20754), False, 'from PIL.Image import fromarray\n'), ((20853, 20873), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (20867, 20873), True, 'import numpy as np\n'), ((21597, 21644), 'pystim.utils.handle_arguments_and_configurations', 'handle_arguments_and_configurations', (['name', 'args'], {}), '(name, args)\n', (21632, 21644), False, 'from pystim.utils import handle_arguments_and_configurations\n'), ((21775, 21813), 'os.path.join', 'os.path.join', (['path', '"""reference_images"""'], {}), "(path, 'reference_images')\n", (21787, 21813), False, 'import os\n'), ((21940, 21983), 'os.path.join', 'os.path.join', (['path', '"""perturbation_patterns"""'], {}), "(path, 'perturbation_patterns')\n", (21952, 21983), False, 'import os\n'), ((22105, 22133), 'os.path.join', 'os.path.join', (['path', '"""frames"""'], {}), "(path, 'frames')\n", (22117, 22133), False, 'import os\n'), ((23736, 23768), 'os.path.join', 'os.path.join', (['path', 'csv_filename'], {}), '(path, csv_filename)\n', (23748, 23768), False, 'import os\n'), ((23823, 23863), 'pystim.io.csv.open_file', 'open_csv_file', (['csv_path'], {'columns': 'columns'}), '(csv_path, columns=columns)\n', (23836, 23863), True, 'from pystim.io.csv import open_file as open_csv_file\n'), ((25442, 25473), 
'tqdm.tqdm', 'tqdm.tqdm', (['all_patterns_indices'], {}), '(all_patterns_indices)\n', (25451, 25473), False, 'import tqdm\n'), ((25851, 25883), 'os.path.join', 'os.path.join', (['path', 'csv_filename'], {}), '(path, csv_filename)\n', (25863, 25883), False, 'import os\n'), ((25943, 25983), 'pystim.io.csv.open_file', 'open_csv_file', (['csv_path'], {'columns': 'columns'}), '(csv_path, columns=columns)\n', (25956, 25983), True, 'from pystim.io.csv import open_file as open_csv_file\n'), ((26466, 26498), 'os.path.join', 'os.path.join', (['path', 'csv_filename'], {}), '(path, csv_filename)\n', (26478, 26498), False, 'import os\n'), ((26555, 26595), 'pystim.io.csv.open_file', 'open_csv_file', (['csv_path'], {'columns': 'columns'}), '(csv_path, columns=columns)\n', (26568, 26595), True, 'from pystim.io.csv import open_file as open_csv_file\n'), ((27762, 27794), 'os.path.join', 'os.path.join', (['path', 'csv_filename'], {}), '(path, csv_filename)\n', (27774, 27794), False, 'import os\n'), ((27897, 27952), 'pystim.io.csv.open_file', 'open_csv_file', (['csv_path'], {'columns': 'columns', 'dtype': '"""Int64"""'}), "(csv_path, columns=columns, dtype='Int64')\n", (27910, 27952), True, 'from pystim.io.csv import open_file as open_csv_file\n'), ((30407, 30439), 'os.path.join', 'os.path.join', (['path', 'bin_filename'], {}), '(path, bin_filename)\n', (30419, 30439), False, 'import os\n'), ((30455, 30557), 'pystim.io.bin.open_file', 'open_bin_file', (['bin_path', 'nb_images'], {'frame_width': 'frame_width_in_px', 'frame_height': 'frame_height_in_px'}), '(bin_path, nb_images, frame_width=frame_width_in_px,\n frame_height=frame_height_in_px)\n', (30468, 30557), True, 'from pystim.io.bin import open_file as open_bin_file\n'), ((30594, 30662), 'pystim.utils.get_grey_frame', 'get_grey_frame', (['frame_width_in_px', 'frame_height_in_px'], {'luminance': '(0.5)'}), '(frame_width_in_px, frame_height_in_px, luminance=0.5)\n', (30608, 30662), False, 'from pystim.utils import 
get_grey_frame\n'), ((30876, 30922), 'os.path.join', 'os.path.join', (['frames_path', 'grey_frame_filename'], {}), '(frames_path, grey_frame_filename)\n', (30888, 30922), False, 'import os\n'), ((31620, 31648), 'tqdm.tqdm', 'tqdm.tqdm', (['reference_indices'], {}), '(reference_indices)\n', (31629, 31648), False, 'import tqdm\n'), ((34519, 34551), 'os.path.join', 'os.path.join', (['path', 'vec_filename'], {}), '(path, vec_filename)\n', (34531, 34551), False, 'import os\n'), ((34567, 34615), 'pystim.io.vec.open_file', 'open_vec_file', (['vec_path'], {'nb_displays': 'nb_displays'}), '(vec_path, nb_displays=nb_displays)\n', (34580, 34615), True, 'from pystim.io.vec import open_file as open_vec_file\n'), ((34672, 34704), 'os.path.join', 'os.path.join', (['path', 'csv_filename'], {}), '(path, csv_filename)\n', (34684, 34704), False, 'import os\n'), ((34720, 34789), 'pystim.io.csv.open_file', 'open_csv_file', (['csv_path'], {'columns': "['k_min', 'k_max', 'combination_id']"}), "(csv_path, columns=['k_min', 'k_max', 'combination_id'])\n", (34733, 34789), True, 'from pystim.io.csv import open_file as open_csv_file\n'), ((779, 800), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (798, 800), False, 'import tempfile\n'), ((2650, 2662), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (2657, 2662), False, 'from urllib.request import urlopen, urlretrieve\n'), ((3789, 3817), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (3801, 3817), False, 'import os\n'), ((5819, 5847), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (5831, 5847), False, 'import os\n'), ((6186, 6214), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (6198, 6214), False, 'import os\n'), ((7020, 7040), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (7034, 7040), False, 'import os\n'), ((7811, 7826), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (7819, 
7826), True, 'import numpy as np\n'), ((7930, 7942), 'PIL.Image.fromarray', 'fromarray', (['a'], {}), '(a)\n', (7939, 7942), False, 'from PIL.Image import fromarray\n'), ((9982, 9995), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (9990, 9995), False, 'from urllib.parse import urlparse\n'), ((12416, 12492), 'scipy.interpolate.RectBivariateSpline', 'scipy.interpolate.RectBivariateSpline', (['image_x', 'image_y', 'image_z'], {'kx': '(1)', 'ky': '(1)'}), '(image_x, image_y, image_z, kx=1, ky=1)\n', (12453, 12492), False, 'import scipy\n'), ((13771, 13820), 'scipy.ndimage.gaussian_filter', 'scipy.ndimage.gaussian_filter', (['image'], {'sigma': 'sigma'}), '(image, sigma=sigma)\n', (13800, 13820), False, 'import scipy\n'), ((14184, 14260), 'scipy.interpolate.RectBivariateSpline', 'scipy.interpolate.RectBivariateSpline', (['image_x', 'image_y', 'image_z'], {'kx': '(1)', 'ky': '(1)'}), '(image_x, image_y, image_z, kx=1, ky=1)\n', (14221, 14260), False, 'import scipy\n'), ((15642, 15660), 'numpy.mean', 'np.mean', (['frame_roi'], {}), '(frame_roi)\n', (15649, 15660), True, 'import numpy as np\n'), ((15668, 15685), 'numpy.std', 'np.std', (['frame_roi'], {}), '(frame_roi)\n', (15674, 15685), True, 'import numpy as np\n'), ((16193, 16219), 'numpy.arange', 'np.arange', (['(0)', 'frame_height'], {}), '(0, frame_height)\n', (16202, 16219), True, 'import numpy as np\n'), ((16244, 16260), 'numpy.mean', 'np.mean', (['frame_x'], {}), '(frame_x)\n', (16251, 16260), True, 'import numpy as np\n'), ((16294, 16319), 'numpy.arange', 'np.arange', (['(0)', 'frame_width'], {}), '(0, frame_width)\n', (16303, 16319), True, 'import numpy as np\n'), ((16344, 16360), 'numpy.mean', 'np.mean', (['frame_y'], {}), '(frame_y)\n', (16351, 16360), True, 'import numpy as np\n'), ((16581, 16614), 'numpy.arange', 'np.arange', (['(0)', 'perturbation_height'], {}), '(0, perturbation_height)\n', (16590, 16614), True, 'import numpy as np\n'), ((16653, 16676), 'numpy.mean', 'np.mean', 
(['perturbation_x'], {}), '(perturbation_x)\n', (16660, 16676), True, 'import numpy as np\n'), ((16724, 16756), 'numpy.arange', 'np.arange', (['(0)', 'perturbation_width'], {}), '(0, perturbation_width)\n', (16733, 16756), True, 'import numpy as np\n'), ((16795, 16818), 'numpy.mean', 'np.mean', (['perturbation_y'], {}), '(perturbation_y)\n', (16802, 16818), True, 'import numpy as np\n'), ((17923, 17941), 'numpy.nonzero', 'np.nonzero', (['mask_x'], {}), '(mask_x)\n', (17933, 17941), True, 'import numpy as np\n'), ((18005, 18023), 'numpy.nonzero', 'np.nonzero', (['mask_y'], {}), '(mask_y)\n', (18015, 18023), True, 'import numpy as np\n'), ((20907, 20937), 'numpy.random.permutation', 'np.random.permutation', (['indices'], {}), '(indices)\n', (20928, 20937), True, 'import numpy as np\n'), ((21683, 21702), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (21696, 21702), False, 'import os\n'), ((21712, 21729), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (21723, 21729), False, 'import os\n'), ((21825, 21861), 'os.path.isdir', 'os.path.isdir', (['reference_images_path'], {}), '(reference_images_path)\n', (21838, 21861), False, 'import os\n'), ((21871, 21905), 'os.makedirs', 'os.makedirs', (['reference_images_path'], {}), '(reference_images_path)\n', (21882, 21905), False, 'import os\n'), ((21995, 22036), 'os.path.isdir', 'os.path.isdir', (['perturbation_patterns_path'], {}), '(perturbation_patterns_path)\n', (22008, 22036), False, 'import os\n'), ((22046, 22085), 'os.makedirs', 'os.makedirs', (['perturbation_patterns_path'], {}), '(perturbation_patterns_path)\n', (22057, 22085), False, 'import os\n'), ((22145, 22171), 'os.path.isdir', 'os.path.isdir', (['frames_path'], {}), '(frames_path)\n', (22158, 22171), False, 'import os\n'), ((22181, 22205), 'os.makedirs', 'os.makedirs', (['frames_path'], {}), '(frames_path)\n', (22192, 22205), False, 'import os\n'), ((24855, 24927), 'numpy.concatenate', 'np.concatenate', (['(perturbation_patterns_indices, 
random_patterns_indices)'], {}), '((perturbation_patterns_indices, random_patterns_indices))\n', (24869, 24927), True, 'import numpy as np\n'), ((31454, 31505), 'os.path.join', 'os.path.join', (['frames_path', 'reference_frame_filename'], {}), '(frames_path, reference_frame_filename)\n', (31466, 31505), False, 'import os\n'), ((33187, 33215), 'tqdm.tqdm', 'tqdm.tqdm', (['reference_indices'], {}), '(reference_indices)\n', (33196, 33215), False, 'import tqdm\n'), ((7886, 7913), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (7893, 7913), True, 'import numpy as np\n'), ((8559, 8578), 'urllib.parse.urlparse', 'urlparse', (['local_url'], {}), '(local_url)\n', (8567, 8578), False, 'from urllib.parse import urlparse\n'), ((8635, 8670), 'urllib.request.urlretrieve', 'urlretrieve', (['remote_url', 'local_path'], {}), '(remote_url, local_path)\n', (8646, 8670), False, 'from urllib.request import urlopen, urlretrieve\n'), ((9024, 9043), 'urllib.parse.urlparse', 'urlparse', (['local_url'], {}), '(local_url)\n', (9032, 9043), False, 'from urllib.parse import urlparse\n'), ((10616, 10634), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (10624, 10634), True, 'import numpy as np\n'), ((11163, 11178), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (11171, 11178), True, 'import numpy as np\n'), ((12195, 12221), 'numpy.arange', 'np.arange', (['(0)', 'image_height'], {}), '(0, image_height)\n', (12204, 12221), True, 'import numpy as np\n'), ((12250, 12266), 'numpy.mean', 'np.mean', (['image_x'], {}), '(image_x)\n', (12257, 12266), True, 'import numpy as np\n'), ((12304, 12329), 'numpy.arange', 'np.arange', (['(0)', 'image_width'], {}), '(0, image_width)\n', (12313, 12329), True, 'import numpy as np\n'), ((12358, 12374), 'numpy.mean', 'np.mean', (['image_y'], {}), '(image_y)\n', (12365, 12374), True, 'import numpy as np\n'), ((12530, 12556), 'numpy.arange', 'np.arange', (['(0)', 'frame_height'], {}), '(0, frame_height)\n', 
(12539, 12556), True, 'import numpy as np\n'), ((12585, 12601), 'numpy.mean', 'np.mean', (['frame_x'], {}), '(frame_x)\n', (12592, 12601), True, 'import numpy as np\n'), ((12811, 12836), 'numpy.arange', 'np.arange', (['(0)', 'frame_width'], {}), '(0, frame_width)\n', (12820, 12836), True, 'import numpy as np\n'), ((12865, 12881), 'numpy.mean', 'np.mean', (['frame_y'], {}), '(frame_y)\n', (12872, 12881), True, 'import numpy as np\n'), ((13142, 13160), 'numpy.nonzero', 'np.nonzero', (['mask_x'], {}), '(mask_x)\n', (13152, 13160), True, 'import numpy as np\n'), ((13244, 13262), 'numpy.nonzero', 'np.nonzero', (['mask_y'], {}), '(mask_y)\n', (13254, 13262), True, 'import numpy as np\n'), ((13356, 13392), 'numpy.ones', 'np.ones', (['frame_shape'], {'dtype': 'np.float'}), '(frame_shape, dtype=np.float)\n', (13363, 13392), True, 'import numpy as np\n'), ((13954, 13980), 'numpy.arange', 'np.arange', (['(0)', 'image_height'], {}), '(0, image_height)\n', (13963, 13980), True, 'import numpy as np\n'), ((14009, 14025), 'numpy.mean', 'np.mean', (['image_x'], {}), '(image_x)\n', (14016, 14025), True, 'import numpy as np\n'), ((14063, 14088), 'numpy.arange', 'np.arange', (['(0)', 'image_width'], {}), '(0, image_width)\n', (14072, 14088), True, 'import numpy as np\n'), ((14117, 14133), 'numpy.mean', 'np.mean', (['image_y'], {}), '(image_y)\n', (14124, 14133), True, 'import numpy as np\n'), ((14298, 14324), 'numpy.arange', 'np.arange', (['(0)', 'frame_height'], {}), '(0, frame_height)\n', (14307, 14324), True, 'import numpy as np\n'), ((14353, 14369), 'numpy.mean', 'np.mean', (['frame_x'], {}), '(frame_x)\n', (14360, 14369), True, 'import numpy as np\n'), ((14579, 14604), 'numpy.arange', 'np.arange', (['(0)', 'frame_width'], {}), '(0, frame_width)\n', (14588, 14604), True, 'import numpy as np\n'), ((14633, 14649), 'numpy.mean', 'np.mean', (['frame_y'], {}), '(frame_y)\n', (14640, 14649), True, 'import numpy as np\n'), ((14910, 14928), 'numpy.nonzero', 'np.nonzero', (['mask_x'], {}), 
'(mask_x)\n', (14920, 14928), True, 'import numpy as np\n'), ((15012, 15030), 'numpy.nonzero', 'np.nonzero', (['mask_y'], {}), '(mask_y)\n', (15022, 15030), True, 'import numpy as np\n'), ((15124, 15160), 'numpy.ones', 'np.ones', (['frame_shape'], {'dtype': 'np.float'}), '(frame_shape, dtype=np.float)\n', (15131, 15160), True, 'import numpy as np\n'), ((15725, 15742), 'numpy.std', 'np.std', (['frame_roi'], {}), '(frame_roi)\n', (15731, 15742), True, 'import numpy as np\n'), ((17962, 17980), 'numpy.nonzero', 'np.nonzero', (['mask_x'], {}), '(mask_x)\n', (17972, 17980), True, 'import numpy as np\n'), ((18044, 18062), 'numpy.nonzero', 'np.nonzero', (['mask_y'], {}), '(mask_y)\n', (18054, 18062), True, 'import numpy as np\n'), ((24757, 24789), 'numpy.arange', 'np.arange', (['(0)', 'nb_random_patterns'], {}), '(0, nb_random_patterns)\n', (24766, 24789), True, 'import numpy as np\n'), ((13191, 13209), 'numpy.nonzero', 'np.nonzero', (['mask_x'], {}), '(mask_x)\n', (13201, 13209), True, 'import numpy as np\n'), ((13293, 13311), 'numpy.nonzero', 'np.nonzero', (['mask_y'], {}), '(mask_y)\n', (13303, 13311), True, 'import numpy as np\n'), ((14959, 14977), 'numpy.nonzero', 'np.nonzero', (['mask_x'], {}), '(mask_x)\n', (14969, 14977), True, 'import numpy as np\n'), ((15061, 15079), 'numpy.nonzero', 'np.nonzero', (['mask_y'], {}), '(mask_y)\n', (15071, 15079), True, 'import numpy as np\n'), ((17220, 17242), 'numpy.min', 'np.min', (['perturbation_x'], {}), '(perturbation_x)\n', (17226, 17242), True, 'import numpy as np\n'), ((17306, 17328), 'numpy.max', 'np.max', (['perturbation_x'], {}), '(perturbation_x)\n', (17312, 17328), True, 'import numpy as np\n'), ((17404, 17426), 'numpy.min', 'np.min', (['perturbation_y'], {}), '(perturbation_y)\n', (17410, 17426), True, 'import numpy as np\n'), ((17490, 17512), 'numpy.max', 'np.max', (['perturbation_y'], {}), '(perturbation_y)\n', (17496, 17512), True, 'import numpy as np\n'), ((18274, 18292), 'numpy.mean', 'np.mean', (['frame_roi'], 
{}), '(frame_roi)\n', (18281, 18292), True, 'import numpy as np\n'), ((18338, 18355), 'numpy.std', 'np.std', (['frame_roi'], {}), '(frame_roi)\n', (18344, 18355), True, 'import numpy as np\n'), ((18401, 18418), 'numpy.min', 'np.min', (['frame_roi'], {}), '(frame_roi)\n', (18407, 18418), True, 'import numpy as np\n'), ((18464, 18481), 'numpy.max', 'np.max', (['frame_roi'], {}), '(frame_roi)\n', (18470, 18481), True, 'import numpy as np\n'), ((18583, 18601), 'numpy.mean', 'np.mean', (['frame_roi'], {}), '(frame_roi)\n', (18590, 18601), True, 'import numpy as np\n'), ((18647, 18664), 'numpy.std', 'np.std', (['frame_roi'], {}), '(frame_roi)\n', (18653, 18664), True, 'import numpy as np\n'), ((18710, 18727), 'numpy.min', 'np.min', (['frame_roi'], {}), '(frame_roi)\n', (18716, 18727), True, 'import numpy as np\n'), ((18773, 18790), 'numpy.max', 'np.max', (['frame_roi'], {}), '(frame_roi)\n', (18779, 18790), True, 'import numpy as np\n'), ((32918, 32969), 'os.path.join', 'os.path.join', (['frames_path', 'perturbed_frame_filename'], {}), '(frames_path, perturbed_frame_filename)\n', (32930, 32969), False, 'import os\n'), ((12647, 12662), 'numpy.min', 'np.min', (['image_x'], {}), '(image_x)\n', (12653, 12662), True, 'import numpy as np\n'), ((12723, 12738), 'numpy.max', 'np.max', (['image_x'], {}), '(image_x)\n', (12729, 12738), True, 'import numpy as np\n'), ((12927, 12942), 'numpy.min', 'np.min', (['image_y'], {}), '(image_y)\n', (12933, 12942), True, 'import numpy as np\n'), ((13003, 13018), 'numpy.max', 'np.max', (['image_y'], {}), '(image_y)\n', (13009, 13018), True, 'import numpy as np\n'), ((13704, 13717), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (13712, 13717), False, 'import math\n'), ((14415, 14430), 'numpy.min', 'np.min', (['image_x'], {}), '(image_x)\n', (14421, 14430), True, 'import numpy as np\n'), ((14491, 14506), 'numpy.max', 'np.max', (['image_x'], {}), '(image_x)\n', (14497, 14506), True, 'import numpy as np\n'), ((14695, 14710), 'numpy.min', 
'np.min', (['image_y'], {}), '(image_y)\n', (14701, 14710), True, 'import numpy as np\n'), ((14771, 14786), 'numpy.max', 'np.max', (['image_y'], {}), '(image_y)\n', (14777, 14786), True, 'import numpy as np\n'), ((34204, 34255), 'os.path.join', 'os.path.join', (['frames_path', 'perturbed_frame_filename'], {}), '(frames_path, perturbed_frame_filename)\n', (34216, 34255), False, 'import os\n')] |
import cv2
import torch
import numpy as np
import torch.nn as nn
from config import cfg
from utils.anchor import Anchors
def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans):
    """Crop a square patch of side `original_sz` centred on `pos` from `im`.

    Regions of the crop that fall outside the image are filled with
    `avg_chans`; the patch is then resized to `model_sz` (when it differs)
    and returned as a (1, C, H, W) float32 torch tensor.

    args:
        im: bgr based image (H x W x C numpy array)
        pos: center position (scalar or [x, y])
        model_sz: exemplar size
        original_sz: original crop size
        avg_chans: channel average used as the padding value
    """
    if isinstance(pos, float):
        pos = [pos, pos]
    half = (original_sz + 1) / 2
    # Inclusive crop bounds; may extend outside the image.
    x_min = np.floor(pos[0] - half + 0.5)
    x_max = x_min + original_sz - 1
    y_min = np.floor(pos[1] - half + 0.5)
    y_max = y_min + original_sz - 1
    rows, cols, chans = im.shape
    # How far the crop sticks out on each side.
    pad_left = int(max(0., -x_min))
    pad_top = int(max(0., -y_min))
    pad_right = int(max(0., x_max - cols + 1))
    pad_bottom = int(max(0., y_max - rows + 1))
    # Shift the bounds into the (possibly padded) coordinate frame.
    x_min, x_max = x_min + pad_left, x_max + pad_left
    y_min, y_max = y_min + pad_top, y_max + pad_top
    if pad_left or pad_top or pad_right or pad_bottom:
        padded_shape = (rows + pad_top + pad_bottom, cols + pad_left + pad_right, chans)
        padded = np.zeros(padded_shape, np.uint8)
        padded[pad_top:pad_top + rows, pad_left:pad_left + cols, :] = im
        if pad_top:
            padded[0:pad_top, pad_left:pad_left + cols, :] = avg_chans
        if pad_bottom:
            padded[rows + pad_top:, pad_left:pad_left + cols, :] = avg_chans
        if pad_left:
            padded[:, 0:pad_left, :] = avg_chans
        if pad_right:
            padded[:, cols + pad_left:, :] = avg_chans
        src = padded
    else:
        src = im
    im_patch = src[int(y_min):int(y_max + 1), int(x_min):int(x_max + 1), :]
    if not np.array_equal(model_sz, original_sz):
        im_patch = cv2.resize(im_patch, (model_sz, model_sz))
    # HWC -> (1, C, H, W), float32, as a torch tensor.
    im_patch = im_patch.transpose(2, 0, 1)
    im_patch = im_patch[np.newaxis, :, :, :]
    im_patch = im_patch.astype(np.float32)
    return torch.from_numpy(im_patch)
def generate_anchor(self, score_size):
    """Build the dense anchor grid used for bounding-box regression.

    Each base anchor (x1, y1, x2, y2) is converted to (cx, cy, w, h) and
    replicated over a score_size x score_size grid centred on the origin,
    spaced by the anchor stride.

    Returns a (score_size * score_size * anchor_num, 4) numpy array.
    """
    anchor_gen = Anchors(cfg.ANCHOR.STRIDE,
                         cfg.ANCHOR.RATIOS,
                         cfg.ANCHOR.SCALES)
    base = anchor_gen.anchors
    # Corner form -> centre/size form.
    cx = (base[:, 0] + base[:, 2]) * 0.5
    cy = (base[:, 1] + base[:, 3]) * 0.5
    w = base[:, 2] - base[:, 0]
    h = base[:, 3] - base[:, 1]
    anchor = np.stack([cx, cy, w, h], 1)
    stride = anchor_gen.stride
    anchor_num = anchor.shape[0]
    anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4))
    # Grid offsets, symmetric about the map centre.
    ori = - (score_size // 2) * stride
    grid = [ori + stride * k for k in range(score_size)]
    xx, yy = np.meshgrid(grid, grid)
    xx = np.tile(xx.flatten(), (anchor_num, 1)).flatten()
    yy = np.tile(yy.flatten(), (anchor_num, 1)).flatten()
    anchor[:, 0] = xx.astype(np.float32)
    anchor[:, 1] = yy.astype(np.float32)
    return anchor
def convert_bbox(self, delta, anchor):
    """Decode network regression offsets into absolute boxes.

    delta: 4-D torch tensor whose first axis holds (dx, dy, dw, dh).
    anchor: (N, 4) numpy array of anchors in (cx, cy, w, h) form.
    Returns a (4, N) numpy array: cx, cy, w, h per anchor.
    """
    offsets = delta.permute(1, 2, 3, 0).contiguous().view(4, -1)
    offsets = offsets.data.cpu().numpy()
    # Centre: offset scaled by anchor size, shifted by anchor centre.
    offsets[0, :] = offsets[0, :] * anchor[:, 2] + anchor[:, 0]
    offsets[1, :] = offsets[1, :] * anchor[:, 3] + anchor[:, 1]
    # Size: log-space offset applied multiplicatively.
    for k in (2, 3):
        offsets[k, :] = np.exp(offsets[k, :]) * anchor[:, k]
    return offsets
def convert_score(self, score):
    """Convert raw 2-class logits to foreground probabilities.

    score: 4-D torch tensor whose first axis holds the two class logits.
    Returns a 1-D numpy array of the foreground (class 1) probability per
    anchor location.
    """
    score = score.permute(1, 2, 3, 0).contiguous().view(2, -1).permute(1, 0)
    # BUGFIX: the original called F.softmax, but `F` is never imported in
    # this file (only `torch.nn as nn`) -> NameError at runtime. Use the
    # fully-qualified nn.functional.softmax instead.
    score = nn.functional.softmax(score, dim=1).data[:, 1].cpu().numpy()
    return score
def bbox_clip(self, cx, cy, width, height, boundary):
    """Clamp a box to the image.

    The centre is kept inside [0, boundary]; width/height are kept in
    [10, boundary]. boundary is (image_height, image_width).
    Returns the clamped (cx, cy, width, height).
    """
    def _clamp(value, low, high):
        return max(low, min(value, high))

    cx = _clamp(cx, 0, boundary[1])
    cy = _clamp(cy, 0, boundary[0])
    width = _clamp(width, 10, boundary[1])
    height = _clamp(height, 10, boundary[0])
    return cx, cy, width, height
"utils.anchor.Anchors",
"numpy.stack",
"numpy.floor",
"numpy.zeros",
"numpy.exp",
"numpy.tile",
"numpy.array_equal",
"cv2.resize",
"torch.from_numpy"
] | [((597, 623), 'numpy.floor', 'np.floor', (['(pos[0] - c + 0.5)'], {}), '(pos[0] - c + 0.5)\n', (605, 623), True, 'import numpy as np\n'), ((726, 752), 'numpy.floor', 'np.floor', (['(pos[1] - c + 0.5)'], {}), '(pos[1] - c + 0.5)\n', (734, 752), True, 'import numpy as np\n'), ((2307, 2333), 'torch.from_numpy', 'torch.from_numpy', (['im_patch'], {}), '(im_patch)\n', (2323, 2333), False, 'import torch\n'), ((2414, 2478), 'utils.anchor.Anchors', 'Anchors', (['cfg.ANCHOR.STRIDE', 'cfg.ANCHOR.RATIOS', 'cfg.ANCHOR.SCALES'], {}), '(cfg.ANCHOR.STRIDE, cfg.ANCHOR.RATIOS, cfg.ANCHOR.SCALES)\n', (2421, 2478), False, 'from utils.anchor import Anchors\n'), ((2646, 2711), 'numpy.stack', 'np.stack', (['[(x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1]', '(1)'], {}), '([(x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1], 1)\n', (2654, 2711), True, 'import numpy as np\n'), ((1349, 1373), 'numpy.zeros', 'np.zeros', (['size', 'np.uint8'], {}), '(size, np.uint8)\n', (1357, 1373), True, 'import numpy as np\n'), ((2055, 2092), 'numpy.array_equal', 'np.array_equal', (['model_sz', 'original_sz'], {}), '(model_sz, original_sz)\n', (2069, 2092), True, 'import numpy as np\n'), ((2114, 2156), 'cv2.resize', 'cv2.resize', (['im_patch', '(model_sz, model_sz)'], {}), '(im_patch, (model_sz, model_sz))\n', (2124, 2156), False, 'import cv2\n'), ((3558, 3577), 'numpy.exp', 'np.exp', (['delta[2, :]'], {}), '(delta[2, :])\n', (3564, 3577), True, 'import numpy as np\n'), ((3612, 3631), 'numpy.exp', 'np.exp', (['delta[3, :]'], {}), '(delta[3, :])\n', (3618, 3631), True, 'import numpy as np\n'), ((2783, 2823), 'numpy.tile', 'np.tile', (['anchor', '(score_size * score_size)'], {}), '(anchor, score_size * score_size)\n', (2790, 2823), True, 'import numpy as np\n')] |
from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic
import sys
import time
import os
import errno
import numpy as np
import pyqtgraph as pg
import math
from Worker import Worker
from set_to_user_friendly_QLineEdit import set_to_user_friendly_QLineEdit
from outputs_parameters import outputs_parameters
from catch_exception import *
class AcqCard(QtWidgets.QMainWindow):
    """Main window driving a Zynq-based data-acquisition card.

    Class attributes below are acquisition constants and AXI register
    addresses shared by all instances.
    """
    # Default values
    fs = 125e6  # base ADC sampling rate in Hz
    START_ADDR = 0x0800_0000 # Define by reserved memory in devicetree used to build Linux
    # Warning, if I put the '+1', there is an error : maybe a signal that wrap to 0 in the FPGA
    # Therefore, to keep a multiple of 32 bits, I substracted 3 bytes
    MAXPOINTS = int((0x1FFFFFFF-START_ADDR - 3)/2) # /2 because 2 bytes per points
    # Reg addr :
    xadc_base_addr = 0x0001_0000  # XADC (temperature sensor) base address
    DOWNSAMPLE_REG = 0x0007_0000 #32 bits, downsample_rate-1 : 0 for 125e6, n for 125e6/n
    RESET_DMA_REG = 0x0008_0000 # 1 bit
    MUX_ADC_1_REG = 0x0009_0000 # 1 bit
    MUX_ADC_2_REG = 0x0009_0008 # 1 bit
    N_BYTES_REG = 0x000A_0000 # 32 bits
    CHANNEL_REG = 0x000A_0008 # 2 bits
    START_REG = 0x000B_0000 # 1 bit : start acq on rising_edge
    TRIG_REG = 0x000B_0004 # 1 bit : allow start on rising edge of external pin
    STATUS_REG = 0x000B_0008 # 2 bits : error_ACQ (STS =! 0x80) & data_tvalid_int ('1' when data transfer is done)
    START_ADDR_REG = 0x000C_0000 # 32 bits # Min value is define by reserved memory in devicetree used to build Linux (0x0800_0000 in this version)
    def __init__(self, dev = None):
        """Build the GUI, set up timers/threads and program the initial FPGA registers.

        dev: hardware interface object exposing read/write_Zynq_* register
        and DDR helpers (presumably a Red Pitaya/Zynq wrapper — confirm
        against the caller that constructs AcqCard).
        """
        super(AcqCard, self).__init__()
        self.dev = dev
        # Set a few global PyQtGraph settings before creating plots:
        pg.setConfigOption('leftButtonPan', False)
        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')
        pg.setConfigOption('antialias', True)
        uic.loadUi("AcqCard_gui.ui", self)
        # Periodic (1 s) refresh of the Zynq temperature read-out.
        self.timerTemperature = Qt.QTimer(self)
        self.timerTemperature.timeout.connect(self.timerTemperatureUpdate)
        self.timerTemperature.start(1000)
        # Single-shot timer used to poll the acquisition status register.
        self.timerDataUpdate = Qt.QTimer(self)
        self.threadpool = QtCore.QThreadPool()
        print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
        self.threadRunning = False
        self.dev.write_Zynq_AXI_register_uint32(self.START_ADDR_REG, self.START_ADDR)
        mux_value = 1 #0 for ADCs, 1 for counter
        # set MUX
        self.dev.write_Zynq_AXI_register_uint32(self.MUX_ADC_1_REG, mux_value)
        self.dev.write_Zynq_AXI_register_uint32(self.MUX_ADC_2_REG, mux_value)
        #small patch cause self.actual_fs need to be create to call the others functions:
        downsample_value = int(float(self.lineEdit_downsampling.text()))
        self.downsample_value = downsample_value
        self.actual_fs = self.fs/self.downsample_value
        # Hardware register holds (downsample - 1): 0 means the full 125 MHz rate.
        self.dev.write_Zynq_AXI_register_uint32(self.DOWNSAMPLE_REG, self.downsample_value-1)
        self.label_samplingRate.setText('fs = {:.3e} Hz'.format(self.actual_fs))
        self.initUI()
        self.verifyPath()
        self.changeChannel() #also call changePlotLayout and changeNumberOfPoints
def timerTemperatureUpdate(self):
if self.threadRunning == False:
ZynqTempInDegC = self.readZynqTemperature()
self.label_RPTemperature.setText('Zynq temperature (max 85 °C operating): %.2f °C' % ZynqTempInDegC)
else:
self.label_RPTemperature.setText('Zynq temperature (max 85 °C operating): Can''t update temperature while transfering ddr')
def getDataFromZynq_thread(self):
bVerbose = False
self.timerDataUpdate.stop()
status = self.dev.read_Zynq_AXI_register_uint32(self.STATUS_REG)
if status == 0 and self.acq_active == 1:
if bVerbose:
print('Data not yet ready')
self.timerDataUpdate.singleShot(1000, self.getDataFromZynq_thread)
return
if status > 1: #ready != 0 and != 1, therefore, error on acquisition
print('Warning, there was an error the acquisition... Further debugging needed \n Possible causes : not enough points or too much')
progress_callback.emit('Error')
return
if status == 1:
worker = Worker(self.getDataFromZynq) # Any other args, kwargs are passed to the run function
worker.signals.finished.connect(self.thread_complete)
worker.signals.progress.connect(self.progressStatus_update)
# Execute
self.threadRunning = True
self.threadpool.start(worker)
def thread_complete (self):
self.threadRunning = False
self.label_status.setText('Status : Idle')
# Restart another acquisition if checkBox.isChecked()
if self.continuousDataAcquisition:
#resend start acq
if self.checkBox_numberOfWaveform.isChecked(): # if checked, re-start acquisition N-1 times
self.numberRemaining = self.numberRemaining - 1
if self.numberRemaining > 0:
self.start_acquisition(continuous = 1)
else: # if unchecked, always re-start acquisition
self.start_acquisition(continuous = 1)
def progressStatus_update (self, status):
self.label_status.setText('Status : {}'.format(status))
# This function is called in a thread (by getDataFromZynq_thread) to avoid crashing the GUI
def getDataFromZynq(self, progress_callback):
bVerbose = True
bVerboseTiming = False
##########################################################
#Transferring
progress_callback.emit('Transferring')
time_start = time.process_time()
totalNumberOfPoints = int(self.numberOfPoints * np.sum(self.channelValid))
self.data_in_bin = self.dev.read_Zynq_ddr(address_offset = 0, number_of_bytes=totalNumberOfPoints*2)
self.data_in_bin = np.fromstring(self.data_in_bin, dtype=np.int16)
self.data_in_volt = self.data_in_bin #self.data_in_bin / 2**15
if bVerboseTiming:
print("transfer read_Zynq_ddr {} pts : elapsed = {}".format(totalNumberOfPoints, (time.process_time()-time_start)))
##########################################################
#Plotting
progress_callback.emit('Plotting')
time_start = time.process_time()
if np.sum(self.channelValid) == 2:
#dual channel mode
# odd element => channel 0
# even element => channel 1
self.plot_timeDomain(self.data_in_volt[1::2], channel = 0) # does it create a copy of data array? maybe should pass a reference
self.plot_timeDomain(self.data_in_volt[::2], channel = 1)
self.plot_frequencyDomain(self.data_in_volt[1::2], channel = 0)
self.plot_frequencyDomain(self.data_in_volt[::2], channel = 1)
else:
# single channel mode
self.plot_timeDomain(self.data_in_volt, channel = self.channelValid.index(1))
self.plot_frequencyDomain(self.data_in_volt, channel = self.channelValid.index(1))
if bVerboseTiming:
print("plotting read_Zynq_ddr {} pts : elapsed = {}".format(totalNumberOfPoints, (time.process_time()-time_start)))
##########################################################
#Saving
if self.checkBox_autoSaveOnAcq.isChecked():
progress_callback.emit('Saving')
time_start = time.process_time()
self.saveData()
if bVerboseTiming:
print("saving read_Zynq_ddr {} pts : elapsed = {}".format(totalNumberOfPoints, (time.process_time()-time_start)))
#Reset DMA FSM (active low)
self.dev.write_Zynq_AXI_register_uint32(self.RESET_DMA_REG, 0)
self.dev.write_Zynq_AXI_register_uint32(self.RESET_DMA_REG, 1)
def initUI(self):
# Connect function to buttons
self.pushButton_stopAcq.clicked.connect(self.stopAcquisition)
self.pushButton_singleAcq.clicked.connect(self.start_single)
self.pushButton_continuousAcq.clicked.connect(self.start_continuous)
self.lineEdit_downsampling.returnPressed.connect(self.changeSamplingRate)
self.lineEdit_numberOfPoints.returnPressed.connect(self.changeNumberOfPoints)
self.lineEdit_timeAcq.returnPressed.connect(self.changeTimeAcqLength)
self.radioButton_channel_in1.clicked.connect(self.changeChannel)
self.radioButton_channel_in2.clicked.connect(self.changeChannel)
self.radioButton_channel_in1and2.clicked.connect(self.changeChannel)
self.checkBox_timeDomainDisplay.clicked.connect(self.changePlotLayout)
self.checkBox_FrequencyDomainDisplay.clicked.connect(self.changePlotLayout)
self.lineEdit_numberOfWaveform.returnPressed.connect(self.changeNumberOfWaveform)
self.pushButton_saveWaveform.clicked.connect(self.saveData)
self.pushButton_browsePath.clicked.connect(self.browsePath)
self.lineEdit_savePath.returnPressed.connect(self.verifyPath)
self.pushButton_outputsParameters.clicked.connect(self.openOutputParameters)
# list of RGB tuples defining the colors (same colorset as matlab)
colors_list = [( 0, 0.4470, 0.7410),
(0.8500, 0.3250, 0.0980),
(0.9290, 0.6940, 0.1250),
(0.4940, 0.1840, 0.5560),
(0.4660, 0.6740, 0.1880),
(0.3010, 0.7450, 0.9330),
(0.6350, 0.0780, 0.1840)]
numColors = len(colors_list)
def changePlotLayout(self, state = None): #state is not use, but QCheckBox.clicked.connect return state, which sometime cause an exception
self.graphicsView.clear()
row = 0
if self.checkBox_timeDomainDisplay.isChecked():
self.qpltItem_time = self.graphicsView.addPlot(title='Time Domain', row=row, col=0)
self.qpltItem_time.setLabel('left', 'Voltage [V]')#, color='red', size=30)
self.qpltItem_time.setLabel('bottom', 'Time [s]')#, color='red', size=30)
# self.qpltItem_time.setClipToView(True)
self.qpltItem_time.setDownsampling(ds=10, auto=True, mode='peak') #subsample, mean, peak
self.timeCurve = []
self.timeCurve.append(self.qpltItem_time.plot(pen='b'))
self.timeCurve.append(self.qpltItem_time.plot(pen='r'))
row = row+1
if self.checkBox_FrequencyDomainDisplay.isChecked():
self.qpltItem_freq = self.graphicsView.addPlot(title='Frequency Domain', row=row, col=0)
self.qpltItem_freq.setLabel('left', 'Power [dB]')
self.qpltItem_freq.setLabel('bottom', 'Frequency [MHz]')
# self.qpltItem_freq.setClipToView(True)
self.qpltItem_freq.setDownsampling(ds=10, auto=True, mode='peak') #subsample, mean, peak
self.freqCurve = []
self.freqCurve.append(self.qpltItem_freq.plot(pen='b'))
self.freqCurve.append(self.qpltItem_freq.plot(pen='r'))
    def start_continuous(self):
        """Kick off a continuous (auto re-arming) acquisition run."""
        #Reset DMA FSM (active low)
        self.dev.write_Zynq_AXI_register_uint32(self.RESET_DMA_REG, 0)
        self.dev.write_Zynq_AXI_register_uint32(self.RESET_DMA_REG, 1)
        self.start_acquisition(continuous = 1)
        # Waveform countdown consumed by thread_complete() in limited-count mode.
        self.numberRemaining = self.numberToAcquire
    def start_single(self):
        """Kick off a one-shot acquisition."""
        #Reset DMA FSM (active low)
        self.dev.write_Zynq_AXI_register_uint32(self.RESET_DMA_REG, 0)
        self.dev.write_Zynq_AXI_register_uint32(self.RESET_DMA_REG, 1)
        self.start_acquisition(continuous = 0)
def start_acquisition(self, continuous = 0):
self.continuousDataAcquisition = continuous
self.acq_active = 1
# set start
if self.checkBox_useTrigger.isChecked():
self.dev.write_Zynq_AXI_register_uint32(self.TRIG_REG, 0) # 0 before to make sure we have a rising edge
self.dev.write_Zynq_AXI_register_uint32(self.TRIG_REG, 1) # Start with trig need to stay high to register external trig
self.dev.write_Zynq_AXI_register_uint32(self.TRIG_REG, 0)
self.label_status.setText('Status : Waiting for trig')
else:
self.dev.write_Zynq_AXI_register_uint32(self.START_REG, 1)
self.dev.write_Zynq_AXI_register_uint32(self.START_REG, 0)
self.label_status.setText('Status : Acquisition')
totalNumberOfPoints = self.numberOfPoints * np.sum(self.channelValid)
timeToWait_in_ms = totalNumberOfPoints/self.actual_fs*1000
#print('time to wait : {}ms'.format(timeToWait_in_ms))
self.timerDataUpdate.singleShot(int(timeToWait_in_ms)+1, self.getDataFromZynq_thread)
def stopAcquisition(self):
self.label_status.setText('Status : Idle')
self.continuousDataAcquisition = 0
self.acq_active = 0
self.timerDataUpdate.stop()
def plot_timeDomain(self, data_in, channel = 0):
if self.checkBox_timeDomainDisplay.isChecked():
self.timeCurve[channel].clear()
time_axis = np.linspace(1, len(data_in), len(data_in))/self.actual_fs
self.timeCurve[channel].setData(time_axis,data_in)
# print('Voltage start -> end : {}'.format(data_in[0] - data_in[-1]))
def plot_frequencyDomain(self, data_in, channel = 0):
# Do we want to multiply data_in with a window?
# Do we want to remove DC (mean) component
if self.checkBox_FrequencyDomainDisplay.isChecked():
self.freqCurve[channel].clear()
N_fft = 2**(int(np.ceil(np.log2(len(data_in)))))
frequency_axis = np.linspace(0, (N_fft-1)/float(N_fft)*self.actual_fs, N_fft)
last_index_shown = int(np.round(len(frequency_axis)/2))
spc = np.abs(np.fft.fft(data_in, N_fft))
spc = 20*np.log10(spc + 1e-12) # -> dB (1e-12 to avoid log10(0))
self.freqCurve[channel].setData(frequency_axis[0:last_index_shown]/1e6, spc[0:last_index_shown])
def changeChannel(self):
if self.radioButton_channel_in1.isChecked():
channel = 1
self.channelValid = [1,0]
elif self.radioButton_channel_in2.isChecked():
channel = 2
self.channelValid = [0,1]
elif self.radioButton_channel_in1and2.isChecked():
channel = 3
self.channelValid = [1,1]
self.dev.write_Zynq_AXI_register_uint32(self.CHANNEL_REG, channel)
self.changeNumberOfPoints()
self.changePlotLayout()
def changeSamplingRate(self):
#TODO : implement a logic to avoid potential error if sampling rate is changed mid-acquisition
try :
downsample_value = int(float(self.lineEdit_downsampling.text()))
except ValueError:
downsample_value = self.downsample_value
self.lineEdit_downsampling.blockSignals(True)
self.lineEdit_downsampling.setText(str(downsample_value))
self.lineEdit_downsampling.blockSignals(False)
# make sure downsample_value is between 1 and 2^32
downsample_value = min(2**32,downsample_value)
downsample_value = max(1,downsample_value)
self.downsample_value = downsample_value
self.dev.write_Zynq_AXI_register_uint32(self.DOWNSAMPLE_REG, self.downsample_value-1)
self.actual_fs = self.fs/self.downsample_value
#Reset DMA FSM (active low)
self.dev.write_Zynq_AXI_register_uint32(self.RESET_DMA_REG, 0)
self.dev.write_Zynq_AXI_register_uint32(self.RESET_DMA_REG, 1)
self.label_samplingRate.setText('fs = {:.3e} Hz'.format(self.actual_fs))
self.lineEdit_timeAcq.blockSignals(True)
self.lineEdit_timeAcq.setText('{:.3e}'.format(self.numberOfPoints/self.actual_fs))
self.lineEdit_timeAcq.blockSignals(False)
def changeNumberOfPoints(self):
if self.lineEdit_numberOfPoints.text().upper() == 'MAX':
numberOfPoints = int(self.MAXPOINTS/np.sum(self.channelValid))
self.lineEdit_numberOfPoints.blockSignals(True)
self.lineEdit_numberOfPoints.setText(str(numberOfPoints))
self.lineEdit_numberOfPoints.blockSignals(False)
else:
try:
numberOfPoints = int(float(self.lineEdit_numberOfPoints.text()))
except ValueError:
numberOfPoints = self.numberOfPoints
self.lineEdit_numberOfPoints.blockSignals(True)
self.lineEdit_numberOfPoints.setText(str(numberOfPoints))
self.lineEdit_numberOfPoints.blockSignals(False)
#under 256 points, FPGA memory never fill
numberOfPoints_constraint = self.constraintNumber(numberOfPoints, 256, self.MAXPOINTS/np.sum(self.channelValid))
if numberOfPoints_constraint != numberOfPoints:
# This means numberOfPoints was changed by it's limits
numberOfPoints = numberOfPoints_constraint
self.lineEdit_timeAcq.blockSignals(True)
self.lineEdit_numberOfPoints.setText(str(int(numberOfPoints)))
self.lineEdit_timeAcq.blockSignals(False)
self.numberOfPoints = numberOfPoints
self.lineEdit_timeAcq.blockSignals(True)
self.lineEdit_timeAcq.setText('{:.3e}'.format(self.numberOfPoints/self.actual_fs))
self.lineEdit_timeAcq.blockSignals(False)
self.dev.write_Zynq_AXI_register_uint32(self.N_BYTES_REG, 2*numberOfPoints*np.sum(self.channelValid))
def changeTimeAcqLength(self):
if self.lineEdit_timeAcq.text().upper() == 'MAX':
self.lineEdit_numberOfPoints.blockSignals(True)
self.lineEdit_numberOfPoints.setText('MAX')
self.lineEdit_numberOfPoints.blockSignals(False)
else:
try:
numberOfPoints = round(float(self.lineEdit_timeAcq.text())*self.actual_fs)
except ValueError:
numberOfPoints = self.numberOfPoints
self.lineEdit_numberOfPoints.blockSignals(True)
self.lineEdit_numberOfPoints.setText(str(numberOfPoints))
self.lineEdit_numberOfPoints.blockSignals(False)
self.changeNumberOfPoints()
def constraintNumber(self, number, min, max):
if number <= min:
return min
elif number >= max:
return max
else:
return number
def changeNumberOfWaveform(self):
try:
numberToAcquire = int(self.lineEdit_numberOfWaveform.text())
except ValueError:
numberToAcquire = 0
self.lineEdit_numberOfWaveform.setText(str(numberToAcquire))
self.numberToAcquire = numberToAcquire
def browsePath(self):
curDir = os.getcwd()
path = QtWidgets.QFileDialog.getExistingDirectory(self, "Select a emplacement to save data", curDir, QtGui.QFileDialog.ShowDirsOnly)
if path != '':
self.lineEdit_savePath.setText(path)
self.path = path
def verifyPath(self):
path = self.lineEdit_savePath.text()
self.make_sure_path_exists(path)
self.path = path
def saveData(self):
if self.channelValid == [1,0]:
self.saveSingle('IN1')
elif self.channelValid == [0,1]:
self.saveSingle('IN2')
elif self.channelValid == [1,1]:
self.saveDual('IN1','IN2')
else:
return
def saveSingle(self, channelName):
fileName = self.lineEdit_fileName.text()
# find next unused filename
file_exist = True
file_number = 0
while (file_exist):
file_number = file_number + 1
fileName_long = channelName + fileName + '{:d}'.format(file_number) + '.bin'
fileName_plusPath = self.path + '/' + fileName_long
file_exist = os.path.exists(fileName_plusPath)
file_output = open(fileName_plusPath, 'wb')
file_output.write(self.data_in_bin.tobytes())
file_output.close()
def saveDual(self, channel1Name, channel2Name):
fileName = self.lineEdit_fileName.text()
file_exist = True
file_number = 0
while (file_exist):
file_number = file_number + 1
file1Name_long = channel1Name + fileName + '{:d}'.format(file_number) + '.bin'
file1Name_plusPath = self.path + '/' + file1Name_long
file2Name_long = channel2Name + fileName + '{:d}'.format(file_number) + '.bin'
file2Name_plusPath = self.path + '/' + file2Name_long
file_exist = os.path.exists(file1Name_plusPath) or os.path.exists(file2Name_plusPath) # False when both are false
file1_output = open(file1Name_plusPath, 'wb')
file1_output.write(self.data_in_bin[1::2].tobytes())
file1_output.close()
file2_output = open(file2Name_plusPath, 'wb')
file2_output.write(self.data_in_bin[::2].tobytes())
file2_output.close()
def openOutputParameters(self):
self.outputParameters = outputs_parameters(self.dev)
# (from jddes' DPLL software):
# read the Zynq's current temperature
def readZynqTemperature(self):
###########################################################################
# Reading the XADC values:
# See Xilinx document UG480 chapter 2 for conversion factors
# we use 2**16 instead of 2**12 for the denominator because the codes are "MSB-aligned" in the register (equivalent to a multiplication by 2**4)
xadc_temperature_code_to_degC = lambda x: x*503.975/2.**16-273.15
# time_start = time.process_time()
# average 10 readings because otherwise they are quite noisy:
# this reading loop takes just 2 ms for 10 readings at the moment so there is no real cost
N_average = 10.
reg_avg = 0.
for k in range(int(N_average)):
reg = self.dev.read_Zynq_AXI_register_uint32(self.xadc_base_addr+0x200)
reg_avg += float(reg)
reg_avg = float(reg_avg)/N_average
# print("elapsed = %f" % (time.process_time()-time_start))
ZynqTempInDegC = xadc_temperature_code_to_degC( reg_avg )
return ZynqTempInDegC
# From: http://stackoverflow.com/questions/273192/create-directory-if-it-doesnt-exist-for-file-write
# took 300 us if path doesn't exist
# took 90 us if path exist
def make_sure_path_exists(self, path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
################################################################
## Main code
################################################################
def main():
import RP_PLL
IP = '192.168.0.150'
PORT = 5000
dev = RP_PLL.RP_PLL_device(None)
dev.OpenTCPConnection(IP, PORT)
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtWidgets.QApplication(sys.argv)
ACQ = AcqCard(dev)
# Show GUI
ACQ.show()
# GUI.showMaximized()
# Execute application
app.exec_()
if __name__ == '__main__':
main()
| [
"PyQt5.QtCore.QCoreApplication.instance",
"RP_PLL.RP_PLL_device",
"pyqtgraph.setConfigOption",
"numpy.sum",
"PyQt5.QtWidgets.QFileDialog.getExistingDirectory",
"os.getcwd",
"Worker.Worker",
"time.process_time",
"PyQt5.QtCore.QThreadPool",
"outputs_parameters.outputs_parameters",
"os.path.exists"... | [((20405, 20431), 'RP_PLL.RP_PLL_device', 'RP_PLL.RP_PLL_device', (['None'], {}), '(None)\n', (20425, 20431), False, 'import RP_PLL\n'), ((20474, 20508), 'PyQt5.QtCore.QCoreApplication.instance', 'QtCore.QCoreApplication.instance', ([], {}), '()\n', (20506, 20508), False, 'from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic\n'), ((1676, 1718), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""leftButtonPan"""', '(False)'], {}), "('leftButtonPan', False)\n", (1694, 1718), True, 'import pyqtgraph as pg\n'), ((1721, 1758), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""background"""', '"""w"""'], {}), "('background', 'w')\n", (1739, 1758), True, 'import pyqtgraph as pg\n'), ((1761, 1798), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""foreground"""', '"""k"""'], {}), "('foreground', 'k')\n", (1779, 1798), True, 'import pyqtgraph as pg\n'), ((1801, 1838), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""antialias"""', '(True)'], {}), "('antialias', True)\n", (1819, 1838), True, 'import pyqtgraph as pg\n'), ((1842, 1876), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""AcqCard_gui.ui"""', 'self'], {}), "('AcqCard_gui.ui', self)\n", (1852, 1876), False, 'from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic\n'), ((1904, 1919), 'PyQt5.Qt.QTimer', 'Qt.QTimer', (['self'], {}), '(self)\n', (1913, 1919), False, 'from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic\n'), ((2051, 2066), 'PyQt5.Qt.QTimer', 'Qt.QTimer', (['self'], {}), '(self)\n', (2060, 2066), False, 'from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic\n'), ((2088, 2108), 'PyQt5.QtCore.QThreadPool', 'QtCore.QThreadPool', ([], {}), '()\n', (2106, 2108), False, 'from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic\n'), ((5246, 5265), 'time.process_time', 'time.process_time', ([], {}), '()\n', (5263, 5265), False, 'import time\n'), ((5468, 5515), 'numpy.fromstring', 'np.fromstring', (['self.data_in_bin'], {'dtype': 'np.int16'}), '(self.data_in_bin, dtype=np.int16)\n', 
(5481, 5515), True, 'import numpy as np\n'), ((5851, 5870), 'time.process_time', 'time.process_time', ([], {}), '()\n', (5868, 5870), False, 'import time\n'), ((16813, 16824), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16822, 16824), False, 'import os\n'), ((16834, 16968), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QtWidgets.QFileDialog.getExistingDirectory', (['self', '"""Select a emplacement to save data"""', 'curDir', 'QtGui.QFileDialog.ShowDirsOnly'], {}), "(self,\n 'Select a emplacement to save data', curDir, QtGui.QFileDialog.ShowDirsOnly\n )\n", (16876, 16968), False, 'from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic\n'), ((18797, 18825), 'outputs_parameters.outputs_parameters', 'outputs_parameters', (['self.dev'], {}), '(self.dev)\n', (18815, 18825), False, 'from outputs_parameters import outputs_parameters\n'), ((20535, 20567), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (20557, 20567), False, 'from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic\n'), ((3998, 4026), 'Worker.Worker', 'Worker', (['self.getDataFromZynq'], {}), '(self.getDataFromZynq)\n', (4004, 4026), False, 'from Worker import Worker\n'), ((5876, 5901), 'numpy.sum', 'np.sum', (['self.channelValid'], {}), '(self.channelValid)\n', (5882, 5901), True, 'import numpy as np\n'), ((6836, 6855), 'time.process_time', 'time.process_time', ([], {}), '()\n', (6853, 6855), False, 'import time\n'), ((11344, 11369), 'numpy.sum', 'np.sum', (['self.channelValid'], {}), '(self.channelValid)\n', (11350, 11369), True, 'import numpy as np\n'), ((17740, 17773), 'os.path.exists', 'os.path.exists', (['fileName_plusPath'], {}), '(fileName_plusPath)\n', (17754, 17773), False, 'import os\n'), ((20086, 20103), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (20097, 20103), False, 'import os\n'), ((5316, 5341), 'numpy.sum', 'np.sum', (['self.channelValid'], {}), '(self.channelValid)\n', (5322, 5341), True, 'import numpy as np\n'), ((12532, 12558), 
'numpy.fft.fft', 'np.fft.fft', (['data_in', 'N_fft'], {}), '(data_in, N_fft)\n', (12542, 12558), True, 'import numpy as np\n'), ((12572, 12593), 'numpy.log10', 'np.log10', (['(spc + 1e-12)'], {}), '(spc + 1e-12)\n', (12580, 12593), True, 'import numpy as np\n'), ((15757, 15782), 'numpy.sum', 'np.sum', (['self.channelValid'], {}), '(self.channelValid)\n', (15763, 15782), True, 'import numpy as np\n'), ((18380, 18414), 'os.path.exists', 'os.path.exists', (['file1Name_plusPath'], {}), '(file1Name_plusPath)\n', (18394, 18414), False, 'import os\n'), ((18418, 18452), 'os.path.exists', 'os.path.exists', (['file2Name_plusPath'], {}), '(file2Name_plusPath)\n', (18432, 18452), False, 'import os\n'), ((14469, 14494), 'numpy.sum', 'np.sum', (['self.channelValid'], {}), '(self.channelValid)\n', (14475, 14494), True, 'import numpy as np\n'), ((15110, 15135), 'numpy.sum', 'np.sum', (['self.channelValid'], {}), '(self.channelValid)\n', (15116, 15135), True, 'import numpy as np\n'), ((5688, 5707), 'time.process_time', 'time.process_time', ([], {}), '()\n', (5705, 5707), False, 'import time\n'), ((6632, 6651), 'time.process_time', 'time.process_time', ([], {}), '()\n', (6649, 6651), False, 'import time\n'), ((6981, 7000), 'time.process_time', 'time.process_time', ([], {}), '()\n', (6998, 7000), False, 'import time\n')] |
# -*- coding: utf-8 -*-
import numpy as np
def newton_solver( _jacobian, _residual, _u0, _tol, _n_max ):
it = 0
u_n = _u0
res = _residual( u_n )
res_norm = np.linalg.norm( res )
# print( 'Starting residual norm is %e' % res_norm )
while res_norm > _tol and it < _n_max:
it = it + 1
u_n_1 = 1.* u_n
u_n = u_n_1 - np.linalg.solve( _jacobian(u_n_1), _residual(u_n_1) )
res = _residual( u_n )
# print( 'Residual is' )
# print( res )
res_norm = np.linalg.norm( res )
# print( 'Iteration %d residual norm is %e' % (it, res_norm) )
return u_n | [
"numpy.linalg.norm"
] | [((184, 203), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {}), '(res)\n', (198, 203), True, 'import numpy as np\n'), ((536, 555), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {}), '(res)\n', (550, 555), True, 'import numpy as np\n')] |
import os
from typing import Union
import mlflow
import numpy as np
import pandas as pd
import torch
from ael import plot
def predict(model, AEVC, loader, scaler=None, baseline=None, device=None):
"""
Binding affinity predictions.
Parameters
----------
model: torch.nn.Module
Neural network
AEVC: torchani.AEVComputer
Atomic environment vector computer
loader:
Data loader
baseline: Tuple[np.ndarray, np.ndarray, np.ndarray]
Baseline for delta learning (PDB IDs, Vina, logK)
device: torch.device
Computation device
Returns
-------
Tuple[np.ndarray, np.ndarray, np.ndarray]
System identifiers, true valudes and predicted values
Notes
-----
The baseline for ∆-learning consists in the Autodock Vina score.
It is passed together with the corresponding PDB IDs and the
experimental :math:`\\log(K)`.
"""
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Move model to device
model.to(device)
# Model in evaluation mode
model.eval()
true = []
predictions = []
identifiers = []
if baseline is not None:
baseline_ids, baseline_values, logK = baseline
with torch.no_grad(): # Turn off gradient computation during inference
for ids, labels, species_coordinates_ligmasks in loader:
# Move data to device
labels = labels.to(device)
species = species_coordinates_ligmasks[0].to(device)
coordinates = species_coordinates_ligmasks[1].to(device)
if len(species_coordinates_ligmasks) == 2:
ligmasks = None
else:
ligmasks = species_coordinates_ligmasks[2].to(device)
# Compute AEV
aevs = AEVC.forward((species, coordinates)).aevs
# Forward pass
output = model(species, aevs, ligmasks)
output = output.cpu().numpy()
labels = labels.cpu().numpy()
if scaler is not None:
scaler.inverse_transform(output)
scaler.inverse_transform(labels)
if baseline is None:
# Store true and predicted values
predictions += output.tolist()
true += labels.tolist()
else: # Delta learning
# Store predicted values (delta) plus baseline
# This corresponds to the final prediction
# Mask baseline_ids with prediction identifiers
# This allows to make b_ids and ids identical when sorted
mask = np.isin(baseline_ids, ids)
# Select relevant baseline
b_ids = baseline_ids[mask]
b_vals = baseline_values[mask]
b_logK = logK[mask]
# Sort baseline values according to IDs
bsort = np.argsort(b_ids)
b_vals = b_vals[bsort]
b_logK = b_logK[bsort]
# Sort output values according to IDs
outsort = np.argsort(ids)
ids = ids[outsort] # IDs
output = output[outsort] # Deltas
# Compute final predicitons: output plus baseline
predictions += (output + b_vals).tolist()
# True values are stored in baseline file
# The labels are deltas, not true values
true += b_logK.tolist()
# Store systems identifiers
identifiers += ids.tolist()
# TODO: Work with numpy array directly instead of lists
return np.array(identifiers), np.array(true), np.array(predictions)
def evaluate(
models,
loader,
AEVC,
outpath: str,
stage: str = "predict",
scaler=None,
baseline=None,
plt: bool = True,
) -> None:
"""
Evaluate model performance on a given dataset.
Parameters
----------
model: torch.nn.Module
Neural network
loader:
Data loader
AEVC: torchani.AEVComputer
Atomic environment vector computer
outpath: str
Output path
stage: str
Evaluation stage (train, validation, test or predict)
baseline: Tuple[np.ndarray, np.ndarray, np.ndarray]
Baseline for delta learning (PDB IDs, Vina, logK)
plt: bool
Plotting flag
"""
assert stage in ["train", "valid", "test", "predict"]
results = {}
for idx, model in enumerate(models):
ids, true, predicted = predict(model, AEVC, loader, scaler, baseline)
# Store results
if idx == 0:
results["true"] = pd.Series(index=ids, data=true)
results[f"predicted_{idx}"] = pd.Series(index=ids, data=predicted)
# Build dataframe
# This takes care of possible different order of data in different models
df = pd.DataFrame(results)
# Compute averages and stds
df["avg"] = df.drop("true", axis="columns").mean(axis="columns")
df["std"] = df.drop("true", axis="columns").std(axis="columns")
csv = os.path.join(outpath, f"{stage}.csv")
df.to_csv(csv, float_format="%.5f")
mlflow.log_artifact(csv)
# Plot
if plt:
plot.regplot(
df["true"].to_numpy(),
df["avg"].to_numpy(),
std=df["std"].to_numpy(),
name=stage,
path=outpath,
)
if __name__ == "__main__":
import json
from torch.utils import data
from ael import argparsers, loaders, utils
args = argparsers.predictparser()
if args.device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
device = torch.device(args.device)
mlflow.set_experiment(args.experiment)
# Start MLFlow run (named predict)
with mlflow.start_run(run_name="predict"):
mlflow.log_param("device", args.device)
mlflow.log_param("distance", args.distance)
mlflow.log_param("dataset", args.dataset)
mlflow.log_param("datapaths", args.datapaths)
mlflow.log_param("batchsize", args.batchsize)
if args.chemap is not None:
with open(args.chemap, "r") as fin:
cmap = json.load(fin)
else:
cmap = None
if args.vscreening is None:
testdata: Union[loaders.PDBData, loaders.VSData] = loaders.PDBData(
args.dataset,
args.distance,
args.datapaths,
cmap,
desc="",
removeHs=args.removeHs,
ligmask=args.ligmask,
)
else:
testdata = loaders.VSData(
args.dataset,
args.distance,
args.datapaths,
cmap,
desc="",
removeHs=args.removeHs,
labelspath=args.vscreening,
)
amap = utils.load_amap(args.amap)
testdata.atomicnums_to_idxs(amap)
n_species = len(amap)
mlflow.log_param("n_species", n_species)
testloader = data.DataLoader(
testdata,
batch_size=args.batchsize,
shuffle=False,
collate_fn=loaders.pad_collate,
)
AEVC = utils.loadAEVC(args.aev)
models = [utils.loadmodel(m) for m in args.models]
evaluate(
models,
testloader,
AEVC,
args.outpath,
stage="predict",
baseline=None,
plt=args.plot,
)
| [
"numpy.isin",
"ael.utils.loadmodel",
"mlflow.log_artifact",
"ael.argparsers.predictparser",
"numpy.argsort",
"ael.utils.loadAEVC",
"torch.device",
"torch.no_grad",
"os.path.join",
"pandas.DataFrame",
"mlflow.start_run",
"mlflow.log_param",
"torch.utils.data.DataLoader",
"ael.utils.load_ama... | [((4906, 4927), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (4918, 4927), True, 'import pandas as pd\n'), ((5109, 5146), 'os.path.join', 'os.path.join', (['outpath', 'f"""{stage}.csv"""'], {}), "(outpath, f'{stage}.csv')\n", (5121, 5146), False, 'import os\n'), ((5191, 5215), 'mlflow.log_artifact', 'mlflow.log_artifact', (['csv'], {}), '(csv)\n', (5210, 5215), False, 'import mlflow\n'), ((5569, 5595), 'ael.argparsers.predictparser', 'argparsers.predictparser', ([], {}), '()\n', (5593, 5595), False, 'from ael import argparsers, loaders, utils\n'), ((5761, 5799), 'mlflow.set_experiment', 'mlflow.set_experiment', (['args.experiment'], {}), '(args.experiment)\n', (5782, 5799), False, 'import mlflow\n'), ((1279, 1294), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1292, 1294), False, 'import torch\n'), ((3671, 3692), 'numpy.array', 'np.array', (['identifiers'], {}), '(identifiers)\n', (3679, 3692), True, 'import numpy as np\n'), ((3694, 3708), 'numpy.array', 'np.array', (['true'], {}), '(true)\n', (3702, 3708), True, 'import numpy as np\n'), ((3710, 3731), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (3718, 3731), True, 'import numpy as np\n'), ((4759, 4795), 'pandas.Series', 'pd.Series', ([], {'index': 'ids', 'data': 'predicted'}), '(index=ids, data=predicted)\n', (4768, 4795), True, 'import pandas as pd\n'), ((5730, 5755), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (5742, 5755), False, 'import torch\n'), ((5849, 5885), 'mlflow.start_run', 'mlflow.start_run', ([], {'run_name': '"""predict"""'}), "(run_name='predict')\n", (5865, 5885), False, 'import mlflow\n'), ((5896, 5935), 'mlflow.log_param', 'mlflow.log_param', (['"""device"""', 'args.device'], {}), "('device', args.device)\n", (5912, 5935), False, 'import mlflow\n'), ((5945, 5988), 'mlflow.log_param', 'mlflow.log_param', (['"""distance"""', 'args.distance'], {}), "('distance', args.distance)\n", (5961, 
5988), False, 'import mlflow\n'), ((5997, 6038), 'mlflow.log_param', 'mlflow.log_param', (['"""dataset"""', 'args.dataset'], {}), "('dataset', args.dataset)\n", (6013, 6038), False, 'import mlflow\n'), ((6047, 6092), 'mlflow.log_param', 'mlflow.log_param', (['"""datapaths"""', 'args.datapaths'], {}), "('datapaths', args.datapaths)\n", (6063, 6092), False, 'import mlflow\n'), ((6102, 6147), 'mlflow.log_param', 'mlflow.log_param', (['"""batchsize"""', 'args.batchsize'], {}), "('batchsize', args.batchsize)\n", (6118, 6147), False, 'import mlflow\n'), ((6965, 6991), 'ael.utils.load_amap', 'utils.load_amap', (['args.amap'], {}), '(args.amap)\n', (6980, 6991), False, 'from ael import argparsers, loaders, utils\n'), ((7075, 7115), 'mlflow.log_param', 'mlflow.log_param', (['"""n_species"""', 'n_species'], {}), "('n_species', n_species)\n", (7091, 7115), False, 'import mlflow\n'), ((7138, 7241), 'torch.utils.data.DataLoader', 'data.DataLoader', (['testdata'], {'batch_size': 'args.batchsize', 'shuffle': '(False)', 'collate_fn': 'loaders.pad_collate'}), '(testdata, batch_size=args.batchsize, shuffle=False,\n collate_fn=loaders.pad_collate)\n', (7153, 7241), False, 'from torch.utils import data\n'), ((7313, 7337), 'ael.utils.loadAEVC', 'utils.loadAEVC', (['args.aev'], {}), '(args.aev)\n', (7327, 7337), False, 'from ael import argparsers, loaders, utils\n'), ((4688, 4719), 'pandas.Series', 'pd.Series', ([], {'index': 'ids', 'data': 'true'}), '(index=ids, data=true)\n', (4697, 4719), True, 'import pandas as pd\n'), ((6409, 6534), 'ael.loaders.PDBData', 'loaders.PDBData', (['args.dataset', 'args.distance', 'args.datapaths', 'cmap'], {'desc': '""""""', 'removeHs': 'args.removeHs', 'ligmask': 'args.ligmask'}), "(args.dataset, args.distance, args.datapaths, cmap, desc='',\n removeHs=args.removeHs, ligmask=args.ligmask)\n", (6424, 6534), False, 'from ael import argparsers, loaders, utils\n'), ((6695, 6825), 'ael.loaders.VSData', 'loaders.VSData', (['args.dataset', 'args.distance', 
'args.datapaths', 'cmap'], {'desc': '""""""', 'removeHs': 'args.removeHs', 'labelspath': 'args.vscreening'}), "(args.dataset, args.distance, args.datapaths, cmap, desc='',\n removeHs=args.removeHs, labelspath=args.vscreening)\n", (6709, 6825), False, 'from ael import argparsers, loaders, utils\n'), ((7357, 7375), 'ael.utils.loadmodel', 'utils.loadmodel', (['m'], {}), '(m)\n', (7372, 7375), False, 'from ael import argparsers, loaders, utils\n'), ((991, 1016), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1014, 1016), False, 'import torch\n'), ((2673, 2699), 'numpy.isin', 'np.isin', (['baseline_ids', 'ids'], {}), '(baseline_ids, ids)\n', (2680, 2699), True, 'import numpy as np\n'), ((2951, 2968), 'numpy.argsort', 'np.argsort', (['b_ids'], {}), '(b_ids)\n', (2961, 2968), True, 'import numpy as np\n'), ((3128, 3143), 'numpy.argsort', 'np.argsort', (['ids'], {}), '(ids)\n', (3138, 3143), True, 'import numpy as np\n'), ((5665, 5690), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5688, 5690), False, 'import torch\n'), ((6256, 6270), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (6265, 6270), False, 'import json\n')] |
import numpy as np
from hyperopt import hp
# required params:
# - embedding_size
# - lr
# - batch_size
# - max_iter
# - neg_ratio
# - contiguous_sampling
# - valid_every: set it to 0 to enable early stopping
param_space_TransE = {
# "embedding_size": hp.quniform("embedding_size", 50, 200, 10),
"embedding_size": 200,
"margin": hp.quniform("margin", 0.5, 5, 0.5),
"lr": hp.qloguniform("lr", np.log(1e-4), np.log(1e-2), 1e-4),
"batch_size": 5000,
"max_iter": 100000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 5000,
}
param_space_TransE_fb3m = {
# "embedding_size": hp.quniform("embedding_size", 50, 200, 10),
"embedding_size": 50,
"margin": hp.quniform("margin", 0.5, 5, 0.5),
"lr": hp.qloguniform("lr", np.log(1e-3), np.log(1e-2), 2e-4),
"batch_size": 5000,
"max_iter": 500000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 20000,
}
param_space_best_TransE_L2_wn18 = {
"embedding_size": 200,
"margin": 0.5,
"lr": 0.001,
"batch_size": 2000,
"max_iter": 2000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_best_TransE_L1_fb15k = {
"embedding_size": 190,
"margin": 3.5,
"lr": 0.001,
"batch_size": 5000,
"max_iter": 15000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_best_TransE_L1_fb3m = {
"embedding_size": 100,
"margin": 4.5,
"lr": 0.001,
"batch_size": 5000,
"max_iter": 120000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_DistMult = {
# "embedding_size": hp.quniform("embedding_size", 50, 200, 10),
"embedding_size": 200,
"l2_reg_lambda": hp.qloguniform("l2_reg_lambda", np.log(1e-3), np.log(1e-1), 1e-3),
"lr": hp.qloguniform("lr", np.log(1e-4), np.log(1e-2), 1e-4),
"batch_size": 5000,
"max_iter": 100000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 5000,
}
param_space_DistMult_fb3m = {
# "embedding_size": hp.quniform("embedding_size", 50, 200, 10),
"embedding_size": 50,
"l2_reg_lambda": hp.qloguniform("l2_reg_lambda", np.log(1e-3), np.log(1e-1), 1e-3),
"lr": hp.qloguniform("lr", np.log(1e-4), np.log(1e-2), 1e-4),
"batch_size": 5000,
"max_iter": 500000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 20000,
}
param_space_best_DistMult_tanh_wn18 = {
"embedding_size": 150,
"l2_reg_lambda": 0.0026,
"lr": 0.011,
"batch_size": 2000,
"max_iter": 15000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_best_DistMult_tanh_fb15k = {
"embedding_size": 200,
"l2_reg_lambda": 0.0009,
"lr": 0.001,
"batch_size": 5000,
"max_iter": 55000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_best_DistMult_tanh_fb3m = {
"embedding_size": 100,
"l2_reg_lambda": 0.054,
"lr": 0.0035,
"batch_size": 5000,
"max_iter": 60000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_NTN = {
"embedding_size": 50,
"k": 2,
"l2_reg_lambda": hp.qloguniform("l2_reg_lambda", np.log(1e-3), np.log(1e-1), 1e-3),
"lr": hp.qloguniform("lr", np.log(1e-4), np.log(1e-2), 1e-4),
"batch_size": 5000,
"max_iter": 100000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 5000,
}
param_space_best_NTN_wn18 = {
"embedding_size": 66,
"k": 2,
"l2_reg_lambda": 0.0002,
"lr": 0.001,
"batch_size": 2000,
"max_iter": 100000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_best_NTN_fb15k = {
"embedding_size": 120,
"k": 2,
"l2_reg_lambda": 0.0001,
"lr": 0.001,
"batch_size": 5000,
"max_iter": 50000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_Complex = {
# "embedding_size": hp.quniform("embedding_size", 50, 200, 10),
"embedding_size": 200,
"l2_reg_lambda": hp.qloguniform("l2_reg_lambda", np.log(1e-3), np.log(1e-1), 1e-3),
"lr": hp.qloguniform("lr", np.log(1e-4), np.log(1e-2), 1e-4),
"batch_size": 5000,
"max_iter": 100000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 5000,
}
param_space_Complex_fb3m = {
"embedding_size": 50,
"l2_reg_lambda": hp.qloguniform("l2_reg_lambda", np.log(1e-3), np.log(1e-1), 1e-3),
"lr": hp.qloguniform("lr", np.log(1e-4), np.log(1e-2), 1e-4),
"batch_size": 5000,
"max_iter": 500000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 20000,
}
param_space_best_Complex_wn18 = {
"embedding_size": 180,
"l2_reg_lambda": 0.0073,
"lr": 0.002,
"batch_size": 2000,
"max_iter": 25000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_best_Complex_tanh_fb15k = {
"embedding_size": 140,
"l2_reg_lambda": 0.0172,
"lr": 0.001,
"batch_size": 5000,
"max_iter": 80000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_best_Complex_tanh_fb3m = {
"embedding_size": 50,
"l2_reg_lambda": 0.031,
"lr": 0.0013,
"batch_size": 5000,
"max_iter": 60000,
"neg_ratio": 1,
"contiguous_sampling": False,
"valid_every": 0,
}
param_space_dict = {
"TransE_L2": param_space_TransE,
"TransE_L1": param_space_TransE,
"TransE_L2_fb3m": param_space_TransE_fb3m,
"TransE_L1_fb3m": param_space_TransE_fb3m,
"best_TransE_L2_wn18": param_space_best_TransE_L2_wn18,
"best_TransE_L1_fb15k": param_space_best_TransE_L1_fb15k,
"best_TransE_L1_fb3m": param_space_best_TransE_L1_fb3m,
"DistMult": param_space_DistMult,
"DistMult_tanh": param_space_DistMult,
"DistMult_tanh_fb3m": param_space_DistMult_fb3m,
"best_DistMult_tanh_wn18": param_space_best_DistMult_tanh_wn18,
"best_DistMult_tanh_fb15k": param_space_best_DistMult_tanh_fb15k,
"best_DistMult_tanh_fb3m": param_space_best_DistMult_tanh_fb3m,
"NTN": param_space_NTN,
"best_NTN_wn18": param_space_best_NTN_wn18,
"best_NTN_fb15k": param_space_best_NTN_fb15k,
"Complex": param_space_Complex,
"Complex_tanh": param_space_Complex,
"Complex_fb3m": param_space_Complex_fb3m,
"Complex_tanh_fb3m": param_space_Complex_fb3m,
"best_Complex_wn18": param_space_best_Complex_wn18,
"best_Complex_tanh_fb15k": param_space_best_Complex_tanh_fb15k,
"best_Complex_tanh_fb3m": param_space_best_Complex_tanh_fb3m,
}
int_params = [
"embedding_size", "batch_size", "max_iter", "neg_ratio", "valid_every", "k",
"fe_size", "hidden_size", "hidden_layers",
]
class ModelParamSpace:
    """Accessor for a learner's hyperparameter space.

    Validates the learner name against ``param_space_dict`` and converts
    hyperopt-sampled float values back to ints for the keys listed in
    ``int_params``.
    """
    def __init__(self, learner_name):
        s = "Invalid model name! (Check model_param_space.py)"
        assert learner_name in param_space_dict, s
        self.learner_name = learner_name
    def _build_space(self):
        """Return the (search or fixed) parameter space for this learner."""
        return param_space_dict[self.learner_name]
    def _convert_into_param(self, param_dict):
        """Recursively cast int-typed hyperparameters, mutating *param_dict* in place.

        Non-dict inputs are returned unchanged; list/tuple values have their
        elements converted recursively.
        """
        if isinstance(param_dict, dict):
            for k, v in param_dict.items():
                if k in int_params:
                    param_dict[k] = int(v)
                # Idiom: one isinstance call with a tuple, and iterate the
                # elements directly instead of indexing by range(len(v)).
                elif isinstance(v, (list, tuple)):
                    for item in v:
                        self._convert_into_param(item)
                elif isinstance(v, dict):
                    self._convert_into_param(v)
        return param_dict
| [
"numpy.log",
"hyperopt.hp.quniform"
] | [((342, 376), 'hyperopt.hp.quniform', 'hp.quniform', (['"""margin"""', '(0.5)', '(5)', '(0.5)'], {}), "('margin', 0.5, 5, 0.5)\n", (353, 376), False, 'from hyperopt import hp\n'), ((710, 744), 'hyperopt.hp.quniform', 'hp.quniform', (['"""margin"""', '(0.5)', '(5)', '(0.5)'], {}), "('margin', 0.5, 5, 0.5)\n", (721, 744), False, 'from hyperopt import hp\n'), ((409, 423), 'numpy.log', 'np.log', (['(0.0001)'], {}), '(0.0001)\n', (415, 423), True, 'import numpy as np\n'), ((423, 435), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (429, 435), True, 'import numpy as np\n'), ((777, 790), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (783, 790), True, 'import numpy as np\n'), ((791, 803), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (797, 803), True, 'import numpy as np\n'), ((1792, 1805), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (1798, 1805), True, 'import numpy as np\n'), ((1806, 1817), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (1812, 1817), True, 'import numpy as np\n'), ((1858, 1872), 'numpy.log', 'np.log', (['(0.0001)'], {}), '(0.0001)\n', (1864, 1872), True, 'import numpy as np\n'), ((1872, 1884), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (1878, 1884), True, 'import numpy as np\n'), ((2200, 2213), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (2206, 2213), True, 'import numpy as np\n'), ((2214, 2225), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (2220, 2225), True, 'import numpy as np\n'), ((2266, 2280), 'numpy.log', 'np.log', (['(0.0001)'], {}), '(0.0001)\n', (2272, 2280), True, 'import numpy as np\n'), ((2280, 2292), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (2286, 2292), True, 'import numpy as np\n'), ((3261, 3274), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (3267, 3274), True, 'import numpy as np\n'), ((3275, 3286), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (3281, 3286), True, 'import numpy as np\n'), ((3327, 3341), 'numpy.log', 'np.log', (['(0.0001)'], 
{}), '(0.0001)\n', (3333, 3341), True, 'import numpy as np\n'), ((3341, 3353), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (3347, 3353), True, 'import numpy as np\n'), ((4147, 4160), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (4153, 4160), True, 'import numpy as np\n'), ((4161, 4172), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (4167, 4172), True, 'import numpy as np\n'), ((4213, 4227), 'numpy.log', 'np.log', (['(0.0001)'], {}), '(0.0001)\n', (4219, 4227), True, 'import numpy as np\n'), ((4227, 4239), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (4233, 4239), True, 'import numpy as np\n'), ((4486, 4499), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (4492, 4499), True, 'import numpy as np\n'), ((4500, 4511), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (4506, 4511), True, 'import numpy as np\n'), ((4552, 4566), 'numpy.log', 'np.log', (['(0.0001)'], {}), '(0.0001)\n', (4558, 4566), True, 'import numpy as np\n'), ((4566, 4578), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (4572, 4578), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import threading
import gc
from time import time, sleep
import numpy as np
import cv2
import wx
from slim_anywhere_v2 import SlimFace
# Module-wide playback rate constant (frames per second).
global_fps = 24
# presumably disables Ubuntu's global-menu proxy so the wx menu bar renders
# inside the window — TODO confirm on a non-Ubuntu desktop this is a no-op.
os.environ["UBUNTU_MENUPROXY"]="0"
class ImagePanel(wx.Panel):
    """Double-buffered panel that displays OpenCV BGR frames as a wx bitmap."""

    def __init__(self, parent, size):
        super(ImagePanel, self).__init__(parent, size=size)
        self.SetDoubleBuffered(True)
        self.size = size
        # Start from an all-black frame so the bitmap exists before any video.
        blank = np.zeros((size[1], size[0], 3), dtype=np.uint8)
        self.bmp = wx.BitmapFromBuffer(size[0], size[1], blank)
        self.Bind(wx.EVT_PAINT, self.on_paint)

    def on_paint(self, evt):
        """Blit the cached bitmap onto the panel."""
        wx.BufferedPaintDC(self).DrawBitmap(self.bmp, 0, 0)

    def draw_frame(self, im):
        """Convert a BGR frame to RGB, scale it to the panel size, and repaint."""
        rgb = cv2.resize(cv2.cvtColor(im, cv2.COLOR_BGR2RGB), self.size)
        self.bmp.CopyFromBuffer(rgb)
        self.Refresh()
class MainFrame(wx.Frame):
    """Top-level window of the Slim Face editor.

    Left panel: six strength sliders plus RUN/Next buttons and a progress
    label; right panel: an ImagePanel previewing the current video frame with
    the face-slimming result applied.
    """
    def __init__(self, parent, size, title=''):
        super(MainFrame, self).__init__(parent, size=size, title=title)
        self.CreateMenu()
        self.InitUI()
        self.video_capture = None              # cv2.VideoCapture once a file is opened
        self.slim = SlimFace()
        self.slim_params = [0, 0, 0, 0, 0, 0]  # raw slider values, -50..50 each
        self.cover = None                      # BGR frame the sliders preview on
        self.choose_frame = False              # True once the user pressed "Next"
    def CreateMenu(self):
        """Install a File menu with a single Open item."""
        menu_bar = wx.MenuBar()
        menu = wx.Menu()
        item_open = wx.MenuItem(menu, id=wx.ID_OPEN, text='open',
                                kind=wx.ITEM_NORMAL)
        menu.Append(item_open)
        menu.AppendSeparator()
        menu_bar.Append(menu, title='File')
        self.SetMenuBar(menu_bar)
        self.Bind(wx.EVT_MENU, self.menu_handler)
    def menu_handler(self, event):
        """Dispatch menu events; Open shows a file picker and loads the video."""
        id = event.GetId()
        if id == wx.ID_OPEN:
            # NOTE(review): wx wildcards are "description|pattern" pairs; the
            # trailing bare patterns here have no descriptions — verify the
            # dialog filter list displays as intended.
            file_wildcard = 'Videos(*.mp4)|*.mp4|*.avi|*.mov|*.jpg|*.png'
            dlg = wx.FileDialog(self, "Open Video Or Picture ... ",
                                os.getcwd(),
                                style=wx.FD_OPEN | wx.FD_CHANGE_DIR,
                                wildcard=file_wildcard)
            if dlg.ShowModal() == wx.ID_OK:
                self.open_video(dlg.GetPath())
            dlg.Destroy()
    def InitUI(self):
        """Lay out the control panel (left) and the preview panel (right)."""
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.left_panel = wx.Panel(self, size=(300, 600))
        self.create_sliders(self.left_panel)
        self.create_buttons(self.left_panel)
        self.create_next_frame(self.left_panel)
        hbox.Add(self.left_panel, 1, wx.FIXED_MINSIZE)
        self.image_panel = ImagePanel(self, size=(800, 600))
        hbox.Add(self.image_panel, 1, wx.EXPAND)
        self.SetSizer(hbox)
    def open_video(self, path):
        """Open *path* with OpenCV, releasing any previous capture, and show its first frame."""
        self.video_path = path
        if self.video_capture:
            self.video_capture.release()
        self.video_path = path
        self.video_capture = cv2.VideoCapture(path)
        _, im = self.video_capture.read()
        self.cover = im.copy()
        self.image_panel.draw_frame(self.cover)
    def get_next_frame(self, x):
        """'Next' button handler: advance one frame and preview it."""
        self.choose_frame = True
        _, im = self.video_capture.read()
        self.cover = im.copy()
        self.image_panel.draw_frame(self.cover)
    def create_sliders(self, parent):
        """Create six horizontal sliders (-50..50), one per slim parameter.

        Each slider's wx name is its index into ``self.slim_params``.
        """
        for i in range(6):
            sl = wx.Slider(parent, size=(100, -1), pos=(60, i * 100),
                           value=0, minValue=-50, maxValue=50,
                           style=wx.SL_HORIZONTAL,
                           name=str(i))
            sl.Bind(wx.EVT_SCROLL_CHANGED, self.OnSliderScroll)
    def create_buttons(self, parent):
        """Create the RUN button and the progress label."""
        self.eval_button = wx.Button(parent, -1, "RUN", pos=(15, 548))
        self.eval_button.Bind(wx.EVT_BUTTON, self.OnBtnClick)
        self.msg_text = wx.StaticText(parent, -1, label="", pos=(15, 580))
    def create_next_frame(self, parent):
        """Create the Next button for stepping through frames."""
        self.eval_button1 = wx.Button(parent, -1, "Next", pos=(110, 548))
        self.eval_button1.Bind(wx.EVT_BUTTON, self.get_next_frame)
    def OnSliderScroll(self, e):
        """Record the moved slider's value and re-render the preview frame."""
        slider = e.GetEventObject()
        name = slider.GetName()
        val = slider.GetValue()
        self.slim_params[int(name)] = val
        # BUG FIX: was `print("!!!!!!", name, val. self.slim_params)` — the
        # stray dot made it attribute access on an int (AttributeError on
        # every slider move); a comma was intended.
        print("!!!!!!", name, val, self.slim_params)
        self.apply_slim_action()
    def apply_slim_action(self):
        """Push the current slider strengths into SlimFace and redraw the preview.

        Each raw slider value (-50..50) is scaled into that parameter's own
        working range before being handed to SlimFace.
        """
        self.slim.set_slim_strength(cheek_strength=self.slim_params[0] / 100 * 4,
                                    humerus_strength=self.slim_params[1] / 100 * 0.4,
                                    chin_strength=self.slim_params[2] / 100 * 3,
                                    forehead_strength=self.slim_params[3] / 100 * 1.5,
                                    pull_chin_strength=self.slim_params[4] / 100 * 1,
                                    pull_forehead_strength=self.slim_params[5] / 100 * 2.5,
                                    )
        self.slim.update_pixel_list(self.cover)
        res = self.slim.slim_handler(self.cover)
        self.image_panel.draw_frame(res)
    def OnBtnClick(self, event):
        """RUN handler: rewind the video if frames were skipped, then process it on a worker thread."""
        if self.choose_frame:
            if self.video_capture:
                self.video_capture.release()
            self.video_capture = cv2.VideoCapture(self.video_path)
        st = threading.Thread(target=self.apply_video, args=(event, ))
        st.start()
    def apply_video(self, e):
        """Slim every remaining frame, saving PNGs and updating the UI.

        Runs on a worker thread; all widget updates go through wx.CallAfter
        so they execute on the GUI thread.
        """
        save_dir = os.path.basename(self.video_path)
        save_dir = os.path.splitext(save_dir)[0]
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        frame_count = self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT)
        idx = 0
        while True:
            _, im = self.video_capture.read()
            if im is None:
                break
            print("idx >>>>>>> ", idx)
            res = self.slim.slim_handler(im)
            cv2.imwrite(os.path.join(save_dir, "%06d.png" % idx), res)
            wx.CallAfter(self.image_panel.draw_frame, res)
            wx.CallAfter(self.msg_text.SetLabel, "%d/%d" % (idx, frame_count))
            gc.collect()
            idx += 1
def main():
    """Launch the Slim Face editor window and enter the wx event loop."""
    application = wx.App()
    window = MainFrame(None, size=(1200, 600), title='Slim Face')
    window.Centre()
    window.Show()
    application.MainLoop()
if __name__ == '__main__':
    main()
    # Scratch single-image benchmark kept for reference (not executed):
    # slim = SlimFace()
    # im = cv2.imread('./input/捕获.PNG')
    # sp = time()
    # slim.set_slim_strength(cheek_strength=2, humerus_strength=2, chin_strength=3)
    # res = slim.slim_handler(im)
    # print(time() - sp)
    # cv2.imwrite('./xx.jpg', res)
"wx.Menu",
"os.mkdir",
"wx.CallAfter",
"wx.BufferedPaintDC",
"gc.collect",
"os.path.join",
"cv2.cvtColor",
"os.path.exists",
"wx.Panel",
"slim_anywhere_v2.SlimFace",
"cv2.resize",
"wx.MenuBar",
"threading.Thread",
"wx.BoxSizer",
"os.path.basename",
"wx.StaticText",
"wx.App",
"os.ge... | [((6479, 6487), 'wx.App', 'wx.App', ([], {}), '()\n', (6485, 6487), False, 'import wx\n'), ((427, 484), 'numpy.zeros', 'np.zeros', (['(self.size[1], self.size[0], 3)'], {'dtype': 'np.uint8'}), '((self.size[1], self.size[0], 3), dtype=np.uint8)\n', (435, 484), True, 'import numpy as np\n'), ((504, 555), 'wx.BitmapFromBuffer', 'wx.BitmapFromBuffer', (['self.size[0]', 'self.size[1]', 'im'], {}), '(self.size[0], self.size[1], im)\n', (523, 555), False, 'import wx\n'), ((646, 670), 'wx.BufferedPaintDC', 'wx.BufferedPaintDC', (['self'], {}), '(self)\n', (664, 670), False, 'import wx\n'), ((760, 795), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (772, 795), False, 'import cv2\n'), ((816, 848), 'cv2.resize', 'cv2.resize', (['im_result', 'self.size'], {}), '(im_result, self.size)\n', (826, 848), False, 'import cv2\n'), ((1168, 1178), 'slim_anywhere_v2.SlimFace', 'SlimFace', ([], {}), '()\n', (1176, 1178), False, 'from slim_anywhere_v2 import SlimFace\n'), ((1333, 1345), 'wx.MenuBar', 'wx.MenuBar', ([], {}), '()\n', (1343, 1345), False, 'import wx\n'), ((1362, 1371), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (1369, 1371), False, 'import wx\n'), ((1393, 1459), 'wx.MenuItem', 'wx.MenuItem', (['menu'], {'id': 'wx.ID_OPEN', 'text': '"""open"""', 'kind': 'wx.ITEM_NORMAL'}), "(menu, id=wx.ID_OPEN, text='open', kind=wx.ITEM_NORMAL)\n", (1404, 1459), False, 'import wx\n'), ((2245, 2271), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (2256, 2271), False, 'import wx\n'), ((2299, 2330), 'wx.Panel', 'wx.Panel', (['self'], {'size': '(300, 600)'}), '(self, size=(300, 600))\n', (2307, 2330), False, 'import wx\n'), ((2860, 2882), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (2876, 2882), False, 'import cv2\n'), ((3939, 3982), 'wx.Button', 'wx.Button', (['parent', '(-1)', '"""RUN"""'], {'pos': '(15, 548)'}), "(parent, -1, 'RUN', pos=(15, 548))\n", (3948, 3982), False, 'import 
wx\n'), ((4069, 4119), 'wx.StaticText', 'wx.StaticText', (['parent', '(-1)'], {'label': '""""""', 'pos': '(15, 580)'}), "(parent, -1, label='', pos=(15, 580))\n", (4082, 4119), False, 'import wx\n'), ((4190, 4235), 'wx.Button', 'wx.Button', (['parent', '(-1)', '"""Next"""'], {'pos': '(110, 548)'}), "(parent, -1, 'Next', pos=(110, 548))\n", (4199, 4235), False, 'import wx\n'), ((5515, 5571), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.apply_video', 'args': '(event,)'}), '(target=self.apply_video, args=(event,))\n', (5531, 5571), False, 'import threading\n'), ((5642, 5675), 'os.path.basename', 'os.path.basename', (['self.video_path'], {}), '(self.video_path)\n', (5658, 5675), False, 'import os\n'), ((5468, 5501), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.video_path'], {}), '(self.video_path)\n', (5484, 5501), False, 'import cv2\n'), ((5695, 5721), 'os.path.splitext', 'os.path.splitext', (['save_dir'], {}), '(save_dir)\n', (5711, 5721), False, 'import os\n'), ((5740, 5764), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (5754, 5764), False, 'import os\n'), ((5778, 5796), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (5786, 5796), False, 'import os\n'), ((6216, 6262), 'wx.CallAfter', 'wx.CallAfter', (['self.image_panel.draw_frame', 'res'], {}), '(self.image_panel.draw_frame, res)\n', (6228, 6262), False, 'import wx\n'), ((6342, 6408), 'wx.CallAfter', 'wx.CallAfter', (['self.msg_text.SetLabel', "('%d/%d' % (idx, frame_count))"], {}), "(self.msg_text.SetLabel, '%d/%d' % (idx, frame_count))\n", (6354, 6408), False, 'import wx\n'), ((6421, 6433), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6431, 6433), False, 'import gc\n'), ((1950, 1961), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1959, 1961), False, 'import os\n'), ((6109, 6149), 'os.path.join', 'os.path.join', (['save_dir', "('%06d.png' % idx)"], {}), "(save_dir, '%06d.png' % idx)\n", (6121, 6149), False, 'import os\n')] |
#!/usr/bin/env python
"""
Draws a random x-y lineplot and makes a tool which
shows the closet point on the lineplot to the mouse position.
"""
#Major library imports
from numpy.random import random_sample
from numpy import arange
#Enthought library imports
from enable.api import Component, ComponentEditor, BaseTool
from traits.api import HasTraits, Instance, Any, Int
from traitsui.api import View, UItem
#Chaco imports
from chaco.api import Plot, ArrayPlotData, AbstractOverlay, ArrayDataSource
#===============================================================================
# # Create the Chaco custom tool
#===============================================================================
class HittestTool(BaseTool, AbstractOverlay):
    '''Tool + overlay that marks the point on a LinePlot nearest the mouse.

    Uses LinePlot.hittest() to find the closest point on the line to the
    current mouse position and, acting as an overlay, draws a small square
    there.
    '''
    # A reference to the lineplot the tool acts on
    line_plot = Any()
    # Whether to draw the overlay
    visible=True
    # The point to draw on the plot, or None if no point
    pt = Any()
    # Maximum distance (in pixels) from the line for a hit to register
    threshold = Int(40)
    def normal_mouse_move(self, event):
        # Compute the nearest point and draw it whenever the mouse moves
        x,y = event.x, event.y
        if self.line_plot.orientation == "h":
            x,y = self.component.map_data((x,y))
        else:
            # Vertical orientation: screen axes are swapped relative to data.
            x,y = self.component.map_data((y,x))
        # Map back to screen space, since hittest() works in screen coords.
        x,y = self.line_plot.map_screen((x,y))
        self.pt = self.line_plot.hittest((x,y), threshold=self.threshold)
        self.request_redraw()
    def overlay(self, plot, gc, view_bounds=None, mode="normal"):
        # If we have a point, draw it to the screen as a small 4x4 square
        # centered on the hit position.
        if self.pt is not None:
            x,y = plot.map_screen(self.pt)
            gc.draw_rect((int(x)-2, int(y)-2, 4, 4))
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
    """Build a vertical line plot of 10 random points with the hit-test tool attached."""
    xs = ArrayDataSource(arange(10), sort_order="ascending")
    ys = random_sample(10)
    plot = Plot(ArrayPlotData(x=xs, y=ys))
    plot.orientation = 'v'
    renderer = plot.plot(("x", "y"))[0]
    # Register the same object both as a tool (mouse events) and as an
    # overlay (drawing).
    hit_tool = HittestTool(component=plot, line_plot=renderer)
    plot.tools.append(hit_tool)
    plot.overlays.append(hit_tool)
    return plot
#===============================================================================
# Attributes to use for the plot view.
#===============================================================================
# Window size and title consumed by the Demo view below.
size = (800,600)
title="LinePlot Hittest Demo"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
    """Traits UI wrapper exposing the hit-test plot in a resizable window."""
    # The Chaco plot component shown by the view.
    plot = Instance(Component)
    traits_view = View(
        UItem("plot", editor=ComponentEditor(size=size)),
        resizable=True,
        title=title
    )
    def _plot_default(self):
        # Lazily build the plot the first time the trait is accessed.
        return _create_plot_component()
# Module-level instance picked up by the Chaco example runner.
demo = Demo()
if __name__ == '__main__':
    demo.configure_traits()
#--EOF---
| [
"traits.api.Instance",
"traits.api.Any",
"numpy.random.random_sample",
"traits.api.Int",
"enable.api.ComponentEditor",
"chaco.api.ArrayDataSource",
"numpy.arange",
"chaco.api.ArrayPlotData",
"chaco.api.Plot"
] | [((1011, 1016), 'traits.api.Any', 'Any', ([], {}), '()\n', (1014, 1016), False, 'from traits.api import HasTraits, Instance, Any, Int\n'), ((1140, 1145), 'traits.api.Any', 'Any', ([], {}), '()\n', (1143, 1145), False, 'from traits.api import HasTraits, Instance, Any, Int\n'), ((1230, 1237), 'traits.api.Int', 'Int', (['(40)'], {}), '(40)\n', (1233, 1237), False, 'from traits.api import HasTraits, Instance, Any, Int\n'), ((2216, 2226), 'numpy.arange', 'arange', (['(10)'], {}), '(10)\n', (2222, 2226), False, 'from numpy import arange\n'), ((2235, 2277), 'chaco.api.ArrayDataSource', 'ArrayDataSource', (['x'], {'sort_order': '"""ascending"""'}), "(x, sort_order='ascending')\n", (2250, 2277), False, 'from chaco.api import Plot, ArrayPlotData, AbstractOverlay, ArrayDataSource\n'), ((2286, 2303), 'numpy.random.random_sample', 'random_sample', (['(10)'], {}), '(10)\n', (2299, 2303), False, 'from numpy.random import random_sample\n'), ((2334, 2357), 'chaco.api.ArrayPlotData', 'ArrayPlotData', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (2347, 2357), False, 'from chaco.api import Plot, ArrayPlotData, AbstractOverlay, ArrayDataSource\n'), ((2370, 2378), 'chaco.api.Plot', 'Plot', (['pd'], {}), '(pd)\n', (2374, 2378), False, 'from chaco.api import Plot, ArrayPlotData, AbstractOverlay, ArrayDataSource\n'), ((3150, 3169), 'traits.api.Instance', 'Instance', (['Component'], {}), '(Component)\n', (3158, 3169), False, 'from traits.api import HasTraits, Instance, Any, Int\n'), ((3224, 3250), 'enable.api.ComponentEditor', 'ComponentEditor', ([], {'size': 'size'}), '(size=size)\n', (3239, 3250), False, 'from enable.api import Component, ComponentEditor, BaseTool\n')] |
# import tensorflow as tf
import numpy as np
from torchvision.transforms.transforms import CenterCrop
import tqdm
import torch.nn.functional as F
# import sklearn
import matplotlib.pyplot as plt
import torch
# import tensorflow_datasets as tfds
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import TensorDataset, sampler, DataLoader
import urllib
import tarfile
import os
# Shuffle buffer size (leftover from the TF version, see commented lambda).
BUFFER_SIZE = 10000
# Common image side length the loaders resize to.
SIZE = 32
# getImagesDS = lambda X, n: np.concatenate([x[0].numpy()[None,] for x in X.take(n)])
# Per-channel (mean, std) normalisation statistics for each dataset.
CIFAR10_TRAIN_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_TRAIN_STD = (0.247, 0.243, 0.261)
CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
FACESCRUB_TRAIN_MEAN = (0.5708, 0.5905, 0.4272)
FACESCRUB_TRAIN_STD = (0.2058, 0.2275, 0.2098)
TINYIMAGENET_TRAIN_MEAN = (0.5141, 0.5775, 0.3985)
TINYIMAGENET_TRAIN_STD = (0.2927, 0.2570, 0.1434)
SVHN_TRAIN_MEAN = (0.3522, 0.4004, 0.4463)
SVHN_TRAIN_STD = (0.1189, 0.1377, 0.1784)
def getImagesDS(X, n):
    """Stack the images of the first *n* samples of dataset *X* into one array.

    Each ``X[i]`` is an (image_tensor, label) pair; the result has shape
    ``(n, *image_shape)``.
    """
    # Idiom: build the per-image list with a comprehension instead of a
    # manual append loop (same `[None,]` new-axis trick as before).
    return np.concatenate([X[i][0].numpy()[None,] for i in range(n)])
class DatasetSplit(torch.utils.data.Dataset):
    """View of *dataset* restricted to the samples at the given indices."""

    def __init__(self, dataset, idxs):
        self.dataset = dataset
        self.idxs = list(idxs)

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, item):
        # Translate the local index into the wrapped dataset's index space.
        sample, target = self.dataset[self.idxs[item]]
        return sample, target
def remove_class_loader(some_dataset, label_class, batch_size=16, num_workers=2):
    """Split *some_dataset* into two DataLoaders by target class.

    Returns ``(kept_loader, excluded_loader)``: the first iterates samples
    whose target differs from *label_class*, the second the samples that
    match it. Both use SubsetRandomSampler, so iteration order is random.
    """
    targets = some_dataset.targets
    # Idiom: build the two index lists with comprehensions instead of a
    # nested helper with a manual append loop (same partition as before).
    kept_indices = [i for i, t in enumerate(targets) if t != label_class]
    excluded_indices = [i for i, t in enumerate(targets) if t == label_class]
    new_data_loader = DataLoader(
        some_dataset, shuffle=False, num_workers=num_workers, batch_size=batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(kept_indices))
    excluded_data_loader = DataLoader(
        some_dataset, shuffle=False, num_workers=num_workers, batch_size=batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(excluded_indices))
    return new_data_loader, excluded_data_loader
def noniid_unlabel(dataset, num_users, label_rate, noniid_ratio = 0.2, num_class = 10):
    """Partition *dataset* non-IID across *num_users* clients and carve out a
    global labeled subset.

    Each client receives shards covering ``noniid_ratio * num_class`` classes;
    a fraction *label_rate* of all indices is drawn (uniformly) as the labeled
    pool and removed from every client's unlabeled set.

    Returns ``(dict_users_labeled, dict_users_unlabeled)`` where the first is
    a set of labeled sample indices and the second maps client id -> set of
    unlabeled sample indices.
    """
    num_class_per_client = int(noniid_ratio * num_class)
    # One shard per (client, class) slot; each shard holds num_imgs indices.
    num_shards, num_imgs = num_class_per_client * num_users, int(len(dataset)/num_users/num_class_per_client)
    idx_shard = [i for i in range(num_shards)]
    dict_users_unlabeled = {i: np.array([], dtype='int64') for i in range(num_users)}
    idxs = np.arange(len(dataset))
    labels = np.arange(len(dataset))
    for i in range(len(dataset)):
        labels[i] = dataset[i][1]
    num_items = int(len(dataset)/num_users)
    dict_users_labeled = set()  # NOTE: reassigned below; this init is unused
    # sort sample indices by their label so shards are class-contiguous
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]  # columns ordered by label
    idxs = idxs_labels[0,:]
    # divide the shards and assign them randomly to clients
    for i in range(num_users):
        rand_set = set(np.random.choice(idx_shard, num_class_per_client, replace=False))
        idx_shard = list(set(idx_shard) - rand_set)
        for rand in rand_set:
            dict_users_unlabeled[i] = np.concatenate((dict_users_unlabeled[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)
    # Global labeled pool drawn uniformly over all samples.
    dict_users_labeled = set(np.random.choice(list(idxs), int(len(idxs) * label_rate), replace=False))
    for i in range(num_users):
        dict_users_unlabeled[i] = set(dict_users_unlabeled[i])
        # dict_users_labeled = dict_users_labeled | set(np.random.choice(list(dict_users_unlabeled[i]), int(num_items * label_rate), replace=False))
        dict_users_unlabeled[i] = dict_users_unlabeled[i] - dict_users_labeled
    return dict_users_labeled, dict_users_unlabeled
def noniid_alllabel(dataset, num_users, noniid_ratio = 0.2, num_class = 10):
    """Partition *dataset* non-IID across *num_users* clients (all labeled).

    Each client receives shards covering ``noniid_ratio * num_class`` classes.
    Returns a dict mapping client id -> set of sample indices.
    """
    num_class_per_client = int(noniid_ratio * num_class)
    # One shard per (client, class) slot; each shard holds num_imgs indices.
    num_shards, num_imgs = num_class_per_client * num_users, int(len(dataset)/num_users/num_class_per_client)
    idx_shard = [i for i in range(num_shards)]
    dict_users_labeled = {i: np.array([], dtype='int64') for i in range(num_users)}
    idxs = np.arange(len(dataset))
    labels = np.arange(len(dataset))
    for i in range(len(dataset)):
        labels[i] = dataset[i][1]
    num_items = int(len(dataset)/num_users)
    # sort sample indices by their label so shards are class-contiguous
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]  # columns ordered by label
    idxs = idxs_labels[0,:]
    # divide the shards and assign them randomly to clients
    for i in range(num_users):
        rand_set = set(np.random.choice(idx_shard, num_class_per_client, replace=False))
        idx_shard = list(set(idx_shard) - rand_set)
        for rand in rand_set:
            dict_users_labeled[i] = np.concatenate((dict_users_labeled[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)
    for i in range(num_users):
        dict_users_labeled[i] = set(dict_users_labeled[i])
    return dict_users_labeled
def load_fmnist():
    """Return (train, test) FashionMNIST TensorDatasets.

    Images become 3-channel float tensors, resized to 32x32 and scaled to
    [-1, 1]; labels are LongTensors. Downloads the data on first use.
    """
    train_raw = datasets.FashionMNIST(root='./data', train=True, download=True)
    test_raw = datasets.FashionMNIST(root='./data', train=False)

    def _prepare(images, labels):
        # 1x28x28 grayscale -> 3x28x28 by channel replication.
        arr = np.tile(np.array(images)[:, None, :, :], (1, 3, 1, 1))
        x = F.interpolate(torch.Tensor(arr), (32, 32))
        # Scale [0, 255] -> [-1, 1] and clamp.
        x = torch.clip(x / (255 / 2) - 1, -1.0, 1.0)
        y = torch.Tensor(np.array(labels)).type(torch.LongTensor)
        return TensorDataset(x, y)

    # Need a different way to denormalize
    return _prepare(train_raw.data, train_raw.targets), _prepare(test_raw.data, test_raw.targets)
def load_mnist():
    """Return (train, test) MNIST TensorDatasets.

    Images become 3-channel float tensors, resized to 32x32 and scaled to
    [-1, 1]; labels are LongTensors. Downloads the data on first use.
    """
    train_raw = datasets.MNIST(root='./data', train=True, download=True)
    test_raw = datasets.MNIST(root='./data', train=False)

    def _prepare(images, labels):
        # 1x28x28 grayscale -> 3x28x28 by channel replication.
        arr = np.tile(np.array(images)[:, None, :, :], (1, 3, 1, 1))
        x = F.interpolate(torch.Tensor(arr), (32, 32))
        # Scale [0, 255] -> [-1, 1] and clamp.
        x = torch.clip(x / (255 / 2) - 1, -1.0, 1.0)
        y = torch.Tensor(np.array(labels)).type(torch.LongTensor)
        return TensorDataset(x, y)

    # Need a different way to denormalize
    return _prepare(train_raw.data, train_raw.targets), _prepare(test_raw.data, test_raw.targets)
def get_mnist_bothloader(batch_size=16, num_workers=2, shuffle=True, num_client = 1, collude_use_public = False):
    """Build per-client MNIST training loaders plus one shared test loader.

    Args:
        batch_size: dataloader batch size
        num_workers: dataloader worker processes
        shuffle: whether to shuffle each client's training data
        num_client: number of clients; the training set is split into
            ``num_client`` equal contiguous shards
        collude_use_public: unused, kept for interface compatibility
    Returns: (list of train DataLoaders, test DataLoader)
    """
    train_set, test_set = load_mnist()
    if num_client == 1:
        train_loaders = [torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                                     shuffle=shuffle, num_workers=num_workers)]
    elif num_client > 1:
        share = len(train_set) // num_client
        train_loaders = []
        for i in range(num_client):
            shard = torch.utils.data.Subset(train_set, list(range(i * share, (i + 1) * share)))
            train_loaders.append(DataLoader(
                shard, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size))
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                              shuffle=False, num_workers=num_workers)
    return train_loaders, test_loader
def get_fmnist_bothloader(batch_size=16, num_workers=2, shuffle=True, num_client = 1, collude_use_public = False):
    """Build per-client FashionMNIST training loaders plus one shared test loader.

    Args:
        batch_size: dataloader batch size
        num_workers: dataloader worker processes
        shuffle: whether to shuffle each client's training data
        num_client: number of clients; the training set is split into
            ``num_client`` equal contiguous shards
        collude_use_public: unused, kept for interface compatibility
    Returns: (list of train DataLoaders, test DataLoader)
    """
    train_set, test_set = load_fmnist()
    if num_client == 1:
        train_loaders = [torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                                     shuffle=shuffle, num_workers=num_workers)]
    elif num_client > 1:
        share = len(train_set) // num_client
        train_loaders = []
        for i in range(num_client):
            shard = torch.utils.data.Subset(train_set, list(range(i * share, (i + 1) * share)))
            train_loaders.append(DataLoader(
                shard, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size))
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                              shuffle=False, num_workers=num_workers)
    return train_loaders, test_loader
def get_facescrub_bothloader(batch_size=16, num_workers=2, shuffle=True, num_client = 1, collude_use_public = False):
    """ return FaceScrub training dataloaders (one per client) and a test dataloader
    Args:
        batch_size: dataloader batchsize
        num_workers: dataloader num_works
        shuffle: whether to shuffle each client's training shard
        num_client: number of clients the training set is split across
        collude_use_public: unused here, kept for interface compatibility
    Returns: (list of train DataLoaders, test DataLoader)
    """
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(FACESCRUB_TRAIN_MEAN, FACESCRUB_TRAIN_STD)
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(FACESCRUB_TRAIN_MEAN, FACESCRUB_TRAIN_STD)
    ])
    # SECURITY NOTE(review): fetches and runs code via os.system / shell=True
    # on first use; fine for a trusted research setup, but not hardened —
    # prefer subprocess.run([...], shell=False) if this ever leaves the lab.
    if not os.path.isdir("./facescrub-dataset/32x32/train"):
        os.system("git clone https://github.com/theothings/facescrub-dataset.git")
        import subprocess
        subprocess.call("python prepare_facescrub.py", shell=True)
    facescrub_training = datasets.ImageFolder('facescrub-dataset/32x32/train', transform=transform_train)
    facescrub_testing = datasets.ImageFolder('facescrub-dataset/32x32/validate', transform=transform_test)
    if num_client == 1:
        facescrub_training_loader = [torch.utils.data.DataLoader(facescrub_training, batch_size=batch_size, shuffle=shuffle,
            num_workers=num_workers)]
    elif num_client > 1:
        facescrub_training_loader = []
        # Split the training set into num_client equal contiguous shards.
        for i in range(num_client):
            mnist_training_subset = torch.utils.data.Subset(facescrub_training, list(range(i * (len(facescrub_training)//num_client), (i+1) * (len(facescrub_training)//num_client))))
            subset_training_loader = DataLoader(
                mnist_training_subset, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
            facescrub_training_loader.append(subset_training_loader)
    facescrub_testing_loader = torch.utils.data.DataLoader(facescrub_testing, batch_size=batch_size, shuffle=False,
        num_workers=num_workers)
    return facescrub_training_loader, facescrub_testing_loader
def get_tinyimagenet_bothloader(batch_size=16, num_workers=2, shuffle=True, num_client = 1, collude_use_public = False):
    """ return Tiny-ImageNet training dataloaders (one per client) and a test dataloader
    Args:
        batch_size: dataloader batchsize
        num_workers: dataloader num_works
        shuffle: whether to shuffle each client's training shard
        num_client: number of clients the training set is split across
        collude_use_public: unused here, kept for interface compatibility
    Returns: (list of train DataLoaders, test DataLoader)
    """
    transform_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(TINYIMAGENET_TRAIN_MEAN, TINYIMAGENET_TRAIN_STD)
    ])
    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize(TINYIMAGENET_TRAIN_MEAN, TINYIMAGENET_TRAIN_STD)
    ])
    # NOTE(review): runs a helper script through the shell on first use;
    # prefer subprocess.run([...], shell=False) outside a trusted setup.
    if not os.path.isdir("./tiny-imagenet-200/train"):
        import subprocess
        subprocess.call("python prepare_tinyimagenet.py", shell=True)
    tinyimagenet_training = datasets.ImageFolder('tiny-imagenet-200/train', transform=transform_train)
    tinyimagenet_testing = datasets.ImageFolder('tiny-imagenet-200/val', transform=transform_test)
    if num_client == 1:
        tinyimagenet_training_loader = [torch.utils.data.DataLoader(tinyimagenet_training, batch_size=batch_size, shuffle=shuffle,
            num_workers=num_workers)]
    elif num_client > 1:
        tinyimagenet_training_loader = []
        # Split the training set into num_client equal contiguous shards.
        for i in range(num_client):
            mnist_training_subset = torch.utils.data.Subset(tinyimagenet_training, list(range(i * (len(tinyimagenet_training)//num_client), (i+1) * (len(tinyimagenet_training)//num_client))))
            subset_training_loader = DataLoader(
                mnist_training_subset, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
            tinyimagenet_training_loader.append(subset_training_loader)
    tinyimagenet_testing_loader = torch.utils.data.DataLoader(tinyimagenet_testing, batch_size=batch_size, shuffle=False,
        num_workers=num_workers)
    return tinyimagenet_training_loader, tinyimagenet_testing_loader
def get_purchase_trainloader(batch_size=16):
    """Download (if needed) the Purchase-100 dataset and build shadow/target loaders.

    The raw CSV is fetched from the original release, shuffled with a fixed
    on-disk permutation, and split into disjoint shadow/target train/test
    partitions for membership-inference experiments.

    Args:
        batch_size: batch size for all four returned loaders. New parameter
            with a default — the original body referenced an undefined
            ``batch_size`` and raised NameError when called.
    Returns:
        (shadow_train_loader, shadow_test_loader,
         target_train_loader, target_test_loader)
    """
    DATASET_PATH='./datasets/purchase'
    DATASET_NAME= 'dataset_purchase'
    if not os.path.isdir(DATASET_PATH):
        os.makedirs(DATASET_PATH)
    DATASET_FILE = os.path.join(DATASET_PATH,DATASET_NAME)
    if not os.path.isfile(DATASET_FILE):
        print("Dowloading the dataset...")
        urllib.request.urlretrieve("https://www.comp.nus.edu.sg/~reza/files/dataset_purchase.tgz",os.path.join(DATASET_PATH,'tmp.tgz'))
        print('Dataset Dowloaded')
        tar = tarfile.open(os.path.join(DATASET_PATH,'tmp.tgz'))
        tar.extractall(path=DATASET_PATH)
    data_set = np.genfromtxt(DATASET_FILE, delimiter=',')
    X = data_set[:, 1:].astype(np.float64)
    Y = (data_set[:, 0]).astype(np.int32) - 1  # labels are 1-based in the CSV
    len_train = len(X)
    # Fixed permutation so every run reproduces the same split.
    r = np.load('./dataset_shuffle/random_r_purchase100.npy')
    X = X[r]
    Y = Y[r]
    # NOTE(review): the original also built TensorDatasets (xpriv/xpub) from the
    # raw numpy arrays here; they were never used afterwards and TensorDataset
    # rejects numpy inputs, so that dead code has been removed.
    train_classifier_ratio, train_attack_ratio = 0.1, 0.3
    train_data = X[:int(train_classifier_ratio*len_train)]
    test_data = X[int((train_classifier_ratio+train_attack_ratio)*len_train):]
    train_label = Y[:int(train_classifier_ratio*len_train)]
    test_label = Y[int((train_classifier_ratio+train_attack_ratio)*len_train):]
    np.random.seed(100)  # deterministic shadow/target assignment
    train_len = train_data.shape[0]
    r = np.arange(train_len)
    np.random.shuffle(r)
    shadow_indices = r[:train_len//2]
    target_indices = r[train_len//2:]
    shadow_train_data, shadow_train_label = train_data[shadow_indices], train_label[shadow_indices]
    target_train_data, target_train_label = train_data[target_indices], train_label[target_indices]
    test_len = 1*train_len
    r = np.arange(test_len)
    np.random.shuffle(r)
    shadow_indices = r[:test_len//2]
    target_indices = r[test_len//2:]
    shadow_test_data, shadow_test_label = test_data[shadow_indices], test_label[shadow_indices]
    target_test_data, target_test_label = test_data[target_indices], test_label[target_indices]
    shadow_train = tensor_data_create(shadow_train_data, shadow_train_label)
    shadow_train_loader = DataLoader(shadow_train, batch_size=batch_size, shuffle=True, num_workers=1)
    shadow_test = tensor_data_create(shadow_test_data, shadow_test_label)
    shadow_test_loader = DataLoader(shadow_test, batch_size=batch_size, shuffle=True, num_workers=1)
    target_train = tensor_data_create(target_train_data, target_train_label)
    target_train_loader = DataLoader(target_train, batch_size=batch_size, shuffle=True, num_workers=1)
    target_test = tensor_data_create(target_test_data, target_test_label)
    target_test_loader = DataLoader(target_test, batch_size=batch_size, shuffle=True, num_workers=1)
    print('Data loading finished')
    return shadow_train_loader, shadow_test_loader, target_train_loader, target_test_loader
def get_cifar10_trainloader(batch_size=16, num_workers=2, shuffle=True, num_client = 1, collude_use_public = False, data_portion = 1.0, noniid_ratio = 1.0):
    """Return CIFAR-10 training dataloaders for a (possibly federated) setup.

    Args:
        batch_size: dataloader batch size
        num_workers: number of dataloader worker processes
        shuffle: whether to shuffle the training data
        num_client: number of clients; the training set is split among them
        collude_use_public: if True, client 0 receives the full training set and
            every remaining client receives the CIFAR-10 *test* set (colluding
            clients); loader order in the returned list encodes client identity
        data_portion: fraction of the training set to keep (random subsample)
        noniid_ratio: label fraction per client; < 1.0 enables a non-IID split
    Returns:
        (list of per-client training loaders, member train loader, member test loader)
    """
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR10_TRAIN_MEAN, CIFAR10_TRAIN_STD)
    ])
    cifar10_training = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    # Randomly keep only `data_portion` of the training samples.
    indices = torch.randperm(len(cifar10_training))[:int(len(cifar10_training)* data_portion)]
    cifar10_training = torch.utils.data.Subset(cifar10_training, indices)
    if num_client == 1:
        cifar10_training_loader = [DataLoader(
            cifar10_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)]
    elif num_client > 1:
        cifar10_training_loader = []
        if noniid_ratio < 1.0:
            # Non-IID partition: each client only sees a fraction of the labels.
            cifar10_training_subset_list = noniid_alllabel(cifar10_training, num_client, noniid_ratio, 100)
        if not collude_use_public:
            for i in range(num_client):
                if noniid_ratio == 1.0:
                    # IID split: client i gets the i-th contiguous share;
                    # remainder samples (len % num_client) are dropped.
                    cifar10_training_subset = torch.utils.data.Subset(cifar10_training, list(range(i * (len(cifar10_training)//num_client), (i+1) * (len(cifar10_training)//num_client))))
                else:
                    cifar10_training_subset = DatasetSplit(cifar10_training, cifar10_training_subset_list[i])
                subset_training_loader = DataLoader(
                    cifar10_training_subset, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
                cifar10_training_loader.append(subset_training_loader)
        else:
            '''1+ (n-1) * collude, the single client gets all training data'''
            # Client 0: the full (possibly subsampled) training set.
            subset_training_loader = DataLoader(
                cifar10_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
            cifar10_training_loader.append(subset_training_loader)
            # Clients 1..n-1: all share the same loader over the test split.
            cifar10_test = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_train)
            for i in range(num_client-1):
                subset_training_loader = DataLoader(
                    cifar10_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
                cifar10_training_loader.append(subset_training_loader)
    # Member loaders for membership-inference evaluation: first 5000 training
    # samples as "member train", the next 5000 as "member test".
    cifar10_training2 = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    cifar10_training_mem = torch.utils.data.Subset(cifar10_training2, list(range(0, 5000)))
    xmem_training_loader = DataLoader(
        cifar10_training_mem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    cifar10_testing_mem = torch.utils.data.Subset(cifar10_training2, list(range(5000, 10000)))
    xmem_testing_loader = DataLoader(
        cifar10_testing_mem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    return cifar10_training_loader, xmem_training_loader, xmem_testing_loader
def get_cifar10_testloader(batch_size=16, num_workers=2, shuffle=True, extra_cls_removed_dataset = False, cls_to_remove = 0):
    """Return CIFAR-10 evaluation dataloaders.

    Args:
        batch_size: dataloader batch size
        num_workers: number of dataloader worker processes
        shuffle: whether to shuffle the returned loaders
        extra_cls_removed_dataset: if True, also build loaders from the training
            split with one class removed, and return those instead of the
            non-member loaders
        cls_to_remove: class index stripped when extra_cls_removed_dataset is True
    Returns:
        (test loader, non-member train loader, non-member test loader), or
        (test loader, class-removed loader, removed-class loader) when
        extra_cls_removed_dataset is True.
    """
    augment = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR10_TRAIN_MEAN, CIFAR10_TRAIN_STD)
    ])
    plain = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR10_TRAIN_MEAN, CIFAR10_TRAIN_STD)
    ])
    exlabel = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    eval_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=plain)
    cifar10_test_loader = DataLoader(
        eval_set, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    # Augmented copy of the test split, halved: first half plays the role of
    # non-member "training" data, second half non-member "test" data.
    aug_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=augment)
    half = len(aug_set) // 2
    first_half = torch.utils.data.Subset(aug_set, list(range(0, half)))
    nomem_training_loader = DataLoader(
        first_half, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    second_half = torch.utils.data.Subset(aug_set, list(range(half, len(aug_set))))
    nomem_testing_loader = DataLoader(
        second_half, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    if extra_cls_removed_dataset:
        # Class-removal loaders are built from the *training* split.
        train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=exlabel)
        rm_loader, ex_loader = remove_class_loader(train_set, cls_to_remove, batch_size, num_workers)
        return cifar10_test_loader, rm_loader, ex_loader
    return cifar10_test_loader, nomem_training_loader, nomem_testing_loader
def get_cifar100_trainloader(batch_size=16, num_workers=2, shuffle=True, num_client = 1, collude_use_public = False, data_portion = 1.0, noniid_ratio = 1.0):
    """Return CIFAR-100 training dataloaders for a (possibly federated) setup.

    Args:
        batch_size: dataloader batch size
        num_workers: number of dataloader worker processes
        shuffle: whether to shuffle the training data
        num_client: number of clients; the training set is split among them
        collude_use_public: if True, client 0 receives the full training set and
            every remaining client receives the CIFAR-100 *test* set (colluding
            clients); loader order in the returned list encodes client identity
        data_portion: fraction of the training set to keep (random subsample)
        noniid_ratio: label fraction per client; < 1.0 enables a non-IID split
    Returns:
        (list of per-client training loaders, member train loader, member test loader)
    """
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
    ])
    cifar100_training = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
    # Randomly keep only `data_portion` of the training samples.
    indices = torch.randperm(len(cifar100_training))[:int(len(cifar100_training)* data_portion)]
    cifar100_training = torch.utils.data.Subset(cifar100_training, indices)
    if num_client == 1:
        cifar100_training_loader = [DataLoader(
            cifar100_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)]
    elif num_client > 1:
        cifar100_training_loader = []
        if noniid_ratio < 1.0:
            # Non-IID partition: each client only sees a fraction of the labels.
            cifar100_training_subset_list = noniid_alllabel(cifar100_training, num_client, noniid_ratio, 100)
        if not collude_use_public:
            for i in range(num_client):
                if noniid_ratio == 1.0:
                    # IID split: client i gets the i-th contiguous share;
                    # remainder samples (len % num_client) are dropped.
                    cifar100_training_subset = torch.utils.data.Subset(cifar100_training, list(range(i * (len(cifar100_training)//num_client), (i+1) * (len(cifar100_training)//num_client))))
                else:
                    cifar100_training_subset = DatasetSplit(cifar100_training, cifar100_training_subset_list[i])
                subset_training_loader = DataLoader(
                    cifar100_training_subset, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
                cifar100_training_loader.append(subset_training_loader)
        else:
            '''1+ (n-1) * collude, the single client gets all training data'''
            # Client 0: the full (possibly subsampled) training set.
            subset_training_loader = DataLoader(
                cifar100_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
            cifar100_training_loader.append(subset_training_loader)
            # Clients 1..n-1: all share the same loader over the test split.
            cifar100_test = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_train)
            for i in range(num_client-1):
                subset_training_loader = DataLoader(
                    cifar100_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
                cifar100_training_loader.append(subset_training_loader)
    # Member loaders for membership-inference evaluation: first 5000 training
    # samples as "member train", the next 5000 as "member test".
    cifar100_training2 = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
    cifar100_training_mem = torch.utils.data.Subset(cifar100_training2, list(range(0, 5000)))
    xmem_training_loader = DataLoader(
        cifar100_training_mem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    cifar100_testing_mem = torch.utils.data.Subset(cifar100_training2, list(range(5000, 10000)))
    xmem_testing_loader = DataLoader(
        cifar100_testing_mem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    return cifar100_training_loader, xmem_training_loader, xmem_testing_loader
def get_cifar100_testloader(batch_size=16, num_workers=2, shuffle=True, extra_cls_removed_dataset = False, cls_to_remove = 0):
    """Return CIFAR-100 evaluation dataloaders.

    Args:
        batch_size: dataloader batch size
        num_workers: number of dataloader worker processes
        shuffle: whether to shuffle the returned loaders
        extra_cls_removed_dataset: if True, also build loaders from the training
            split with one class removed, and return those instead of the
            non-member loaders
        cls_to_remove: class index stripped when extra_cls_removed_dataset is True
    Returns:
        (test loader, non-member train loader, non-member test loader), or
        (test loader, class-removed loader, removed-class loader) when
        extra_cls_removed_dataset is True.
    """
    augment = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
    ])
    plain = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
    ])
    exlabel = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    eval_set = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=plain)
    cifar100_test_loader = DataLoader(
        eval_set, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    # Augmented copy of the test split, halved: first half plays the role of
    # non-member "training" data, second half non-member "test" data.
    aug_set = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=augment)
    half = len(aug_set) // 2
    first_half = torch.utils.data.Subset(aug_set, list(range(0, half)))
    nomem_training_loader = DataLoader(
        first_half, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    second_half = torch.utils.data.Subset(aug_set, list(range(half, len(aug_set))))
    nomem_testing_loader = DataLoader(
        second_half, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    if extra_cls_removed_dataset:
        # Class-removal loaders are built from the *training* split.
        train_set = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=exlabel)
        rm_loader, ex_loader = remove_class_loader(train_set, cls_to_remove, batch_size, num_workers)
        return cifar100_test_loader, rm_loader, ex_loader
    return cifar100_test_loader, nomem_training_loader, nomem_testing_loader
def get_SVHN_trainloader(batch_size=16, num_workers=2, shuffle=True, num_client = 1, collude_use_public = False):
    """Return SVHN training dataloaders for a (possibly federated) setup.

    Args:
        batch_size: dataloader batch size
        num_workers: number of dataloader worker processes
        shuffle: whether to shuffle the training data
        num_client: number of clients; the training set is split among them
        collude_use_public: if True, client 0 receives the full training set and
            every remaining client receives the SVHN *test* set (colluding
            clients); loader order in the returned list encodes client identity
    Returns:
        (list of per-client training loaders, member train loader, member test loader)
    """
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(SVHN_TRAIN_MEAN, SVHN_TRAIN_STD)
    ])
    SVHN_training = torchvision.datasets.SVHN(root='./data', split='train', download=True, transform=transform_train)
    if num_client == 1:
        SVHN_training_loader = [DataLoader(
            SVHN_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)]
    elif num_client > 1:
        SVHN_training_loader = []
        if not collude_use_public:
            for i in range(num_client):
                # IID split: client i gets the i-th contiguous share;
                # remainder samples (len % num_client) are dropped.
                SVHN_training_subset = torch.utils.data.Subset(SVHN_training, list(range(i * (len(SVHN_training)//num_client), (i+1) * (len(SVHN_training)//num_client))))
                subset_training_loader = DataLoader(
                    SVHN_training_subset, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
                SVHN_training_loader.append(subset_training_loader)
        else:
            '''1+ (n-1) * collude, the single client gets all training data'''
            # Client 0: the full training set.
            subset_training_loader = DataLoader(
                SVHN_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
            SVHN_training_loader.append(subset_training_loader)
            # Clients 1..n-1: all share the same loader over the test split.
            SVHN_test = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform_train)
            for i in range(num_client-1):
                subset_training_loader = DataLoader(
                    SVHN_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
                SVHN_training_loader.append(subset_training_loader)
    # Member loaders for membership-inference evaluation: first 5000 training
    # samples as "member train", the next 5000 as "member test".
    SVHN_training2 = torchvision.datasets.SVHN(root='./data', split='train', download=True, transform=transform_train)
    SVHN_training_mem = torch.utils.data.Subset(SVHN_training2, list(range(0, 5000)))
    xmem_training_loader = DataLoader(
        SVHN_training_mem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    SVHN_testing_mem = torch.utils.data.Subset(SVHN_training2, list(range(5000, 10000)))
    xmem_testing_loader = DataLoader(
        SVHN_testing_mem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    return SVHN_training_loader, xmem_training_loader, xmem_testing_loader
def get_SVHN_testloader(batch_size=16, num_workers=2, shuffle=True, extra_cls_removed_dataset = False, cls_to_remove = 0):
    """Return SVHN evaluation dataloaders.

    Args:
        batch_size: dataloader batch size
        num_workers: number of dataloader worker processes
        shuffle: whether to shuffle the returned loaders
        extra_cls_removed_dataset: if True, also build loaders from the training
            split with one class removed, and return those instead of the
            non-member loaders
        cls_to_remove: class index stripped when extra_cls_removed_dataset is True
    Returns:
        (test loader, non-member train loader, non-member test loader), or
        (test loader, class-removed loader, removed-class loader) when
        extra_cls_removed_dataset is True.
    """
    augment = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(SVHN_TRAIN_MEAN, SVHN_TRAIN_STD)
    ])
    plain = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(SVHN_TRAIN_MEAN, SVHN_TRAIN_STD)
    ])
    exlabel = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    eval_set = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=plain)
    print(len(eval_set))
    SVHN_test_loader = DataLoader(
        eval_set, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    # Augmented copy of the test split, halved: first half plays the role of
    # non-member "training" data, second half non-member "test" data.
    aug_set = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=augment)
    half = len(aug_set) // 2
    first_half = torch.utils.data.Subset(aug_set, list(range(0, half)))
    nomem_training_loader = DataLoader(
        first_half, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    second_half = torch.utils.data.Subset(aug_set, list(range(half, len(aug_set))))
    nomem_testing_loader = DataLoader(
        second_half, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    if extra_cls_removed_dataset:
        # Class-removal loaders are built from the *training* split.
        train_set = torchvision.datasets.SVHN(root='./data', split='train', download=True, transform=exlabel)
        rm_loader, ex_loader = remove_class_loader(train_set, cls_to_remove, batch_size, num_workers)
        return SVHN_test_loader, rm_loader, ex_loader
    return SVHN_test_loader, nomem_training_loader, nomem_testing_loader
################
def get_celeba_trainloader(batch_size=16, num_workers=2, shuffle=True, num_client = 1, collude_use_public = False):
    """Return CelebA training dataloaders for a (possibly federated) setup.

    Args:
        batch_size: dataloader batch size
        num_workers: number of dataloader worker processes
        shuffle: whether to shuffle the training data
        num_client: number of clients; the training set is split among them
        collude_use_public: if True, client 0 receives the full training set and
            every remaining client receives the CelebA *test* split (colluding
            clients); loader order in the returned list encodes client identity
    Returns:
        (list of per-client training loaders, member train loader, member test loader)
    """
    transform_train = transforms.Compose([
        transforms.Resize(64),
        transforms.CenterCrop(64),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    # BUG FIX: torchvision.datasets.CelebA selects its partition via
    # split='train'/'valid'/'test'; it has no `train=` keyword, so the
    # original calls raised TypeError.
    celeba_training = torchvision.datasets.CelebA(root='./data', split='train', download=True, transform=transform_train)
    if num_client == 1:
        celeba_training_loader = [DataLoader(
            celeba_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)]
    elif num_client > 1:
        celeba_training_loader = []
        if not collude_use_public:
            for i in range(num_client):
                # IID split: client i gets the i-th contiguous share;
                # remainder samples (len % num_client) are dropped.
                celeba_training_subset = torch.utils.data.Subset(celeba_training, list(range(i * (len(celeba_training)//num_client), (i+1) * (len(celeba_training)//num_client))))
                subset_training_loader = DataLoader(
                    celeba_training_subset, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
                celeba_training_loader.append(subset_training_loader)
        else:
            '''1+ (n-1) * collude, the single client gets all training data'''
            # Client 0: the full training split.
            subset_training_loader = DataLoader(
                celeba_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
            celeba_training_loader.append(subset_training_loader)
            # Clients 1..n-1: all share the same loader over the test split.
            celeba_test = torchvision.datasets.CelebA(root='./data', split='test', download=True, transform=transform_train)
            for i in range(num_client-1):
                subset_training_loader = DataLoader(
                    celeba_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
                celeba_training_loader.append(subset_training_loader)
    # Member loaders for membership-inference evaluation: first 5000 training
    # samples as "member train", the next 5000 as "member test".
    celeba_training2 = torchvision.datasets.CelebA(root='./data', split='train', download=True, transform=transform_train)
    celeba_training_mem = torch.utils.data.Subset(celeba_training2, list(range(0, 5000)))
    xmem_training_loader = DataLoader(
        celeba_training_mem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    celeba_testing_mem = torch.utils.data.Subset(celeba_training2, list(range(5000, 10000)))
    xmem_testing_loader = DataLoader(
        celeba_testing_mem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    return celeba_training_loader, xmem_training_loader, xmem_testing_loader
def get_celeba_testloader(batch_size=16, num_workers=2, shuffle=True):
    """Return CelebA evaluation dataloaders.

    Args:
        batch_size: dataloader batch size
        num_workers: number of dataloader worker processes
        shuffle: whether to shuffle the returned loaders
    Returns:
        (test loader, non-member train loader, non-member test loader)
    """
    transform_train = transforms.Compose([
        transforms.Resize(64),
        transforms.CenterCrop(64),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(64),
        transforms.CenterCrop(64),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # BUG FIX: torchvision.datasets.CelebA selects its partition via
    # split='train'/'valid'/'test'; it has no `train=` keyword, so the
    # original calls raised TypeError.
    celeba_test = torchvision.datasets.CelebA(root='./data', split='test', download=True, transform=transform_test)
    celeba_test_loader = DataLoader(
        celeba_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    # Second copy of the test split, halved: first half plays the role of
    # non-member "training" data, second half non-member "test" data.
    celeba_test2 = torchvision.datasets.CelebA(root='./data', split='test', download=True, transform=transform_train)
    celeba_training_nomem = torch.utils.data.Subset(celeba_test2, list(range(0, len(celeba_test2)//2)))
    nomem_training_loader = DataLoader(
        celeba_training_nomem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    celeba_testing_nomem = torch.utils.data.Subset(celeba_test2, list(range(len(celeba_test2)//2, len(celeba_test2))))
    nomem_testing_loader = DataLoader(
        celeba_testing_nomem, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
    return celeba_test_loader, nomem_training_loader, nomem_testing_loader
| [
"numpy.load",
"numpy.random.seed",
"torchvision.datasets.CIFAR10",
"os.path.isfile",
"numpy.arange",
"numpy.tile",
"torch.utils.data.TensorDataset",
"torchvision.datasets.CelebA",
"torchvision.transforms.Normalize",
"os.path.join",
"torchvision.datasets.SVHN",
"torch.utils.data.DataLoader",
... | [((1232, 1258), 'numpy.concatenate', 'np.concatenate', (['image_list'], {}), '(image_list)\n', (1246, 1258), True, 'import numpy as np\n'), ((3164, 3189), 'numpy.vstack', 'np.vstack', (['(idxs, labels)'], {}), '((idxs, labels))\n', (3173, 3189), True, 'import numpy as np\n'), ((4736, 4761), 'numpy.vstack', 'np.vstack', (['(idxs, labels)'], {}), '((idxs, labels))\n', (4745, 4761), True, 'import numpy as np\n'), ((5364, 5427), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)'}), "(root='./data', train=True, download=True)\n", (5385, 5427), True, 'import torchvision.datasets as datasets\n'), ((5440, 5489), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', ([], {'root': '"""./data"""', 'train': '(False)'}), "(root='./data', train=False)\n", (5461, 5489), True, 'import torchvision.datasets as datasets\n'), ((5505, 5525), 'numpy.array', 'np.array', (['xpriv.data'], {}), '(xpriv.data)\n', (5513, 5525), True, 'import numpy as np\n'), ((5540, 5563), 'numpy.array', 'np.array', (['xpriv.targets'], {}), '(xpriv.targets)\n', (5548, 5563), True, 'import numpy as np\n'), ((5577, 5596), 'numpy.array', 'np.array', (['xpub.data'], {}), '(xpub.data)\n', (5585, 5596), True, 'import numpy as np\n'), ((5610, 5632), 'numpy.array', 'np.array', (['xpub.targets'], {}), '(xpub.targets)\n', (5618, 5632), True, 'import numpy as np\n'), ((5724, 5754), 'numpy.tile', 'np.tile', (['x_train', '(1, 3, 1, 1)'], {}), '(x_train, (1, 3, 1, 1))\n', (5731, 5754), True, 'import numpy as np\n'), ((5765, 5794), 'numpy.tile', 'np.tile', (['x_test', '(1, 3, 1, 1)'], {}), '(x_test, (1, 3, 1, 1))\n', (5772, 5794), True, 'import numpy as np\n'), ((5807, 5828), 'torch.Tensor', 'torch.Tensor', (['x_train'], {}), '(x_train)\n', (5819, 5828), False, 'import torch\n'), ((5901, 5921), 'torch.Tensor', 'torch.Tensor', (['x_test'], {}), '(x_test)\n', (5913, 5921), False, 'import torch\n'), ((5993, 6025), 
'torch.nn.functional.interpolate', 'F.interpolate', (['x_train', '(32, 32)'], {}), '(x_train, (32, 32))\n', (6006, 6025), True, 'import torch.nn.functional as F\n'), ((6039, 6070), 'torch.nn.functional.interpolate', 'F.interpolate', (['x_test', '(32, 32)'], {}), '(x_test, (32, 32))\n', (6052, 6070), True, 'import torch.nn.functional as F\n'), ((6157, 6187), 'torch.clip', 'torch.clip', (['x_train', '(-1.0)', '(1.0)'], {}), '(x_train, -1.0, 1.0)\n', (6167, 6187), False, 'import torch\n'), ((6199, 6228), 'torch.clip', 'torch.clip', (['x_test', '(-1.0)', '(1.0)'], {}), '(x_test, -1.0, 1.0)\n', (6209, 6228), False, 'import torch\n'), ((6281, 6312), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (6294, 6312), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((6324, 6353), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (6337, 6353), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((6408, 6464), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)'}), "(root='./data', train=True, download=True)\n", (6422, 6464), True, 'import torchvision.datasets as datasets\n'), ((6477, 6519), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""./data"""', 'train': '(False)'}), "(root='./data', train=False)\n", (6491, 6519), True, 'import torchvision.datasets as datasets\n'), ((6535, 6555), 'numpy.array', 'np.array', (['xpriv.data'], {}), '(xpriv.data)\n', (6543, 6555), True, 'import numpy as np\n'), ((6570, 6593), 'numpy.array', 'np.array', (['xpriv.targets'], {}), '(xpriv.targets)\n', (6578, 6593), True, 'import numpy as np\n'), ((6607, 6626), 'numpy.array', 'np.array', (['xpub.data'], {}), '(xpub.data)\n', (6615, 6626), True, 'import numpy as np\n'), ((6640, 6662), 'numpy.array', 'np.array', (['xpub.targets'], {}), '(xpub.targets)\n', (6648, 
6662), True, 'import numpy as np\n'), ((6754, 6784), 'numpy.tile', 'np.tile', (['x_train', '(1, 3, 1, 1)'], {}), '(x_train, (1, 3, 1, 1))\n', (6761, 6784), True, 'import numpy as np\n'), ((6795, 6824), 'numpy.tile', 'np.tile', (['x_test', '(1, 3, 1, 1)'], {}), '(x_test, (1, 3, 1, 1))\n', (6802, 6824), True, 'import numpy as np\n'), ((6837, 6858), 'torch.Tensor', 'torch.Tensor', (['x_train'], {}), '(x_train)\n', (6849, 6858), False, 'import torch\n'), ((6931, 6951), 'torch.Tensor', 'torch.Tensor', (['x_test'], {}), '(x_test)\n', (6943, 6951), False, 'import torch\n'), ((7023, 7055), 'torch.nn.functional.interpolate', 'F.interpolate', (['x_train', '(32, 32)'], {}), '(x_train, (32, 32))\n', (7036, 7055), True, 'import torch.nn.functional as F\n'), ((7069, 7100), 'torch.nn.functional.interpolate', 'F.interpolate', (['x_test', '(32, 32)'], {}), '(x_test, (32, 32))\n', (7082, 7100), True, 'import torch.nn.functional as F\n'), ((7187, 7217), 'torch.clip', 'torch.clip', (['x_train', '(-1.0)', '(1.0)'], {}), '(x_train, -1.0, 1.0)\n', (7197, 7217), False, 'import torch\n'), ((7229, 7258), 'torch.clip', 'torch.clip', (['x_test', '(-1.0)', '(1.0)'], {}), '(x_test, -1.0, 1.0)\n', (7239, 7258), False, 'import torch\n'), ((7311, 7342), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (7324, 7342), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((7354, 7383), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (7367, 7383), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((8650, 8760), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['mnist_testing'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(mnist_testing, batch_size=batch_size, shuffle=\n False, num_workers=num_workers)\n', (8677, 8760), False, 'import torch\n'), ((10086, 10197), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['fmnist_testing'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(fmnist_testing, batch_size=batch_size, shuffle=\n False, num_workers=num_workers)\n', (10113, 10197), False, 'import torch\n'), ((11411, 11496), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['"""facescrub-dataset/32x32/train"""'], {'transform': 'transform_train'}), "('facescrub-dataset/32x32/train', transform=transform_train\n )\n", (11431, 11496), True, 'import torchvision.datasets as datasets\n'), ((11516, 11603), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['"""facescrub-dataset/32x32/validate"""'], {'transform': 'transform_test'}), "('facescrub-dataset/32x32/validate', transform=\n transform_test)\n", (11536, 11603), True, 'import torchvision.datasets as datasets\n'), ((12332, 12445), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['facescrub_testing'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(facescrub_testing, batch_size=batch_size,\n shuffle=False, num_workers=num_workers)\n', (12359, 12445), False, 'import torch\n'), ((13660, 13734), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['"""tiny-imagenet-200/train"""'], {'transform': 'transform_train'}), "('tiny-imagenet-200/train', transform=transform_train)\n", (13680, 13734), True, 'import torchvision.datasets as datasets\n'), ((13762, 13833), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['"""tiny-imagenet-200/val"""'], {'transform': 'transform_test'}), "('tiny-imagenet-200/val', transform=transform_test)\n", (13782, 13833), True, 'import torchvision.datasets as datasets\n'), ((14592, 14708), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['tinyimagenet_testing'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(tinyimagenet_testing, batch_size=batch_size,\n shuffle=False, num_workers=num_workers)\n', 
(14619, 14708), False, 'import torch\n'), ((14996, 15036), 'os.path.join', 'os.path.join', (['DATASET_PATH', 'DATASET_NAME'], {}), '(DATASET_PATH, DATASET_NAME)\n', (15008, 15036), False, 'import os\n'), ((15416, 15458), 'numpy.genfromtxt', 'np.genfromtxt', (['DATASET_FILE'], {'delimiter': '""","""'}), "(DATASET_FILE, delimiter=',')\n", (15429, 15458), True, 'import numpy as np\n'), ((15575, 15628), 'numpy.load', 'np.load', (['"""./dataset_shuffle/random_r_purchase100.npy"""'], {}), "('./dataset_shuffle/random_r_purchase100.npy')\n", (15582, 15628), True, 'import numpy as np\n'), ((16023, 16083), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_classifier_data', 'train_classifier_label'], {}), '(train_classifier_data, train_classifier_label)\n', (16036, 16083), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((16095, 16131), 'torch.utils.data.TensorDataset', 'TensorDataset', (['test_data', 'test_label'], {}), '(test_data, test_label)\n', (16108, 16131), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((16483, 16502), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (16497, 16502), True, 'import numpy as np\n'), ((16547, 16567), 'numpy.arange', 'np.arange', (['train_len'], {}), '(train_len)\n', (16556, 16567), True, 'import numpy as np\n'), ((16572, 16592), 'numpy.random.shuffle', 'np.random.shuffle', (['r'], {}), '(r)\n', (16589, 16592), True, 'import numpy as np\n'), ((16906, 16925), 'numpy.arange', 'np.arange', (['test_len'], {}), '(test_len)\n', (16915, 16925), True, 'import numpy as np\n'), ((16930, 16950), 'numpy.random.shuffle', 'np.random.shuffle', (['r'], {}), '(r)\n', (16947, 16950), True, 'import numpy as np\n'), ((17326, 17402), 'torch.utils.data.DataLoader', 'DataLoader', (['shadow_train'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(shadow_train, batch_size=batch_size, shuffle=True, num_workers=1)\n', (17336, 17402), False, 'from 
torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((17503, 17578), 'torch.utils.data.DataLoader', 'DataLoader', (['shadow_test'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(shadow_test, batch_size=batch_size, shuffle=True, num_workers=1)\n', (17513, 17578), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((17683, 17759), 'torch.utils.data.DataLoader', 'DataLoader', (['target_train'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(target_train, batch_size=batch_size, shuffle=True, num_workers=1)\n', (17693, 17759), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((17860, 17935), 'torch.utils.data.DataLoader', 'DataLoader', (['target_test'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(target_test, batch_size=batch_size, shuffle=True, num_workers=1)\n', (17870, 17935), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((18965, 19066), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (18993, 19066), False, 'import torchvision\n'), ((19183, 19233), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['cifar10_training', 'indices'], {}), '(cifar10_training, indices)\n', (19206, 19233), False, 'import torch\n'), ((22175, 22276), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (22203, 22276), False, 'import torchvision\n'), ((22393, 22494), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_training_mem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), 
'(cifar10_training_mem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (22403, 22494), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((22622, 22722), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_testing_mem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar10_testing_mem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (22632, 22722), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((23924, 24025), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', train=False, download=True,\n transform=transform_test)\n", (23952, 24025), False, 'import torchvision\n'), ((24048, 24141), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar10_test, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (24058, 24141), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((24172, 24274), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=False, download=True,\n transform=transform_train)\n", (24200, 24274), False, 'import torchvision\n'), ((24406, 24509), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_training_nomem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar10_training_nomem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (24416, 24509), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((24666, 24768), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_testing_nomem'], {'shuffle': 'shuffle', 
'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar10_testing_nomem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (24676, 24768), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((26109, 26211), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (26138, 26211), False, 'import torchvision\n'), ((26335, 26386), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['cifar100_training', 'indices'], {}), '(cifar100_training, indices)\n', (26358, 26386), False, 'import torch\n'), ((29382, 29484), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (29411, 29484), False, 'import torchvision\n'), ((29603, 29705), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_training_mem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_training_mem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (29613, 29705), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((29835, 29936), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_testing_mem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_testing_mem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (29845, 29936), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((31217, 31319), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', 
train=False, download=True,\n transform=transform_test)\n", (31246, 31319), False, 'import torchvision\n'), ((31343, 31437), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_test, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (31353, 31437), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((31465, 31568), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=False, download=True,\n transform=transform_train)\n", (31494, 31568), False, 'import torchvision\n'), ((31703, 31808), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_training_nomem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_training_nomem, shuffle=shuffle, num_workers=\n num_workers, batch_size=batch_size)\n', (31713, 31808), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((31968, 32071), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_testing_nomem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_testing_nomem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (31978, 32071), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((33378, 33479), 'torchvision.datasets.SVHN', 'torchvision.datasets.SVHN', ([], {'root': '"""./data"""', 'split': '"""train"""', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', split='train', download=True,\n transform=transform_train)\n", (33403, 33479), False, 'import torchvision\n'), ((35993, 36094), 'torchvision.datasets.SVHN', 'torchvision.datasets.SVHN', ([], {'root': '"""./data"""', 'split': '"""train"""', 'download': '(True)', 'transform': 'transform_train'}), 
"(root='./data', split='train', download=True,\n transform=transform_train)\n", (36018, 36094), False, 'import torchvision\n'), ((36205, 36303), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_training_mem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_training_mem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (36215, 36303), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((36425, 36522), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_testing_mem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_testing_mem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (36435, 36522), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((37657, 37756), 'torchvision.datasets.SVHN', 'torchvision.datasets.SVHN', ([], {'root': '"""./data"""', 'split': '"""test"""', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', split='test', download=True,\n transform=transform_test)\n", (37682, 37756), False, 'import torchvision\n'), ((37802, 37893), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_test, shuffle=shuffle, num_workers=num_workers, batch_size=\n batch_size)\n', (37812, 37893), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((37920, 38020), 'torchvision.datasets.SVHN', 'torchvision.datasets.SVHN', ([], {'root': '"""./data"""', 'split': '"""test"""', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', split='test', download=True,\n transform=transform_train)\n", (37945, 38020), False, 'import torchvision\n'), ((38143, 38243), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_training_nomem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_training_nomem, 
shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (38153, 38243), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((38388, 38487), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_testing_nomem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_testing_nomem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (38398, 38487), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((39790, 39890), 'torchvision.datasets.CelebA', 'torchvision.datasets.CelebA', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (39817, 39890), False, 'import torchvision\n'), ((42469, 42569), 'torchvision.datasets.CelebA', 'torchvision.datasets.CelebA', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (42496, 42569), False, 'import torchvision\n'), ((42684, 42784), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_training_mem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_training_mem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (42694, 42784), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((42910, 43009), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_testing_mem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_testing_mem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (42920, 43009), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((44029, 44129), 'torchvision.datasets.CelebA', 'torchvision.datasets.CelebA', ([], {'root': '"""./data"""', 'train': '(False)', 'download': 
'(True)', 'transform': 'transform_test'}), "(root='./data', train=False, download=True,\n transform=transform_test)\n", (44056, 44129), False, 'import torchvision\n'), ((44151, 44243), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_test, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (44161, 44243), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((44269, 44370), 'torchvision.datasets.CelebA', 'torchvision.datasets.CelebA', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=False, download=True,\n transform=transform_train)\n", (44296, 44370), False, 'import torchvision\n'), ((44499, 44601), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_training_nomem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_training_nomem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (44509, 44601), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((44754, 44855), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_testing_nomem'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_testing_nomem, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (44764, 44855), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((2840, 2867), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int64"""'}), "([], dtype='int64')\n", (2848, 2867), True, 'import numpy as np\n'), ((4443, 4470), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int64"""'}), "([], dtype='int64')\n", (4451, 4470), True, 'import numpy as np\n'), ((11160, 11208), 'os.path.isdir', 'os.path.isdir', (['"""./facescrub-dataset/32x32/train"""'], {}), "('./facescrub-dataset/32x32/train')\n", (11173, 11208), 
False, 'import os\n'), ((11218, 11292), 'os.system', 'os.system', (['"""git clone https://github.com/theothings/facescrub-dataset.git"""'], {}), "('git clone https://github.com/theothings/facescrub-dataset.git')\n", (11227, 11292), False, 'import os\n'), ((11327, 11385), 'subprocess.call', 'subprocess.call', (['"""python prepare_facescrub.py"""'], {'shell': '(True)'}), "('python prepare_facescrub.py', shell=True)\n", (11342, 11385), False, 'import subprocess\n'), ((13492, 13534), 'os.path.isdir', 'os.path.isdir', (['"""./tiny-imagenet-200/train"""'], {}), "('./tiny-imagenet-200/train')\n", (13505, 13534), False, 'import os\n'), ((13570, 13631), 'subprocess.call', 'subprocess.call', (['"""python prepare_tinyimagenet.py"""'], {'shell': '(True)'}), "('python prepare_tinyimagenet.py', shell=True)\n", (13585, 13631), False, 'import subprocess\n'), ((14913, 14940), 'os.path.isdir', 'os.path.isdir', (['DATASET_PATH'], {}), '(DATASET_PATH)\n', (14926, 14940), False, 'import os\n'), ((14950, 14975), 'os.makedirs', 'os.makedirs', (['DATASET_PATH'], {}), '(DATASET_PATH)\n', (14961, 14975), False, 'import os\n'), ((15048, 15076), 'os.path.isfile', 'os.path.isfile', (['DATASET_FILE'], {}), '(DATASET_FILE)\n', (15062, 15076), False, 'import os\n'), ((24838, 24941), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_exlabel'}), "(root='./data', train=True, download=True,\n transform=transform_exlabel)\n", (24866, 24941), False, 'import torchvision\n'), ((32139, 32243), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_exlabel'}), "(root='./data', train=True, download=True,\n transform=transform_exlabel)\n", (32168, 32243), False, 'import torchvision\n'), ((38554, 38657), 'torchvision.datasets.SVHN', 'torchvision.datasets.SVHN', ([], {'root': '"""./data"""', 
'split': '"""train"""', 'download': '(True)', 'transform': 'transform_exlabel'}), "(root='./data', split='train', download=True,\n transform=transform_exlabel)\n", (38579, 38657), False, 'import torchvision\n'), ((2198, 2251), 'torch.utils.data.sampler.SubsetRandomSampler', 'torch.utils.data.sampler.SubsetRandomSampler', (['indices'], {}), '(indices)\n', (2242, 2251), False, 'import torch\n'), ((2387, 2449), 'torch.utils.data.sampler.SubsetRandomSampler', 'torch.utils.data.sampler.SubsetRandomSampler', (['excluded_indices'], {}), '(excluded_indices)\n', (2431, 2449), False, 'import torch\n'), ((3361, 3425), 'numpy.random.choice', 'np.random.choice', (['idx_shard', 'num_class_per_client'], {'replace': '(False)'}), '(idx_shard, num_class_per_client, replace=False)\n', (3377, 3425), True, 'import numpy as np\n'), ((3547, 3645), 'numpy.concatenate', 'np.concatenate', (['(dict_users_unlabeled[i], idxs[rand * num_imgs:(rand + 1) * num_imgs])'], {'axis': '(0)'}), '((dict_users_unlabeled[i], idxs[rand * num_imgs:(rand + 1) *\n num_imgs]), axis=0)\n', (3561, 3645), True, 'import numpy as np\n'), ((4933, 4997), 'numpy.random.choice', 'np.random.choice', (['idx_shard', 'num_class_per_client'], {'replace': '(False)'}), '(idx_shard, num_class_per_client, replace=False)\n', (4949, 4997), True, 'import numpy as np\n'), ((5117, 5213), 'numpy.concatenate', 'np.concatenate', (['(dict_users_labeled[i], idxs[rand * num_imgs:(rand + 1) * num_imgs])'], {'axis': '(0)'}), '((dict_users_labeled[i], idxs[rand * num_imgs:(rand + 1) *\n num_imgs]), axis=0)\n', (5131, 5213), True, 'import numpy as np\n'), ((5843, 5864), 'torch.Tensor', 'torch.Tensor', (['y_train'], {}), '(y_train)\n', (5855, 5864), False, 'import torch\n'), ((5935, 5955), 'torch.Tensor', 'torch.Tensor', (['y_test'], {}), '(y_test)\n', (5947, 5955), False, 'import torch\n'), ((6873, 6894), 'torch.Tensor', 'torch.Tensor', (['y_train'], {}), '(y_train)\n', (6885, 6894), False, 'import torch\n'), ((6965, 6985), 'torch.Tensor', 
'torch.Tensor', (['y_test'], {}), '(y_test)\n', (6977, 6985), False, 'import torch\n'), ((8006, 8119), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['mnist_training'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers'}), '(mnist_training, batch_size=batch_size, shuffle=\n shuffle, num_workers=num_workers)\n', (8033, 8119), False, 'import torch\n'), ((9433, 9547), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['fmnist_training'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers'}), '(fmnist_training, batch_size=batch_size, shuffle\n =shuffle, num_workers=num_workers)\n', (9460, 9547), False, 'import torch\n'), ((10811, 10844), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (10842, 10844), True, 'import torchvision.transforms as transforms\n'), ((10854, 10883), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (10879, 10883), True, 'import torchvision.transforms as transforms\n'), ((10893, 10914), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10912, 10914), True, 'import torchvision.transforms as transforms\n'), ((10924, 10987), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['FACESCRUB_TRAIN_MEAN', 'FACESCRUB_TRAIN_STD'], {}), '(FACESCRUB_TRAIN_MEAN, FACESCRUB_TRAIN_STD)\n', (10944, 10987), True, 'import torchvision.transforms as transforms\n'), ((11046, 11067), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11065, 11067), True, 'import torchvision.transforms as transforms\n'), ((11077, 11140), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['FACESCRUB_TRAIN_MEAN', 'FACESCRUB_TRAIN_STD'], {}), '(FACESCRUB_TRAIN_MEAN, FACESCRUB_TRAIN_STD)\n', (11097, 11140), True, 'import torchvision.transforms as transforms\n'), ((11660, 11776), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['facescrub_training'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers'}), '(facescrub_training, batch_size=batch_size,\n shuffle=shuffle, num_workers=num_workers)\n', (11687, 11776), False, 'import torch\n'), ((13069, 13090), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32)'], {}), '(32)\n', (13086, 13090), True, 'import torchvision.transforms as transforms\n'), ((13100, 13133), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (13131, 13133), True, 'import torchvision.transforms as transforms\n'), ((13143, 13172), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (13168, 13172), True, 'import torchvision.transforms as transforms\n'), ((13182, 13203), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (13201, 13203), True, 'import torchvision.transforms as transforms\n'), ((13213, 13282), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['TINYIMAGENET_TRAIN_MEAN', 'TINYIMAGENET_TRAIN_STD'], {}), '(TINYIMAGENET_TRAIN_MEAN, TINYIMAGENET_TRAIN_STD)\n', (13233, 13282), True, 'import torchvision.transforms as transforms\n'), ((13341, 13362), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32)'], {}), '(32)\n', (13358, 13362), True, 'import torchvision.transforms as transforms\n'), ((13372, 13393), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (13391, 13393), True, 'import torchvision.transforms as transforms\n'), ((13403, 13472), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['TINYIMAGENET_TRAIN_MEAN', 'TINYIMAGENET_TRAIN_STD'], {}), '(TINYIMAGENET_TRAIN_MEAN, TINYIMAGENET_TRAIN_STD)\n', (13423, 13472), True, 'import torchvision.transforms as transforms\n'), ((13899, 14018), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['tinyimagenet_training'], {'batch_size': 'batch_size', 'shuffle': 
'shuffle', 'num_workers': 'num_workers'}), '(tinyimagenet_training, batch_size=batch_size,\n shuffle=shuffle, num_workers=num_workers)\n', (13926, 14018), False, 'import torch\n'), ((15219, 15256), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""tmp.tgz"""'], {}), "(DATASET_PATH, 'tmp.tgz')\n", (15231, 15256), False, 'import os\n'), ((15320, 15357), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""tmp.tgz"""'], {}), "(DATASET_PATH, 'tmp.tgz')\n", (15332, 15357), False, 'import os\n'), ((18646, 18682), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (18667, 18682), True, 'import torchvision.transforms as transforms\n'), ((18692, 18725), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (18723, 18725), True, 'import torchvision.transforms as transforms\n'), ((18735, 18764), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (18760, 18764), True, 'import torchvision.transforms as transforms\n'), ((18774, 18795), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (18793, 18795), True, 'import torchvision.transforms as transforms\n'), ((18805, 18864), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['CIFAR10_TRAIN_MEAN', 'CIFAR10_TRAIN_STD'], {}), '(CIFAR10_TRAIN_MEAN, CIFAR10_TRAIN_STD)\n', (18825, 18864), True, 'import torchvision.transforms as transforms\n'), ((19294, 19391), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar10_training, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (19304, 19391), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((23383, 23419), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (23404, 23419), True, 
'import torchvision.transforms as transforms\n'), ((23429, 23462), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (23460, 23462), True, 'import torchvision.transforms as transforms\n'), ((23472, 23501), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (23497, 23501), True, 'import torchvision.transforms as transforms\n'), ((23511, 23532), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (23530, 23532), True, 'import torchvision.transforms as transforms\n'), ((23542, 23601), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['CIFAR10_TRAIN_MEAN', 'CIFAR10_TRAIN_STD'], {}), '(CIFAR10_TRAIN_MEAN, CIFAR10_TRAIN_STD)\n', (23562, 23601), True, 'import torchvision.transforms as transforms\n'), ((23660, 23681), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (23679, 23681), True, 'import torchvision.transforms as transforms\n'), ((23691, 23750), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['CIFAR10_TRAIN_MEAN', 'CIFAR10_TRAIN_STD'], {}), '(CIFAR10_TRAIN_MEAN, CIFAR10_TRAIN_STD)\n', (23711, 23750), True, 'import torchvision.transforms as transforms\n'), ((23812, 23833), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (23831, 23833), True, 'import torchvision.transforms as transforms\n'), ((23843, 23897), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (23863, 23897), True, 'import torchvision.transforms as transforms\n'), ((25852, 25888), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (25873, 25888), True, 'import torchvision.transforms as transforms\n'), ((25898, 25931), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (25929, 25931), True, 'import 
torchvision.transforms as transforms\n'), ((25941, 25970), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (25966, 25970), True, 'import torchvision.transforms as transforms\n'), ((25980, 26001), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (25999, 26001), True, 'import torchvision.transforms as transforms\n'), ((26011, 26072), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['CIFAR100_TRAIN_MEAN', 'CIFAR100_TRAIN_STD'], {}), '(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)\n', (26031, 26072), True, 'import torchvision.transforms as transforms\n'), ((26452, 26550), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_training, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (26462, 26550), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((30605, 30641), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (30626, 30641), True, 'import torchvision.transforms as transforms\n'), ((30651, 30684), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (30682, 30684), True, 'import torchvision.transforms as transforms\n'), ((30694, 30723), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (30719, 30723), True, 'import torchvision.transforms as transforms\n'), ((30733, 30754), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (30752, 30754), True, 'import torchvision.transforms as transforms\n'), ((30764, 30825), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['CIFAR100_TRAIN_MEAN', 'CIFAR100_TRAIN_STD'], {}), '(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)\n', (30784, 30825), True, 'import torchvision.transforms as transforms\n'), ((30884, 
30905), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (30903, 30905), True, 'import torchvision.transforms as transforms\n'), ((30915, 30976), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['CIFAR100_TRAIN_MEAN', 'CIFAR100_TRAIN_STD'], {}), '(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)\n', (30935, 30976), True, 'import torchvision.transforms as transforms\n'), ((31038, 31059), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (31057, 31059), True, 'import torchvision.transforms as transforms\n'), ((31069, 31123), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (31089, 31123), True, 'import torchvision.transforms as transforms\n'), ((33071, 33107), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (33092, 33107), True, 'import torchvision.transforms as transforms\n'), ((33117, 33150), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (33148, 33150), True, 'import torchvision.transforms as transforms\n'), ((33160, 33189), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (33185, 33189), True, 'import torchvision.transforms as transforms\n'), ((33199, 33220), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (33218, 33220), True, 'import torchvision.transforms as transforms\n'), ((33230, 33283), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['SVHN_TRAIN_MEAN', 'SVHN_TRAIN_STD'], {}), '(SVHN_TRAIN_MEAN, SVHN_TRAIN_STD)\n', (33250, 33283), True, 'import torchvision.transforms as transforms\n'), ((33532, 33626), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_training, shuffle=shuffle, 
num_workers=num_workers,\n batch_size=batch_size)\n', (33542, 33626), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((37131, 37167), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (37152, 37167), True, 'import torchvision.transforms as transforms\n'), ((37177, 37210), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (37208, 37210), True, 'import torchvision.transforms as transforms\n'), ((37220, 37249), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (37245, 37249), True, 'import torchvision.transforms as transforms\n'), ((37259, 37280), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (37278, 37280), True, 'import torchvision.transforms as transforms\n'), ((37290, 37343), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['SVHN_TRAIN_MEAN', 'SVHN_TRAIN_STD'], {}), '(SVHN_TRAIN_MEAN, SVHN_TRAIN_STD)\n', (37310, 37343), True, 'import torchvision.transforms as transforms\n'), ((37402, 37423), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (37421, 37423), True, 'import torchvision.transforms as transforms\n'), ((37433, 37486), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['SVHN_TRAIN_MEAN', 'SVHN_TRAIN_STD'], {}), '(SVHN_TRAIN_MEAN, SVHN_TRAIN_STD)\n', (37453, 37486), True, 'import torchvision.transforms as transforms\n'), ((37548, 37569), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (37567, 37569), True, 'import torchvision.transforms as transforms\n'), ((37579, 37633), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (37599, 37633), True, 'import torchvision.transforms as transforms\n'), ((39522, 39543), 'torchvision.transforms.Resize', 'transforms.Resize', 
(['(64)'], {}), '(64)\n', (39539, 39543), True, 'import torchvision.transforms as transforms\n'), ((39553, 39578), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(64)'], {}), '(64)\n', (39574, 39578), True, 'import torchvision.transforms as transforms\n'), ((39588, 39621), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (39619, 39621), True, 'import torchvision.transforms as transforms\n'), ((39631, 39660), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (39656, 39660), True, 'import torchvision.transforms as transforms\n'), ((39670, 39691), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (39689, 39691), True, 'import torchvision.transforms as transforms\n'), ((39701, 39755), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (39721, 39755), True, 'import torchvision.transforms as transforms\n'), ((39945, 40041), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_training, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (39955, 40041), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((43576, 43597), 'torchvision.transforms.Resize', 'transforms.Resize', (['(64)'], {}), '(64)\n', (43593, 43597), True, 'import torchvision.transforms as transforms\n'), ((43607, 43632), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(64)'], {}), '(64)\n', (43628, 43632), True, 'import torchvision.transforms as transforms\n'), ((43642, 43663), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (43661, 43663), True, 'import torchvision.transforms as transforms\n'), ((43673, 43727), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 
0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (43693, 43727), True, 'import torchvision.transforms as transforms\n'), ((43787, 43808), 'torchvision.transforms.Resize', 'transforms.Resize', (['(64)'], {}), '(64)\n', (43804, 43808), True, 'import torchvision.transforms as transforms\n'), ((43818, 43843), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(64)'], {}), '(64)\n', (43839, 43843), True, 'import torchvision.transforms as transforms\n'), ((43853, 43874), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (43872, 43874), True, 'import torchvision.transforms as transforms\n'), ((43884, 43938), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (43904, 43938), True, 'import torchvision.transforms as transforms\n'), ((8437, 8539), 'torch.utils.data.DataLoader', 'DataLoader', (['mnist_training_subset'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(mnist_training_subset, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (8447, 8539), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((9870, 9973), 'torch.utils.data.DataLoader', 'DataLoader', (['fmnist_training_subset'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(fmnist_training_subset, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (9880, 9973), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((12111, 12213), 'torch.utils.data.DataLoader', 'DataLoader', (['mnist_training_subset'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(mnist_training_subset, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (12121, 12213), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((14365, 14467), 
'torch.utils.data.DataLoader', 'DataLoader', (['mnist_training_subset'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(mnist_training_subset, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (14375, 14467), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((21565, 21662), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar10_training, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (21575, 21662), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((21770, 21872), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=False, download=True,\n transform=transform_train)\n", (21798, 21872), False, 'import torchvision\n'), ((28764, 28862), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_training, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (28774, 28862), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((28972, 29075), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=False, download=True,\n transform=transform_train)\n", (29001, 29075), False, 'import torchvision\n'), ((35403, 35497), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_training, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (35413, 35497), False, 'from torch.utils.data import TensorDataset, 
sampler, DataLoader\n'), ((35599, 35699), 'torchvision.datasets.SVHN', 'torchvision.datasets.SVHN', ([], {'root': '"""./data"""', 'split': '"""test"""', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', split='test', download=True,\n transform=transform_train)\n", (35624, 35699), False, 'import torchvision\n'), ((41865, 41961), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_training'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_training, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (41875, 41961), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((42067, 42168), 'torchvision.datasets.CelebA', 'torchvision.datasets.CelebA', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=False, download=True,\n transform=transform_train)\n", (42094, 42168), False, 'import torchvision\n'), ((20097, 20202), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_training_subset'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar10_training_subset, shuffle=shuffle, num_workers=\n num_workers, batch_size=batch_size)\n', (20107, 20202), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((21952, 22045), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar10_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar10_test, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (21962, 22045), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((27280, 27386), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_training_subset'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_training_subset, shuffle=shuffle, num_workers=\n num_workers, batch_size=batch_size)\n', (27290, 27386), 
False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((29155, 29249), 'torch.utils.data.DataLoader', 'DataLoader', (['cifar100_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(cifar100_test, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (29165, 29249), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((33983, 34084), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_training_subset'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_training_subset, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (33993, 34084), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((35779, 35870), 'torch.utils.data.DataLoader', 'DataLoader', (['SVHN_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(SVHN_test, shuffle=shuffle, num_workers=num_workers, batch_size=\n batch_size)\n', (35789, 35870), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((40413, 40516), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_training_subset'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_training_subset, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (40423, 40516), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n'), ((42248, 42340), 'torch.utils.data.DataLoader', 'DataLoader', (['celeba_test'], {'shuffle': 'shuffle', 'num_workers': 'num_workers', 'batch_size': 'batch_size'}), '(celeba_test, shuffle=shuffle, num_workers=num_workers,\n batch_size=batch_size)\n', (42258, 42340), False, 'from torch.utils.data import TensorDataset, sampler, DataLoader\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
from datetime import datetime
import os
from pathlib import Path
import numpy as np
import tensorflow as tf
import iris
import iris_input
from iris_flags import *
def eval_once(saver, summary_writer,
              top_k_op, summary_op,
              weights_vars, dataset,
              images, labels):
    """Run a single evaluation pass and return the top-1 precision.

    Restores the most recent checkpoint from ``FLAGS.train_dir``, loads the
    fully-connected weights saved alongside it as ``.npy`` files, feeds them
    together with data batches, and accumulates correct predictions.

    Args:
        saver: ``tf.train.Saver`` used to restore the checkpoint.
        summary_writer: summary writer (currently unused; see commented code).
        top_k_op: op returning per-example correctness of the top-1 prediction.
        summary_op: merged summary op (currently unused).
        weights_vars: list of the three weight placeholders
            ``[W_fc1, W_fc2, W_fc3]``.
        dataset: object providing ``get_batch() -> (images, labels)``.
        images: placeholder for the input batch.
        labels: placeholder for the label batch.

    Returns:
        Precision @ 1 as a float, or ``None`` when no checkpoint is found.
    """
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Recover the global step from the checkpoint file name,
            # e.g. ".../model.ckpt-123" -> "123".
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[
                -1]
        else:
            print('No checkpoint file found')
            return
        num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
        true_count = 0  # Counts the number of correct predictions.
        total_sample_count = num_iter * FLAGS.batch_size
        step = 0
        # The layer weights live outside the checkpoint, as .npy files.
        W_fc1_ = np.load(FLAGS.train_dir + '/weights/W_fc1.npy')
        W_fc2_ = np.load(FLAGS.train_dir + '/weights/W_fc2.npy')
        W_fc3_ = np.load(FLAGS.train_dir + '/weights/W_fc3.npy')
        while step < num_iter:
            image_batch, label_batch = dataset.get_batch()
            predictions = sess.run([top_k_op],
                                   feed_dict={weights_vars[0]: W_fc1_,
                                              weights_vars[1]: W_fc2_,
                                              weights_vars[2]: W_fc3_,
                                              images: image_batch,
                                              labels: label_batch})
            true_count += np.sum(predictions)
            step += 1
        # summary_str = sess.run(summary_op, feed_dict={W_fc: W_fc_}) ## NEW ##
        # summary_writer.add_summary(summary_str, step) ## NEW ##
        # Compute precision @ 1.
        precision = true_count / total_sample_count
        print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
    return precision
def evaluate():
    """Build the evaluation graph and repeatedly evaluate checkpoints.

    Recreates ``FLAGS.eval_dir``, builds the inference graph with the layer
    weights fed through placeholders, then loops calling :func:`eval_once`
    every ``FLAGS.eval_interval_secs`` seconds (a single pass when
    ``FLAGS.run_once`` is set).

    Returns:
        The precision @ 1 of the last evaluation pass.
    """
    if tf.gfile.Exists(FLAGS.eval_dir):
        tf.gfile.DeleteRecursively(FLAGS.eval_dir)
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    with tf.Graph().as_default() as g:
        iris_dataset = iris_input.Iris()
        # Placeholders for one batch of flattened features and int labels.
        images = tf.placeholder(tf.float32,
                                [FLAGS.batch_size, iris_input.INPUT_SIZE])
        labels = tf.placeholder(tf.int64, [FLAGS.batch_size])
        # Build a Graph that computes the logits predictions from the
        # inference model.  Layer sizes: INPUT_SIZE -> 4 -> 4 -> 3.
        W_fc1 = tf.placeholder(tf.float32, [iris_input.INPUT_SIZE, 4])
        W_fc2 = tf.placeholder(tf.float32, [4, 4])
        W_fc3 = tf.placeholder(tf.float32, [4, 3])
        weights = [W_fc1, W_fc2, W_fc3]
        logits = iris.inference(images, weights)
        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        # Restore the moving average version of the learned variables for eval.
        # variable_averages = tf.train.ExponentialMovingAverage(
        #     mnist.MOVING_AVERAGE_DECAY)
        # variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver()
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)
        while True:
            precision = eval_once(saver, summary_writer, top_k_op, summary_op,
                                  weights, iris_dataset, images, labels)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
    return precision
def evaluate_best():
    """Evaluate the hand-picked "best" checkpoint and print its precision.

    Rebuilds the inference graph, restores the checkpoint stored under
    ``FLAGS.train_dir + '/best_weights'`` and prints the precision @ 1
    obtained over ``FLAGS.num_examples`` samples.
    """
    with tf.Graph().as_default() as g:
        dataset = iris_input.Iris()
        images = tf.placeholder(tf.float32,
                                [FLAGS.batch_size, iris_input.INPUT_SIZE])
        labels = tf.placeholder(tf.int64, [FLAGS.batch_size])
        # Build a Graph that computes the logits predictions from the
        # inference model.
        W_fc1 = tf.placeholder(tf.float32, [iris_input.INPUT_SIZE, 4])
        W_fc2 = tf.placeholder(tf.float32, [4, 4])
        W_fc3 = tf.placeholder(tf.float32, [4, 3])
        weights = [W_fc1, W_fc2, W_fc3]
        logits = iris.inference(images, weights)
        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # NOTE(review): the checkpoint step (79) is hard-coded; confirm
            # it matches the checkpoint actually saved under best_weights.
            ckpt_path = FLAGS.train_dir + '/best_weights/model.ckpt-79'
            saver.restore(sess, ckpt_path)
            num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
            true_count = 0  # Counts the number of correct predictions.
            total_sample_count = num_iter * FLAGS.batch_size
            step = 0
            W_fc1_ = np.load(FLAGS.train_dir + '/best_weights/W_fc1.npy')
            W_fc2_ = np.load(FLAGS.train_dir + '/best_weights/W_fc2.npy')
            W_fc3_ = np.load(FLAGS.train_dir + '/best_weights/W_fc3.npy')
            while step < num_iter:
                image_batch, label_batch = dataset.get_batch()
                predictions = sess.run([top_k_op],
                                       feed_dict={W_fc1: W_fc1_,
                                                  W_fc2: W_fc2_,
                                                  W_fc3: W_fc3_,
                                                  images: image_batch,
                                                  labels: label_batch})
                true_count += np.sum(predictions)
                step += 1
        # Compute precision @ 1.
        precision = true_count / total_sample_count
        print(precision)
def main(argv=None):  # pylint: disable=unused-argument
    """Entry point invoked by ``tf.app.run``: evaluate the best checkpoint."""
    # Earlier workflow (eval_dir reset + repeated evaluate()) kept here
    # for reference:
    # mnist.maybe_download_and_extract()
    # if tf.gfile.Exists(FLAGS.eval_dir):
    #     tf.gfile.DeleteRecursively(FLAGS.eval_dir)
    # tf.gfile.MakeDirs(FLAGS.eval_dir)
    # evaluate()
    evaluate_best()


if __name__ == '__main__':
    tf.app.run()
| [
"tensorflow.gfile.Exists",
"numpy.load",
"numpy.sum",
"iris.inference",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.gfile.DeleteRecursively",
"datetime.datetime.now",
"tensorflow.app.run",
"tensorflow.summary.merge_all",
"tensorflow.train.get_checkpoint_state",
"tens... | [((2177, 2208), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.eval_dir'], {}), '(FLAGS.eval_dir)\n', (2192, 2208), True, 'import tensorflow as tf\n'), ((2265, 2298), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.eval_dir'], {}), '(FLAGS.eval_dir)\n', (2282, 2298), True, 'import tensorflow as tf\n'), ((6150, 6162), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (6160, 6162), True, 'import tensorflow as tf\n'), ((452, 464), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (462, 464), True, 'import tensorflow as tf\n'), ((489, 535), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (518, 535), True, 'import tensorflow as tf\n'), ((1095, 1142), 'numpy.load', 'np.load', (["(FLAGS.train_dir + '/weights/W_fc1.npy')"], {}), "(FLAGS.train_dir + '/weights/W_fc1.npy')\n", (1102, 1142), True, 'import numpy as np\n'), ((1160, 1207), 'numpy.load', 'np.load', (["(FLAGS.train_dir + '/weights/W_fc2.npy')"], {}), "(FLAGS.train_dir + '/weights/W_fc2.npy')\n", (1167, 1207), True, 'import numpy as np\n'), ((1225, 1272), 'numpy.load', 'np.load', (["(FLAGS.train_dir + '/weights/W_fc3.npy')"], {}), "(FLAGS.train_dir + '/weights/W_fc3.npy')\n", (1232, 1272), True, 'import numpy as np\n'), ((2218, 2260), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['FLAGS.eval_dir'], {}), '(FLAGS.eval_dir)\n', (2244, 2260), True, 'import tensorflow as tf\n'), ((2361, 2378), 'iris_input.Iris', 'iris_input.Iris', ([], {}), '()\n', (2376, 2378), False, 'import iris_input\n'), ((2396, 2465), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[FLAGS.batch_size, iris_input.INPUT_SIZE]'], {}), '(tf.float32, [FLAGS.batch_size, iris_input.INPUT_SIZE])\n', (2410, 2465), True, 'import tensorflow as tf\n'), ((2515, 2559), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[FLAGS.batch_size]'], {}), '(tf.int64, [FLAGS.batch_size])\n', (2529, 2559), True, 
'import tensorflow as tf\n'), ((2674, 2728), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[iris_input.INPUT_SIZE, 4]'], {}), '(tf.float32, [iris_input.INPUT_SIZE, 4])\n', (2688, 2728), True, 'import tensorflow as tf\n'), ((2745, 2779), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[4, 4]'], {}), '(tf.float32, [4, 4])\n', (2759, 2779), True, 'import tensorflow as tf\n'), ((2796, 2830), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[4, 3]'], {}), '(tf.float32, [4, 3])\n', (2810, 2830), True, 'import tensorflow as tf\n'), ((2888, 2919), 'iris.inference', 'iris.inference', (['images', 'weights'], {}), '(images, weights)\n', (2902, 2919), False, 'import iris\n'), ((2973, 3006), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'labels', '(1)'], {}), '(logits, labels, 1)\n', (2987, 3006), True, 'import tensorflow as tf\n'), ((3294, 3310), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3308, 3310), True, 'import tensorflow as tf\n'), ((3412, 3434), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3432, 3434), True, 'import tensorflow as tf\n'), ((3461, 3501), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.eval_dir', 'g'], {}), '(FLAGS.eval_dir, g)\n', (3482, 3501), True, 'import tensorflow as tf\n'), ((3878, 3895), 'iris_input.Iris', 'iris_input.Iris', ([], {}), '()\n', (3893, 3895), False, 'import iris_input\n'), ((3913, 3982), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[FLAGS.batch_size, iris_input.INPUT_SIZE]'], {}), '(tf.float32, [FLAGS.batch_size, iris_input.INPUT_SIZE])\n', (3927, 3982), True, 'import tensorflow as tf\n'), ((4032, 4076), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[FLAGS.batch_size]'], {}), '(tf.int64, [FLAGS.batch_size])\n', (4046, 4076), True, 'import tensorflow as tf\n'), ((4191, 4245), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[iris_input.INPUT_SIZE, 4]'], {}), '(tf.float32, 
[iris_input.INPUT_SIZE, 4])\n', (4205, 4245), True, 'import tensorflow as tf\n'), ((4262, 4296), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[4, 4]'], {}), '(tf.float32, [4, 4])\n', (4276, 4296), True, 'import tensorflow as tf\n'), ((4313, 4347), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[4, 3]'], {}), '(tf.float32, [4, 3])\n', (4327, 4347), True, 'import tensorflow as tf\n'), ((4405, 4436), 'iris.inference', 'iris.inference', (['images', 'weights'], {}), '(images, weights)\n', (4419, 4436), False, 'import iris\n'), ((4490, 4523), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'labels', '(1)'], {}), '(logits, labels, 1)\n', (4504, 4523), True, 'import tensorflow as tf\n'), ((4541, 4557), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4555, 4557), True, 'import tensorflow as tf\n'), ((886, 934), 'math.ceil', 'math.ceil', (['(FLAGS.num_examples / FLAGS.batch_size)'], {}), '(FLAGS.num_examples / FLAGS.batch_size)\n', (895, 934), False, 'import math\n'), ((1784, 1803), 'numpy.sum', 'np.sum', (['predictions'], {}), '(predictions)\n', (1790, 1803), True, 'import numpy as np\n'), ((3740, 3776), 'time.sleep', 'time.sleep', (['FLAGS.eval_interval_secs'], {}), '(FLAGS.eval_interval_secs)\n', (3750, 3776), False, 'import time\n'), ((4572, 4584), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4582, 4584), True, 'import tensorflow as tf\n'), ((4962, 5014), 'numpy.load', 'np.load', (["(FLAGS.train_dir + '/best_weights/W_fc1.npy')"], {}), "(FLAGS.train_dir + '/best_weights/W_fc1.npy')\n", (4969, 5014), True, 'import numpy as np\n'), ((5036, 5088), 'numpy.load', 'np.load', (["(FLAGS.train_dir + '/best_weights/W_fc2.npy')"], {}), "(FLAGS.train_dir + '/best_weights/W_fc2.npy')\n", (5043, 5088), True, 'import numpy as np\n'), ((5110, 5162), 'numpy.load', 'np.load', (["(FLAGS.train_dir + '/best_weights/W_fc3.npy')"], {}), "(FLAGS.train_dir + '/best_weights/W_fc3.npy')\n", (5117, 5162), True, 'import numpy as np\n'), 
((2308, 2318), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2316, 2318), True, 'import tensorflow as tf\n'), ((3830, 3840), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3838, 3840), True, 'import tensorflow as tf\n'), ((4737, 4785), 'math.ceil', 'math.ceil', (['(FLAGS.num_examples / FLAGS.batch_size)'], {}), '(FLAGS.num_examples / FLAGS.batch_size)\n', (4746, 4785), False, 'import math\n'), ((5680, 5699), 'numpy.sum', 'np.sum', (['predictions'], {}), '(predictions)\n', (5686, 5699), True, 'import numpy as np\n'), ((2103, 2117), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2115, 2117), False, 'from datetime import datetime\n')] |
"""Graphical user interface (:mod:`fluiddyn.util.gui`)
======================================================
"""
try: # Python 3
import tkinter as tk
from tkinter import N, W, E, S, END
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
from tkinter.simpledialog import SimpleDialog
import tkinter.font as font
except ImportError: # Python 2
import Tkinter as tk
from Tkinter import N, W, E, S, END
import ttk
from ScrolledText import ScrolledText
from tkSimpleDialog import Dialog as SimpleDialog
import tkFont as font
import time
import datetime as dt
from fluiddyn._version import __version__
# from fluiddyn.util.daemons import DaemonThread as Daemon
from fluidlab.objects.rotatingobjects import (
create_rotating_objects_kepler,
DaemonRunningRotatingObject,
RotatingObject,
)
class ElapsedTimeClock(ttk.Label):
    """Tk label that displays the wall-clock time elapsed since creation."""

    def __init__(self, parent, *args, **kwargs):
        ttk.Label.__init__(self, parent, *args, **kwargs)
        self.lasttime = ""
        started = time.localtime()
        # Subtracting this offset from "now" yields a datetime whose
        # time-of-day component equals the time elapsed since creation.
        self.zerotime = dt.timedelta(
            hours=started[3], minutes=started[4], seconds=started[5]
        )
        self.tick()

    def tick(self):
        """Refresh the displayed elapsed time; reschedules itself every 200 ms."""
        elapsed = dt.datetime(1, 1, 1).now() - self.zerotime
        text = elapsed.strftime("%H:%M:%S")
        # Touch the widget only when the displayed second actually changes,
        # to avoid needless redraws.
        if text != self.lasttime:
            self.lasttime = text
            self.config(text=text)
        self.after(200, self.tick)
class FrameRotatingObject(ttk.Frame):
    """Frame displaying the instantaneous rotation rate of a rotating object.

    The object's ``write`` method is dynamically replaced so that every
    call refreshes the displayed rotation rate.
    """

    def __init__(self, master, obj, title=None, **kargs):
        """Build the frame.

        Parameters
        ----------
        master : Tk widget
            Parent widget.
        obj : object
            Object exposing ``name``, ``rotation_rate`` and ``write``.
        title : str, optional
            Title shown in the frame; defaults to ``obj.name``.
        **kargs :
            Extra keyword arguments forwarded to ``grid``.
        """
        self.obj = obj
        # Bug fix: a caller-supplied title used to be silently dropped,
        # leaving ``self.title`` undefined and crashing create_widgets().
        self.title = obj.name if title is None else title
        ttk.Frame.__init__(self, master)
        self.create_widgets()
        self.grid(sticky=(N, W, E, S), **kargs)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

    def create_widgets(self):
        """Create the title label and the rotation-rate display."""
        label_title = ttk.Label(self, text=self.title, font="TkHeadingFont")
        label_title.grid(column=0, row=0, padx=15)
        label = ttk.Label(self, text="rotation rate:", font="TkHeadingFont")
        label.grid(column=0, row=1, padx=15)
        self.stringvar_rr = tk.StringVar()
        self.stringvar_rr.set(f"{self.obj.rotation_rate:5.2f}")
        self.label_rr = ttk.Label(
            self, textvariable=self.stringvar_rr, font="TkHeadingFont"
        )
        self.label_rr.grid(column=1, row=1, padx=15)
        label = ttk.Label(self, text="rad/s", font="TkHeadingFont")
        label.grid(column=2, row=1, padx=15)
        self._redefine_write()

    def _redefine_write(self):
        """Dynamically overwrite the ``write`` method of the monitored object."""

        def new_write(obj, string):
            # Refresh the displayed rotation rate on every write() call.
            self.stringvar_rr.set(f"{self.obj.rotation_rate:7.3f}")

        # Bind new_write as an instance method on the monitored object.
        # Bug fix: MethodType takes (function, instance) on both Python 2
        # and 3; the old three-argument form fails under Python 3.
        instancemethod = type(self.obj.write)
        self.obj.write = instancemethod(new_write, self.obj)
class FrameWritingObject(ttk.Frame):
    """Frame showing the textual output of an object with a ``write`` method.

    The object's ``write`` method is dynamically replaced; its output is
    currently echoed to stdout (see note in ``_redefine_write``).
    """

    def __init__(self, master, obj, title=None, **kargs):
        """Build the frame.

        Parameters
        ----------
        master : Tk widget
            Parent widget.
        obj : object
            Object exposing ``name`` and ``write``.
        title : str, optional
            Title shown in the frame; defaults to ``obj.name``.
        **kargs :
            Extra keyword arguments forwarded to ``grid``.
        """
        self.obj = obj
        # Bug fix: a caller-supplied title used to be silently dropped,
        # leaving ``self.title`` undefined and crashing create_widgets().
        self.title = obj.name if title is None else title
        ttk.Frame.__init__(self, master)
        self.create_widgets()
        self.grid(sticky=(N, W, E, S), **kargs)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)

    def create_widgets(self):
        """Create the title label and the scrolled text area."""
        label_title = ttk.Label(self, text=self.title, font="TkHeadingFont")
        label_title.grid(column=0, row=0, padx=15)
        self.text = ScrolledText(self, height=6)
        self.text.grid(column=0, row=1, padx=10, pady=10, sticky=(N, W, E, S))
        self._redefine_write()

    def _redefine_write(self):
        """Dynamically overwrite the ``write`` method of the object."""

        def new_write(obj, string):
            print(string)
            # Inserting into the ScrolledText from here proved unstable on
            # Windows, so the text widget is currently not updated:
            # self.text.insert(END, string+'\n')
            # self.text.yview_moveto(1)

        # Bind new_write as an instance method on the object.
        # Bug fix: MethodType takes (function, instance) on both Python 2
        # and 3; the old three-argument form fails under Python 3.
        instancemethod = type(self.obj.write)
        self.obj.write = instancemethod(new_write, self.obj)
class MyDialogCloseWindow(SimpleDialog):
    """Modal confirmation dialog; the user's answer ends up in ``agree``.

    NOTE(review): ``body``/``apply`` are the hooks of ``tkinter``'s
    ``Dialog`` class; verify they are invoked under the Python 3 import
    path, where ``SimpleDialog`` is imported directly.
    """

    def body(self, master):
        """Build the dialog body: a single label with the question."""
        self.agree = False
        question = (
            "Do you really want to close the window\n"
            "and stop the experiment?"
        )
        label = ttk.Label(master, text=question)
        label.grid()

    def apply(self):
        """Record that the user confirmed the dialog."""
        self.agree = True
class MainFrameRunExp(ttk.Frame):
    """Main Tk frame used to monitor a running experiment.

    Shows the experiment name and an elapsed-time clock in a header, plus
    one sub-frame per monitored object.  Closing the window first pops a
    confirmation dialog.
    """

    def __init__(self, root=None, exp=None):
        # root: optional Tk root (created when None).
        # exp: experiment object whose ``name_dir`` is shown in the header.
        if root is None:
            root = tk.Tk()
        self.root = root
        # Intercept the window-manager close button with a confirmation.
        root.protocol("WM_DELETE_WINDOW", self.ask_if_has_to_be_deleted)
        if exp is not None:
            self._exp = exp
        # Enlarge the default Tk fonts for readability.
        f = font.nametofont("TkDefaultFont")
        f.configure(size=10)
        f = font.nametofont("TkHeadingFont")
        f.configure(size=14)
        self.root.title("FluidDyn " + __version__)
        ttk.Frame.__init__(self, root, padding="5 5 5 5")
        self.grid(column=0, row=0, sticky=(N, W, E, S))
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.frames_objects = {}  # maps obj.name -> its monitoring frame
        self.create_widgets()

    def create_widgets(self):
        """Create the header: experiment label and elapsed-time clock."""
        if hasattr(self, "_exp"):
            label_exp = ttk.Label(self, text="Experiment:", font="TkHeadingFont")
            label_exp.grid(column=0, row=0, sticky=(W))
            label_exp = ttk.Label(self, text=self._exp.name_dir)
            label_exp.grid(column=0, row=0)
        self.clock = ElapsedTimeClock(self, font="TkHeadingFont")
        self.clock.grid(column=0, row=0, sticky=(N, E))
        # self.button_hi = ttk.Button(self)
        # self.button_hi["text"] = "Hello World\n(click me)"
        # self.button_hi["command"] = self.say_hi
        # self.button_hi.grid(padx=10, pady=10)

    # def say_hi(self):
    #     print("hi there, everyone!")

    def add_frame_object(self, obj, **kargs):
        """Add a FrameWritingObject for *obj*; grid kwargs are forwarded."""
        self.frames_objects[obj.name] = FrameWritingObject(self, obj, **kargs)

    def add_frame_rotating_object(self, obj, **kargs):
        """Add a FrameRotatingObject for *obj*; grid kwargs are forwarded."""
        self.frames_objects[obj.name] = FrameRotatingObject(self, obj, **kargs)

    def mainloop(self):
        """Pad all children uniformly, then enter the Tk main loop."""
        for child in self.winfo_children():
            child.grid_configure(padx=10, pady=10)
        ttk.Frame.mainloop(self)

    def ask_if_has_to_be_deleted(self):
        # Destroy the window only when the user confirms in the dialog.
        d = MyDialogCloseWindow(self.root)
        if d.agree:
            self.root.destroy()
if __name__ == "__main__":
    # Demo: run a Kepler rotating-disk experiment with conductivity-profile
    # measurements, monitored through the MainFrameRunExp GUI.
    from fluidlab.exp.withconductivityprobe import DaemonMeasureProfiles

    import numpy as np

    def Omega_i(t):
        # Inner rotation rate as a function of time: a smooth cosine ramp
        # from 0 up to Omega and back, with the given period (seconds).
        Omega = 1.0
        period = 60
        return Omega / 2 * (1 - np.cos(2 * np.pi * t / period))

    # Earlier piecewise-linear ramp, kept for reference:
    # time_rampe = 10
    # t = t/time_rampe
    # if t < Omega:
    #     ret = t*Omega
    # elif t < 2*Omega:
    #     ret = Omega*(2-t)
    # else:
    #     ret = 0
    # return ret

    # Inner and outer radii of the tank (units per fluidlab convention —
    # presumably mm; confirm against the experimental setup).
    R_i = 100
    R_o = 482 / 2
    rc, rt = create_rotating_objects_kepler(Omega_i, R_i, R_o)

    import fluiddyn as fld

    exp = fld.load_exp("Exp_Omega1=0.70_N0=1.80_2014-09-01_23-47-47")

    # Build the monitoring GUI with one frame per monitored object.
    mainframe = MainFrameRunExp(exp=exp)
    mainframe.add_frame_object(exp.profiles, column=0, row=3)
    mainframe.add_frame_object(rc, column=0, row=4)
    mainframe.add_frame_object(rt, column=0, row=5)

    deamon_profiles = DaemonMeasureProfiles(
        exp=exp, duration=600, period=10, speed_measurements=400, speed_up=100
    )
    daemon_rc = DaemonRunningRotatingObject(rc)
    daemon_rt = DaemonRunningRotatingObject(rt)

    # Launch the measurement and rotation daemons, then enter the GUI loop.
    deamon_profiles.start()
    daemon_rc.start()
    daemon_rt.start()
    mainframe.mainloop()
| [
"fluidlab.objects.rotatingobjects.DaemonRunningRotatingObject",
"ttk.Label",
"ScrolledText.ScrolledText",
"tkFont.nametofont",
"fluidlab.objects.rotatingobjects.create_rotating_objects_kepler",
"Tkinter.Tk",
"fluiddyn.load_exp",
"ttk.Label.__init__",
"datetime.datetime",
"ttk.Frame.mainloop",
"t... | [((7850, 7899), 'fluidlab.objects.rotatingobjects.create_rotating_objects_kepler', 'create_rotating_objects_kepler', (['Omega_i', 'R_i', 'R_o'], {}), '(Omega_i, R_i, R_o)\n', (7880, 7899), False, 'from fluidlab.objects.rotatingobjects import create_rotating_objects_kepler, DaemonRunningRotatingObject, RotatingObject\n'), ((7939, 7998), 'fluiddyn.load_exp', 'fld.load_exp', (['"""Exp_Omega1=0.70_N0=1.80_2014-09-01_23-47-47"""'], {}), "('Exp_Omega1=0.70_N0=1.80_2014-09-01_23-47-47')\n", (7951, 7998), True, 'import fluiddyn as fld\n'), ((8230, 8328), 'fluidlab.exp.withconductivityprobe.DaemonMeasureProfiles', 'DaemonMeasureProfiles', ([], {'exp': 'exp', 'duration': '(600)', 'period': '(10)', 'speed_measurements': '(400)', 'speed_up': '(100)'}), '(exp=exp, duration=600, period=10, speed_measurements=\n 400, speed_up=100)\n', (8251, 8328), False, 'from fluidlab.exp.withconductivityprobe import DaemonMeasureProfiles\n'), ((8355, 8386), 'fluidlab.objects.rotatingobjects.DaemonRunningRotatingObject', 'DaemonRunningRotatingObject', (['rc'], {}), '(rc)\n', (8382, 8386), False, 'from fluidlab.objects.rotatingobjects import create_rotating_objects_kepler, DaemonRunningRotatingObject, RotatingObject\n'), ((8403, 8434), 'fluidlab.objects.rotatingobjects.DaemonRunningRotatingObject', 'DaemonRunningRotatingObject', (['rt'], {}), '(rt)\n', (8430, 8434), False, 'from fluidlab.objects.rotatingobjects import create_rotating_objects_kepler, DaemonRunningRotatingObject, RotatingObject\n'), ((966, 1015), 'ttk.Label.__init__', 'ttk.Label.__init__', (['self', 'parent', '*args'], {}), '(self, parent, *args, **kwargs)\n', (984, 1015), False, 'import ttk\n'), ((1055, 1071), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1069, 1071), False, 'import time\n'), ((1096, 1148), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': 't[3]', 'minutes': 't[4]', 'seconds': 't[5]'}), '(hours=t[3], minutes=t[4], seconds=t[5])\n', (1108, 1148), True, 'import datetime as dt\n'), ((1957, 1989), 
'ttk.Frame.__init__', 'ttk.Frame.__init__', (['self', 'master'], {}), '(self, master)\n', (1975, 1989), False, 'import ttk\n'), ((2203, 2257), 'ttk.Label', 'ttk.Label', (['self'], {'text': 'self.title', 'font': '"""TkHeadingFont"""'}), "(self, text=self.title, font='TkHeadingFont')\n", (2212, 2257), False, 'import ttk\n'), ((2326, 2386), 'ttk.Label', 'ttk.Label', (['self'], {'text': '"""rotation rate:"""', 'font': '"""TkHeadingFont"""'}), "(self, text='rotation rate:', font='TkHeadingFont')\n", (2335, 2386), False, 'import ttk\n'), ((2461, 2475), 'Tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2473, 2475), True, 'import Tkinter as tk\n'), ((2564, 2633), 'ttk.Label', 'ttk.Label', (['self'], {'textvariable': 'self.stringvar_rr', 'font': '"""TkHeadingFont"""'}), "(self, textvariable=self.stringvar_rr, font='TkHeadingFont')\n", (2573, 2633), False, 'import ttk\n'), ((2726, 2777), 'ttk.Label', 'ttk.Label', (['self'], {'text': '"""rad/s"""', 'font': '"""TkHeadingFont"""'}), "(self, text='rad/s', font='TkHeadingFont')\n", (2735, 2777), False, 'import ttk\n'), ((3757, 3789), 'ttk.Frame.__init__', 'ttk.Frame.__init__', (['self', 'master'], {}), '(self, master)\n', (3775, 3789), False, 'import ttk\n'), ((4003, 4057), 'ttk.Label', 'ttk.Label', (['self'], {'text': 'self.title', 'font': '"""TkHeadingFont"""'}), "(self, text=self.title, font='TkHeadingFont')\n", (4012, 4057), False, 'import ttk\n'), ((4130, 4158), 'ScrolledText.ScrolledText', 'ScrolledText', (['self'], {'height': '(6)'}), '(self, height=6)\n', (4142, 4158), False, 'from ScrolledText import ScrolledText\n'), ((5645, 5677), 'tkFont.nametofont', 'font.nametofont', (['"""TkDefaultFont"""'], {}), "('TkDefaultFont')\n", (5660, 5677), True, 'import tkFont as font\n'), ((5720, 5752), 'tkFont.nametofont', 'font.nametofont', (['"""TkHeadingFont"""'], {}), "('TkHeadingFont')\n", (5735, 5752), True, 'import tkFont as font\n'), ((5842, 5891), 'ttk.Frame.__init__', 'ttk.Frame.__init__', (['self', 'root'], {'padding': 
'"""5 5 5 5"""'}), "(self, root, padding='5 5 5 5')\n", (5860, 5891), False, 'import ttk\n'), ((7202, 7226), 'ttk.Frame.mainloop', 'ttk.Frame.mainloop', (['self'], {}), '(self)\n', (7220, 7226), False, 'import ttk\n'), ((5469, 5476), 'Tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (5474, 5476), True, 'import Tkinter as tk\n'), ((6183, 6240), 'ttk.Label', 'ttk.Label', (['self'], {'text': '"""Experiment:"""', 'font': '"""TkHeadingFont"""'}), "(self, text='Experiment:', font='TkHeadingFont')\n", (6192, 6240), False, 'import ttk\n'), ((6322, 6362), 'ttk.Label', 'ttk.Label', (['self'], {'text': 'self._exp.name_dir'}), '(self, text=self._exp.name_dir)\n', (6331, 6362), False, 'import ttk\n'), ((1253, 1273), 'datetime.datetime', 'dt.datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1264, 1273), True, 'import datetime as dt\n'), ((5243, 5275), 'ttk.Label', 'ttk.Label', (['master'], {'text': 'question'}), '(master, text=question)\n', (5252, 5275), False, 'import ttk\n'), ((7583, 7613), 'numpy.cos', 'np.cos', (['(2 * np.pi * t / period)'], {}), '(2 * np.pi * t / period)\n', (7589, 7613), True, 'import numpy as np\n')] |
### model1: 3levels on CNN (4layers in each)(takes 4input)
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.merge import concatenate
class cnn:
    """Three-level multi-kernel 1D CNN for sequence classification.

    Each level applies four parallel Conv1D branches (kernel sizes 3, 5, 7
    and 9), concatenates their outputs along the time axis and max-pools the
    result.  The network takes the same input replicated four times (one
    copy per first-level branch) and ends in a softmax over ``n_outputs``
    classes.
    """

    # Kernel sizes of the four parallel convolution branches at every level.
    _KERNEL_SIZES = (3, 5, 7, 9)

    @staticmethod
    def _conv_level(tensors, filters):
        """Build one convolution level from four parallel branches.

        Applies one Conv1D per (kernel size, input tensor) pair, concatenates
        the branch outputs along the time axis (axis=1) and max-pools the
        merged tensor with a window of 5.
        """
        branches = [Conv1D(filters=filters, kernel_size=k, activation='relu')(t)
                    for k, t in zip(cnn._KERNEL_SIZES, tensors)]
        merged = concatenate(branches, axis=1)
        return MaxPooling1D(pool_size=5)(merged)

    def __init__(self, n_timesteps, n_features, n_outputs, weights=None):
        """Construct the (untrained) model.

        Parameters
        ----------
        n_timesteps : int
            Length of the input sequences (e.g. 128).
        n_features : int
            Number of channels per timestep (e.g. 9).
        n_outputs : int
            Number of target classes.
        weights : dict, optional
            Per-class weights forwarded to ``fit`` in ``do_compile``.
        """
        self.n_timesteps = n_timesteps
        self.n_features = n_features
        self.n_outputs = n_outputs
        self.class_weight = weights
        # Level 1: four independent input branches, one per kernel size.
        inputs = [Input(shape=(n_timesteps, n_features)) for _ in range(4)]
        pool_1 = self._conv_level(inputs, 128)
        # Levels 2 and 3 feed the same pooled tensor into all four branches.
        pool_2 = self._conv_level([pool_1] * 4, 64)
        pool_3 = self._conv_level([pool_2] * 4, 32)
        # Classification head.
        flat_cnn = Flatten()(pool_3)
        dense = Dense(512, activation='relu')(flat_cnn)
        outputs = Dense(n_outputs, activation='softmax')(dense)
        self.cnn3_model = Model(inputs, outputs)

    def do_compile(self, trainX, testX, trainy_one_hot, testy_one_hot):
        """Compile, train and return the fitted model.

        Trains for 30 epochs (batch size 64) with categorical cross-entropy
        and the Adam optimizer, replicating each input array across the four
        input branches and validating on the test split every epoch.
        """
        self.cnn3_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        # Fit the network; the History object returned by fit is not needed.
        self.cnn3_model.fit(x=[trainX, trainX, trainX, trainX], y=trainy_one_hot,
                            epochs=30, batch_size=64, class_weight=self.class_weight,
                            validation_data=([testX, testX, testX, testX], testy_one_hot))
        self.cnn3_model.summary()
        return self.cnn3_model

    def prediction(self, testX):
        """Return predicted class indices (argmax of softmax) for ``testX``."""
        predy = self.cnn3_model.predict([testX, testX, testX, testX])
        return np.argmax(predy, axis=-1)
| [
"numpy.argmax",
"keras.layers.merge.concatenate",
"keras.layers.convolutional.MaxPooling1D",
"keras.models.Model",
"keras.layers.Flatten",
"keras.layers.Dense",
"keras.layers.convolutional.Conv1D",
"keras.layers.Input"
] | [((656, 694), 'keras.layers.Input', 'Input', ([], {'shape': '(n_timesteps, n_features)'}), '(shape=(n_timesteps, n_features))\n', (661, 694), False, 'from keras.layers import Input\n'), ((840, 878), 'keras.layers.Input', 'Input', ([], {'shape': '(n_timesteps, n_features)'}), '(shape=(n_timesteps, n_features))\n', (845, 878), False, 'from keras.layers import Input\n'), ((1011, 1049), 'keras.layers.Input', 'Input', ([], {'shape': '(n_timesteps, n_features)'}), '(shape=(n_timesteps, n_features))\n', (1016, 1049), False, 'from keras.layers import Input\n'), ((1182, 1220), 'keras.layers.Input', 'Input', ([], {'shape': '(n_timesteps, n_features)'}), '(shape=(n_timesteps, n_features))\n', (1187, 1220), False, 'from keras.layers import Input\n'), ((1353, 1410), 'keras.layers.merge.concatenate', 'concatenate', (['[conv1_1, conv1_2, conv1_3, conv1_4]'], {'axis': '(1)'}), '([conv1_1, conv1_2, conv1_3, conv1_4], axis=1)\n', (1364, 1410), False, 'from keras.layers.merge import concatenate\n'), ((1925, 1982), 'keras.layers.merge.concatenate', 'concatenate', (['[conv2_1, conv2_2, conv2_3, conv2_4]'], {'axis': '(1)'}), '([conv2_1, conv2_2, conv2_3, conv2_4], axis=1)\n', (1936, 1982), False, 'from keras.layers.merge import concatenate\n'), ((2495, 2552), 'keras.layers.merge.concatenate', 'concatenate', (['[conv3_1, conv3_2, conv3_3, conv3_4]'], {'axis': '(1)'}), '([conv3_1, conv3_2, conv3_3, conv3_4], axis=1)\n', (2506, 2552), False, 'from keras.layers.merge import concatenate\n'), ((2875, 2935), 'keras.models.Model', 'Model', (['[inputs1_1, inputs1_2, inputs1_3, inputs1_4]', 'outputs'], {}), '([inputs1_1, inputs1_2, inputs1_3, inputs1_4], outputs)\n', (2880, 2935), False, 'from keras.models import Model\n'), ((3529, 3554), 'numpy.argmax', 'np.argmax', (['predy'], {'axis': '(-1)'}), '(predy, axis=-1)\n', (3538, 3554), True, 'import numpy as np\n'), ((719, 772), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'activation': 
'"""relu"""'}), "(filters=128, kernel_size=3, activation='relu')\n", (725, 772), False, 'from keras.layers.convolutional import Conv1D\n'), ((896, 949), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(128)', 'kernel_size': '(5)', 'activation': '"""relu"""'}), "(filters=128, kernel_size=5, activation='relu')\n", (902, 949), False, 'from keras.layers.convolutional import Conv1D\n'), ((1067, 1120), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(128)', 'kernel_size': '(7)', 'activation': '"""relu"""'}), "(filters=128, kernel_size=7, activation='relu')\n", (1073, 1120), False, 'from keras.layers.convolutional import Conv1D\n'), ((1238, 1291), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(128)', 'kernel_size': '(9)', 'activation': '"""relu"""'}), "(filters=128, kernel_size=9, activation='relu')\n", (1244, 1291), False, 'from keras.layers.convolutional import Conv1D\n'), ((1445, 1470), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(5)'}), '(pool_size=5)\n', (1457, 1470), False, 'from keras.layers.convolutional import MaxPooling1D\n'), ((1535, 1587), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=3, activation='relu')\n", (1541, 1587), False, 'from keras.layers.convolutional import Conv1D\n'), ((1632, 1684), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(64)', 'kernel_size': '(5)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=5, activation='relu')\n", (1638, 1684), False, 'from keras.layers.convolutional import Conv1D\n'), ((1729, 1781), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(64)', 'kernel_size': '(7)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=7, activation='relu')\n", (1735, 1781), False, 'from keras.layers.convolutional import Conv1D\n'), ((1827, 1879), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], 
{'filters': '(64)', 'kernel_size': '(9)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=9, activation='relu')\n", (1833, 1879), False, 'from keras.layers.convolutional import Conv1D\n'), ((2013, 2038), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(5)'}), '(pool_size=5)\n', (2025, 2038), False, 'from keras.layers.convolutional import MaxPooling1D\n'), ((2105, 2157), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=3, activation='relu')\n", (2111, 2157), False, 'from keras.layers.convolutional import Conv1D\n'), ((2202, 2254), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=5, activation='relu')\n", (2208, 2254), False, 'from keras.layers.convolutional import Conv1D\n'), ((2299, 2351), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(7)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=7, activation='relu')\n", (2305, 2351), False, 'from keras.layers.convolutional import Conv1D\n'), ((2397, 2449), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(9)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=9, activation='relu')\n", (2403, 2449), False, 'from keras.layers.convolutional import Conv1D\n'), ((2583, 2608), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(5)'}), '(pool_size=5)\n', (2595, 2608), False, 'from keras.layers.convolutional import MaxPooling1D\n'), ((2655, 2664), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2662, 2664), False, 'from keras.layers import Flatten\n'), ((2720, 2749), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (2725, 2749), False, 'from keras.layers import Dense\n'), ((2778, 2816), 'keras.layers.Dense', 
'Dense', (['n_outputs'], {'activation': '"""softmax"""'}), "(n_outputs, activation='softmax')\n", (2783, 2816), False, 'from keras.layers import Dense\n')] |
#!/usr/bin/python
import sys
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def main():
    """Plot solution quality and runtime against the alpha parameter.

    Reads a results CSV (path given as the first CLI argument, defaulting to
    'wyniki.csv') and draws a twin-axis plot: the relative error of the
    calculated path weight versus the optimal (defined) weight on the left
    axis, and the execution time on the right axis, both as functions of
    alpha.  Each point is annotated with its alpha value.
    """
    file_name = str(sys.argv[1]) if len(sys.argv) > 1 else 'wyniki.csv'
    data = pd.read_csv(file_name, usecols=['graph_name', 'calculated_path_weight',
                                     'defined_path_weight', 'time', 'alpha'])
    # Access columns by name: positional indexing into an ndarray built after
    # `usecols` silently depends on the column order in the file.
    alphas = data['alpha'].to_numpy()
    times = data['time'].to_numpy()
    calculated = data['calculated_path_weight'].to_numpy()
    defined = data['defined_path_weight'].to_numpy()
    # Relative error (in percent) of the computed path versus the optimum.
    quality = 100 * (calculated - defined) / calculated
    col1 = 'steelblue'
    col2 = 'red'
    fig, ax = plt.subplots()
    ax.plot(alphas, quality, color=col1, marker='o', linewidth=3)
    ax.set_ylabel('Stosunek błędu do wartości optymalnej [%]', color=col1)
    ax.set_xlabel('Współczynnik alfa')
    ax2 = ax.twinx()
    ax2.plot(alphas, times, color=col2, marker='o', linewidth=3)
    ax2.set_ylabel('Czas wykonania algorytmu [s]', color=col2)
    # Label every point with its alpha value on both axes.
    for x, y in zip(alphas, quality):
        ax.annotate(str(x), (x, y), rotation=45)
    for x, y in zip(alphas, times):
        ax2.annotate(str(x), (x, y), rotation=45)
    plt.show()


if __name__ == "__main__":
    main()
| [
"pandas.read_csv",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((233, 349), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'usecols': "['graph_name', 'calculated_path_weight', 'defined_path_weight', 'time', 'alpha'\n ]"}), "(file_name, usecols=['graph_name', 'calculated_path_weight',\n 'defined_path_weight', 'time', 'alpha'])\n", (244, 349), True, 'import pandas as pd\n'), ((352, 366), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (360, 366), True, 'import numpy as np\n'), ((602, 616), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (614, 616), True, 'from matplotlib import pyplot as plt\n'), ((1144, 1154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1152, 1154), True, 'from matplotlib import pyplot as plt\n')] |
import os
import sys
import inspect
import itertools
import warnings
import numpy as np
from numpy import ma
import pandas as pd
import xarray as xr
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date
from scipy import ndimage as ndi
import argparse
# Command-line interface for the validation script.
parser = argparse.ArgumentParser(description="""Validate detected DCCs using GOES-16 GLM data""")
parser.add_argument('file', type=str, help='File to validate')
parser.add_argument('-margin', type=int, default=10,
                    help='Tolerance margin for validation (in pixels)')
parser.add_argument('-gd', type=str, default='../data/GOES16',
                    help='GOES directory')
parser.add_argument('-sd', type=str, default='../data/dcc_detect',
                    help='Directory to save output files')
parser.add_argument('-cglm', action="store_true",
                    help='clobber existing glm files')

args = parser.parse_args()

# Module-level names consumed by the validation body below.
file = args.file
margin = args.margin
clobber_glm = args.cglm
goes_data_path = args.gd
# Best-effort creation of the GOES data directory.  FileExistsError is a
# subclass of OSError, so a single handler covers both the creation race and
# permission problems; failures are ignored here and surface later when the
# directory is actually used.
if not os.path.isdir(goes_data_path):
    try:
        os.makedirs(goes_data_path, exist_ok=True)
    except OSError:
        pass
save_dir = args.sd
# Best-effort creation of the output directory (same pattern as the GOES data
# directory above: FileExistsError is already an OSError, so one handler
# covers both the creation race and permission problems).
if not os.path.isdir(save_dir):
    try:
        os.makedirs(save_dir, exist_ok=True)
    except OSError:
        pass
# def validation(file, margin, goes_data_path, save_dir):
# NOTE(review): the function wrapper above is commented out and replaced by
# `if True:` so the body runs at import time using the module-level CLI
# variables (file, margin, goes_data_path, save_dir, clobber_glm).
if True:
    """
    Validation process for detected DCCs in the given file
    """
    # Project-local imports are done inside the body (mirroring the original
    # function's style) so they only run when validation actually executes.
    from tobac_flow import io, abi, glm
    from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray
    from tobac_flow.analysis import filter_labels_by_length, filter_labels_by_length_and_mask, apply_func_to_labels
    from tobac_flow.validation import get_min_dist_for_objects, get_marker_distance
    print(datetime.now(),'Loading detected DCCs', flush=True)
    print(file, flush=True)
    # Detection dataset produced by the DCC-detection step; assumed to carry
    # core_label / thick_anvil_label / wvd_label variables on a (t, y, x)
    # grid -- TODO confirm against the detection script.
    detection_ds = xr.open_dataset(file)
    validation_ds = xr.Dataset()
    # Hourly timestamps spanning the detection period (used to locate the
    # matching GLM files).  NOTE(review): `closed=` is deprecated in newer
    # pandas in favour of `inclusive=` -- confirm the pinned pandas version.
    dates = pd.date_range(detection_ds.t.data[0], detection_ds.t.data[-1], freq='H', closed='left').to_pydatetime()
    glm_save_name = 'gridded_glm_flashes_%s.nc' % (dates[0].strftime('%Y%m%d_%H0000'))
    glm_save_path = os.path.join(save_dir, glm_save_name)
    validation_save_name = 'validation_dccs_%s.nc' % (dates[0].strftime('%Y%m%d_%H0000'))
    validation_save_path = os.path.join(save_dir, validation_save_name)
    """
    Start validation
    """
    # Reuse previously gridded GLM flashes unless the user asked to clobber.
    if os.path.exists(glm_save_path) and not clobber_glm:
        print(datetime.now(), 'Loading from %s' % (glm_save_path), flush=True)
        gridded_flash_ds = xr.open_dataset(glm_save_path)
        glm_grid = gridded_flash_ds.glm_flashes
    else:
        gridded_flash_ds = create_new_goes_ds(detection_ds)
        print(datetime.now(),'Processing GLM data', flush=True)
        # Get GLM data
        # Process new GLM data
        glm_files = io.find_glm_files(dates, satellite=16, save_dir=goes_data_path,
                                  replicate_path=True, check_download=True,
                                  n_attempts=1, download_missing=True, verbose=True,
                                  min_storage=2**30)
        # Map each file to its observation datetime (dict keyed by datetime).
        glm_files = {io.get_goes_date(i):i for i in glm_files}
        print('%d files found'%len(glm_files), flush=True)
        if len(glm_files)==0:
            raise ValueError("No GLM Files discovered, skipping validation")
        else:
            print(datetime.now(),'Regridding GLM data', flush=True)
            # Regrid point flashes onto the detection (t, y, x) grid.
            glm_grid = glm.regrid_glm(glm_files, gridded_flash_ds, corrected=False)
        add_dataarray_to_ds(create_dataarray(glm_grid.data, ('t','y','x'), "glm_flashes",
                                             long_name="number of flashes detected by GLM", units="",
                                             dtype=np.int32), gridded_flash_ds)
        add_dataarray_to_ds(create_dataarray(np.sum(glm_grid.data), tuple(), "glm_flash_count",
                                             long_name="total number of GLM flashes",
                                             dtype=np.int32), gridded_flash_ds)
        print(datetime.now(), 'Saving to %s' % (glm_save_path), flush=True)
        # Cache the gridded flashes so later runs can skip the regridding.
        gridded_flash_ds.to_netcdf(glm_save_path)
    print(datetime.now(),'Calculating marker distances', flush=True)
    # Distance transforms: for every grid cell, the distance to the nearest
    # labelled object (or flash), searched over a +/-3 time-step window.
    marker_distance = get_marker_distance(detection_ds.core_label.data, time_range=3)
    anvil_distance = get_marker_distance(detection_ds.thick_anvil_label, time_range=3)
    glm_distance = get_marker_distance(glm_grid, time_range=3)
    wvd_distance = get_marker_distance(detection_ds.wvd_label, time_range=3)
    # Create an array to filter objects near to boundaries
    # (3 steps in time, 10 pixels in y and x are excluded at each edge).
    edge_filter_array = np.full(marker_distance.shape, 1).astype('bool')
    edge_filter_array[:3] = 0
    edge_filter_array[-3:] = 0
    edge_filter_array[:,:10] = 0
    edge_filter_array[:,-10:] = 0
    edge_filter_array[:,:,:10] = 0
    edge_filter_array[:,:,-10:] = 0
    # Filter objects near to missing glm data
    # (-1 marks missing GLM cells; dilate by 3 to add a safety margin).
    wh_missing_glm = ndi.binary_dilation(glm_grid==-1, iterations=3)
    edge_filter_array[wh_missing_glm] = 0
    # Expand per-cell distances into one entry per flash: np.repeat with the
    # (masked) flash counts yields a 1-D "distance of each flash" array.
    flash_distance_to_marker = np.repeat(marker_distance.ravel(), (glm_grid.data.astype(int)*edge_filter_array.astype(int)).ravel())
    flash_distance_to_wvd = np.repeat(wvd_distance.ravel(), (glm_grid.data.astype(int)*edge_filter_array.astype(int)).ravel())
    flash_distance_to_anvil = np.repeat(anvil_distance.ravel(), (glm_grid.data.astype(int)*edge_filter_array.astype(int)).ravel())
    n_glm_in_margin = np.sum(glm_grid.data*edge_filter_array.astype(int))
    print(datetime.now(), 'Validating detection accuracy', flush=True)
    # Calculate probability of detection for each case
    # NOTE(review): the detection threshold is hard-coded to 10 px here and
    # below; the CLI `margin` argument appears unused -- confirm intent.
    if n_glm_in_margin>0:
        growth_pod = np.sum(flash_distance_to_marker<=10)/n_glm_in_margin
        growth_pod_hist = np.histogram(flash_distance_to_marker, bins=40,
                                        range=[0,40])[0] / n_glm_in_margin
        wvd_pod = np.sum(flash_distance_to_wvd<=10)/n_glm_in_margin
        wvd_pod_hist = np.histogram(flash_distance_to_wvd, bins=40,
                                     range=[0,40])[0] / n_glm_in_margin
        anvil_pod = np.sum(flash_distance_to_anvil<=10)/n_glm_in_margin
        anvil_pod_hist = np.histogram(flash_distance_to_anvil, bins=40,
                                       range=[0,40])[0] / n_glm_in_margin
    else:
        # No flashes inside the margin: POD is undefined (NaN) and the
        # histograms are all-zero.
        growth_pod = np.float64(np.nan)
        growth_pod_hist = np.zeros([40])
        wvd_pod = np.float64(np.nan)
        wvd_pod_hist = np.zeros([40])
        anvil_pod = np.float64(np.nan)
        anvil_pod_hist = np.zeros([40])
    # Calculate false alarm rate
    # An object counts only if it lies entirely inside the edge/missing-data
    # margin (np.nanmin over its footprint of the boolean filter).
    growth_margin_flag = apply_func_to_labels(detection_ds.core_label.data,
                                            edge_filter_array, np.nanmin).astype('bool')
    n_growth_in_margin = np.sum(growth_margin_flag)
    growth_min_distance = get_min_dist_for_objects(glm_distance, detection_ds.core_label.data)[0]
    if n_growth_in_margin>0:
        # FAR: fraction of detected cores farther than 10 px from any flash.
        growth_far = np.sum(growth_min_distance[growth_margin_flag]>10) / n_growth_in_margin
        growth_far_hist = np.histogram(growth_min_distance[growth_margin_flag], bins=40,
                                        range=[0,40])[0] / n_growth_in_margin
    else:
        growth_far = np.float64(np.nan)
        growth_far_hist = np.zeros([40])
    wvd_margin_flag = apply_func_to_labels(detection_ds.wvd_label.data,
                                         edge_filter_array, np.nanmin).astype('bool')
    n_wvd_in_margin = np.sum(wvd_margin_flag)
    wvd_min_distance = get_min_dist_for_objects(glm_distance, detection_ds.wvd_label.data)[0]
    if n_wvd_in_margin>0:
        wvd_far = np.sum(wvd_min_distance[wvd_margin_flag]>10) / n_wvd_in_margin
        wvd_far_hist = np.histogram(wvd_min_distance[wvd_margin_flag], bins=40,
                                     range=[0,40])[0] / n_wvd_in_margin
    else:
        wvd_far = np.float64(np.nan)
        wvd_far_hist = np.zeros([40])
    anvil_margin_flag = apply_func_to_labels(detection_ds.thick_anvil_label.data,
                                           edge_filter_array, np.nanmin).astype('bool')
    n_anvil_in_margin = np.sum(anvil_margin_flag)
    anvil_min_distance = get_min_dist_for_objects(glm_distance, detection_ds.thick_anvil_label.data)[0]
    if n_anvil_in_margin>0:
        anvil_far = np.sum(anvil_min_distance[anvil_margin_flag]>10) / n_anvil_in_margin
        anvil_far_hist = np.histogram(anvil_min_distance[anvil_margin_flag], bins=40,
                                       range=[0,40])[0] / n_anvil_in_margin
    else:
        anvil_far = np.float64(np.nan)
        anvil_far_hist = np.zeros([40])
    # Summary of the validation scores on stdout.
    print('markers:', flush=True)
    print('n =', n_growth_in_margin, flush=True)
    print('POD =', growth_pod, flush=True)
    print('FAR = ', growth_far, flush=True)
    print('WVD:', flush=True)
    print('n =', n_wvd_in_margin, flush=True)
    print('POD =', wvd_pod, flush=True)
    print('FAR = ', wvd_far, flush=True)
    print('anvil:', flush=True)
    print('n =', n_anvil_in_margin, flush=True)
    print('POD =', anvil_pod, flush=True)
    print('FAR = ', anvil_far, flush=True)
    print('total GLM flashes: ', np.sum(glm_grid.data), flush=True)
    print('total in margin: ', n_glm_in_margin, flush=True)
    """
    Finish validation
    """
    # Bundle every per-flash / per-object metric into the output dataset.
    # GLM validation
    add_dataarray_to_ds(create_dataarray(flash_distance_to_marker, ('flash',), "flash_core_distance",
                                         long_name="closest distance from flash to detected core",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(flash_distance_to_wvd, ('flash',), "flash_wvd_distance",
                                         long_name="closest distance from flash to detected wvd region",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(flash_distance_to_anvil, ('flash',), "flash_anvil_distance",
                                         long_name="closest distance from flash to detected anvil",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(n_glm_in_margin, tuple(), "flash_count",
                                         long_name="total number of flashes inside margin",
                                         dtype=np.int32), validation_ds)
    # anvil validation
    add_dataarray_to_ds(create_dataarray(anvil_min_distance, ('anvil',), "anvil_glm_distance",
                                         long_name="closest distance from anvil to GLM flash",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(anvil_margin_flag, ('anvil',), "anvil_margin_flag",
                                         long_name="margin flag for anvil",
                                         dtype=bool), validation_ds)
    add_dataarray_to_ds(create_dataarray(anvil_far_hist, ('bins',), "anvil_far_histogram",
                                         long_name="FAR histogram for anvils",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(anvil_pod_hist, ('bins',), "anvil_pod_histogram",
                                         long_name="POD histogram for anvils",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(anvil_pod, tuple(), "anvil_pod",
                                         long_name="POD for anvils",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(anvil_far, tuple(), "anvil_far",
                                         long_name="FAR for anvils",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(n_anvil_in_margin, tuple(), "anvil_count",
                                         long_name="total number of anvils inside margin",
                                         dtype=np.int32), validation_ds)
    # wvd validation
    add_dataarray_to_ds(create_dataarray(wvd_min_distance, ('wvd',), "wvd_glm_distance",
                                         long_name="closest distance from wvd to GLM flash",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(wvd_margin_flag, ('wvd',), "wvd_margin_flag",
                                         long_name="margin flag for wvd",
                                         dtype=bool), validation_ds)
    add_dataarray_to_ds(create_dataarray(wvd_far_hist, ('bins',), "wvd_far_histogram",
                                         long_name="FAR histogram for wvds",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(wvd_pod_hist, ('bins',), "wvd_pod_histogram",
                                         long_name="POD histogram for wvds",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(wvd_pod, tuple(), "wvd_pod",
                                         long_name="POD for wvds",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(wvd_far, tuple(), "wvd_far",
                                         long_name="FAR for wvds",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(n_wvd_in_margin, tuple(), "wvd_count",
                                         long_name="total number of wvds inside margin",
                                         dtype=np.int32), validation_ds)
    # growth validation
    add_dataarray_to_ds(create_dataarray(growth_min_distance, ('core',), "core_glm_distance",
                                         long_name="closest distance from core to GLM flash",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(growth_margin_flag, ('core',), "core_margin_flag",
                                         long_name="margin flag for core",
                                         dtype=bool), validation_ds)
    add_dataarray_to_ds(create_dataarray(growth_far_hist, ('bins',), "core_far_histogram",
                                         long_name="FAR histogram for cores",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(growth_pod_hist, ('bins',), "core_pod_histogram",
                                         long_name="POD histogram for cores",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(growth_pod, tuple(), "core_pod",
                                         long_name="POD for cores",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(growth_far, tuple(), "core_far",
                                         long_name="FAR for cores",
                                         dtype=np.float32), validation_ds)
    add_dataarray_to_ds(create_dataarray(n_growth_in_margin, tuple(), "core_count",
                                         long_name="total number of cores inside margin",
                                         dtype=np.int32), validation_ds)
    print(datetime.now(), 'Saving to %s' % (validation_save_path), flush=True)
    validation_ds.to_netcdf(validation_save_path)
# if __name__=='__main__':
#     validation(file, margin, goes_data_path, save_dir)
| [
"tobac_flow.glm.regrid_glm",
"numpy.sum",
"argparse.ArgumentParser",
"tobac_flow.io.get_goes_date",
"tobac_flow.validation.get_min_dist_for_objects",
"numpy.histogram",
"tobac_flow.io.find_glm_files",
"numpy.float64",
"tobac_flow.validation.get_marker_distance",
"os.path.join",
"numpy.full",
"... | [((298, 387), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Validate detected DCCs using GOES-16 GLM data"""'}), "(description=\n 'Validate detected DCCs using GOES-16 GLM data')\n", (321, 387), False, 'import argparse\n'), ((996, 1025), 'os.path.isdir', 'os.path.isdir', (['goes_data_path'], {}), '(goes_data_path)\n', (1009, 1025), False, 'import os\n'), ((1247, 1270), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (1260, 1270), False, 'import os\n'), ((2000, 2021), 'xarray.open_dataset', 'xr.open_dataset', (['file'], {}), '(file)\n', (2015, 2021), True, 'import xarray as xr\n'), ((2042, 2054), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (2052, 2054), True, 'import xarray as xr\n'), ((2280, 2317), 'os.path.join', 'os.path.join', (['save_dir', 'glm_save_name'], {}), '(save_dir, glm_save_name)\n', (2292, 2317), False, 'import os\n'), ((2435, 2479), 'os.path.join', 'os.path.join', (['save_dir', 'validation_save_name'], {}), '(save_dir, validation_save_name)\n', (2447, 2479), False, 'import os\n'), ((4410, 4473), 'tobac_flow.validation.get_marker_distance', 'get_marker_distance', (['detection_ds.core_label.data'], {'time_range': '(3)'}), '(detection_ds.core_label.data, time_range=3)\n', (4429, 4473), False, 'from tobac_flow.validation import get_min_dist_for_objects, get_marker_distance\n'), ((4495, 4560), 'tobac_flow.validation.get_marker_distance', 'get_marker_distance', (['detection_ds.thick_anvil_label'], {'time_range': '(3)'}), '(detection_ds.thick_anvil_label, time_range=3)\n', (4514, 4560), False, 'from tobac_flow.validation import get_min_dist_for_objects, get_marker_distance\n'), ((4580, 4623), 'tobac_flow.validation.get_marker_distance', 'get_marker_distance', (['glm_grid'], {'time_range': '(3)'}), '(glm_grid, time_range=3)\n', (4599, 4623), False, 'from tobac_flow.validation import get_min_dist_for_objects, get_marker_distance\n'), ((4643, 4700), 'tobac_flow.validation.get_marker_distance', 
'get_marker_distance', (['detection_ds.wvd_label'], {'time_range': '(3)'}), '(detection_ds.wvd_label, time_range=3)\n', (4662, 4700), False, 'from tobac_flow.validation import get_min_dist_for_objects, get_marker_distance\n'), ((5101, 5150), 'scipy.ndimage.binary_dilation', 'ndi.binary_dilation', (['(glm_grid == -1)'], {'iterations': '(3)'}), '(glm_grid == -1, iterations=3)\n', (5120, 5150), True, 'from scipy import ndimage as ndi\n'), ((6928, 6954), 'numpy.sum', 'np.sum', (['growth_margin_flag'], {}), '(growth_margin_flag)\n', (6934, 6954), True, 'import numpy as np\n'), ((7616, 7639), 'numpy.sum', 'np.sum', (['wvd_margin_flag'], {}), '(wvd_margin_flag)\n', (7622, 7639), True, 'import numpy as np\n'), ((8271, 8296), 'numpy.sum', 'np.sum', (['anvil_margin_flag'], {}), '(anvil_margin_flag)\n', (8277, 8296), True, 'import numpy as np\n'), ((1044, 1071), 'os.makedirs', 'os.makedirs', (['goes_data_path'], {}), '(goes_data_path)\n', (1055, 1071), False, 'import os\n'), ((1289, 1310), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1300, 1310), False, 'import os\n'), ((1901, 1915), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1913, 1915), False, 'from datetime import datetime, timedelta\n'), ((2525, 2554), 'os.path.exists', 'os.path.exists', (['glm_save_path'], {}), '(glm_save_path)\n', (2539, 2554), False, 'import os\n'), ((2682, 2712), 'xarray.open_dataset', 'xr.open_dataset', (['glm_save_path'], {}), '(glm_save_path)\n', (2697, 2712), True, 'import xarray as xr\n'), ((2798, 2830), 'tobac_flow.dataset.create_new_goes_ds', 'create_new_goes_ds', (['detection_ds'], {}), '(detection_ds)\n', (2816, 2830), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((2970, 3155), 'tobac_flow.io.find_glm_files', 'io.find_glm_files', (['dates'], {'satellite': '(16)', 'save_dir': 'goes_data_path', 'replicate_path': '(True)', 'check_download': '(True)', 
'n_attempts': '(1)', 'download_missing': '(True)', 'verbose': '(True)', 'min_storage': '(2 ** 30)'}), '(dates, satellite=16, save_dir=goes_data_path,\n replicate_path=True, check_download=True, n_attempts=1,\n download_missing=True, verbose=True, min_storage=2 ** 30)\n', (2987, 3155), False, 'from tobac_flow import io, abi, glm\n'), ((4329, 4343), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4341, 4343), False, 'from datetime import datetime, timedelta\n'), ((5669, 5683), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5681, 5683), False, 'from datetime import datetime, timedelta\n'), ((6488, 6506), 'numpy.float64', 'np.float64', (['np.nan'], {}), '(np.nan)\n', (6498, 6506), True, 'import numpy as np\n'), ((6533, 6547), 'numpy.zeros', 'np.zeros', (['[40]'], {}), '([40])\n', (6541, 6547), True, 'import numpy as np\n'), ((6566, 6584), 'numpy.float64', 'np.float64', (['np.nan'], {}), '(np.nan)\n', (6576, 6584), True, 'import numpy as np\n'), ((6608, 6622), 'numpy.zeros', 'np.zeros', (['[40]'], {}), '([40])\n', (6616, 6622), True, 'import numpy as np\n'), ((6643, 6661), 'numpy.float64', 'np.float64', (['np.nan'], {}), '(np.nan)\n', (6653, 6661), True, 'import numpy as np\n'), ((6687, 6701), 'numpy.zeros', 'np.zeros', (['[40]'], {}), '([40])\n', (6695, 6701), True, 'import numpy as np\n'), ((6981, 7049), 'tobac_flow.validation.get_min_dist_for_objects', 'get_min_dist_for_objects', (['glm_distance', 'detection_ds.core_label.data'], {}), '(glm_distance, detection_ds.core_label.data)\n', (7005, 7049), False, 'from tobac_flow.validation import get_min_dist_for_objects, get_marker_distance\n'), ((7373, 7391), 'numpy.float64', 'np.float64', (['np.nan'], {}), '(np.nan)\n', (7383, 7391), True, 'import numpy as np\n'), ((7418, 7432), 'numpy.zeros', 'np.zeros', (['[40]'], {}), '([40])\n', (7426, 7432), True, 'import numpy as np\n'), ((7663, 7730), 'tobac_flow.validation.get_min_dist_for_objects', 'get_min_dist_for_objects', (['glm_distance', 
'detection_ds.wvd_label.data'], {}), '(glm_distance, detection_ds.wvd_label.data)\n', (7687, 7730), False, 'from tobac_flow.validation import get_min_dist_for_objects, get_marker_distance\n'), ((8017, 8035), 'numpy.float64', 'np.float64', (['np.nan'], {}), '(np.nan)\n', (8027, 8035), True, 'import numpy as np\n'), ((8059, 8073), 'numpy.zeros', 'np.zeros', (['[40]'], {}), '([40])\n', (8067, 8073), True, 'import numpy as np\n'), ((8322, 8397), 'tobac_flow.validation.get_min_dist_for_objects', 'get_min_dist_for_objects', (['glm_distance', 'detection_ds.thick_anvil_label.data'], {}), '(glm_distance, detection_ds.thick_anvil_label.data)\n', (8346, 8397), False, 'from tobac_flow.validation import get_min_dist_for_objects, get_marker_distance\n'), ((8705, 8723), 'numpy.float64', 'np.float64', (['np.nan'], {}), '(np.nan)\n', (8715, 8723), True, 'import numpy as np\n'), ((8749, 8763), 'numpy.zeros', 'np.zeros', (['[40]'], {}), '([40])\n', (8757, 8763), True, 'import numpy as np\n'), ((9293, 9314), 'numpy.sum', 'np.sum', (['glm_grid.data'], {}), '(glm_grid.data)\n', (9299, 9314), True, 'import numpy as np\n'), ((9473, 9635), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['flash_distance_to_marker', "('flash',)", '"""flash_core_distance"""'], {'long_name': '"""closest distance from flash to detected core"""', 'dtype': 'np.float32'}), "(flash_distance_to_marker, ('flash',),\n 'flash_core_distance', long_name=\n 'closest distance from flash to detected core', dtype=np.float32)\n", (9489, 9635), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((9749, 9913), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['flash_distance_to_wvd', "('flash',)", '"""flash_wvd_distance"""'], {'long_name': '"""closest distance from flash to detected wvd region"""', 'dtype': 'np.float32'}), "(flash_distance_to_wvd, ('flash',), 'flash_wvd_distance',\n long_name='closest 
distance from flash to detected wvd region', dtype=\n np.float32)\n", (9765, 9913), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((10027, 10190), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['flash_distance_to_anvil', "('flash',)", '"""flash_anvil_distance"""'], {'long_name': '"""closest distance from flash to detected anvil"""', 'dtype': 'np.float32'}), "(flash_distance_to_anvil, ('flash',),\n 'flash_anvil_distance', long_name=\n 'closest distance from flash to detected anvil', dtype=np.float32)\n", (10043, 10190), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((10574, 10720), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['anvil_min_distance', "('anvil',)", '"""anvil_glm_distance"""'], {'long_name': '"""closest distance from anvil to GLM flash"""', 'dtype': 'np.float32'}), "(anvil_min_distance, ('anvil',), 'anvil_glm_distance',\n long_name='closest distance from anvil to GLM flash', dtype=np.float32)\n", (10590, 10720), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((10839, 10958), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['anvil_margin_flag', "('anvil',)", '"""anvil_margin_flag"""'], {'long_name': '"""margin flag for anvil"""', 'dtype': 'bool'}), "(anvil_margin_flag, ('anvil',), 'anvil_margin_flag',\n long_name='margin flag for anvil', dtype=bool)\n", (10855, 10958), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((11077, 11203), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['anvil_far_hist', "('bins',)", '"""anvil_far_histogram"""'], {'long_name': '"""FAR histogram for 
anvils"""', 'dtype': 'np.float32'}), "(anvil_far_hist, ('bins',), 'anvil_far_histogram',\n long_name='FAR histogram for anvils', dtype=np.float32)\n", (11093, 11203), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((11322, 11448), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['anvil_pod_hist', "('bins',)", '"""anvil_pod_histogram"""'], {'long_name': '"""POD histogram for anvils"""', 'dtype': 'np.float32'}), "(anvil_pod_hist, ('bins',), 'anvil_pod_histogram',\n long_name='POD histogram for anvils', dtype=np.float32)\n", (11338, 11448), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((12272, 12411), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['wvd_min_distance', "('wvd',)", '"""wvd_glm_distance"""'], {'long_name': '"""closest distance from wvd to GLM flash"""', 'dtype': 'np.float32'}), "(wvd_min_distance, ('wvd',), 'wvd_glm_distance', long_name=\n 'closest distance from wvd to GLM flash', dtype=np.float32)\n", (12288, 12411), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((12529, 12641), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['wvd_margin_flag', "('wvd',)", '"""wvd_margin_flag"""'], {'long_name': '"""margin flag for wvd"""', 'dtype': 'bool'}), "(wvd_margin_flag, ('wvd',), 'wvd_margin_flag', long_name=\n 'margin flag for wvd', dtype=bool)\n", (12545, 12641), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((12759, 12880), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['wvd_far_hist', "('bins',)", '"""wvd_far_histogram"""'], {'long_name': '"""FAR histogram for wvds"""', 'dtype': 
'np.float32'}), "(wvd_far_hist, ('bins',), 'wvd_far_histogram', long_name=\n 'FAR histogram for wvds', dtype=np.float32)\n", (12775, 12880), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((12998, 13119), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['wvd_pod_hist', "('bins',)", '"""wvd_pod_histogram"""'], {'long_name': '"""POD histogram for wvds"""', 'dtype': 'np.float32'}), "(wvd_pod_hist, ('bins',), 'wvd_pod_histogram', long_name=\n 'POD histogram for wvds', dtype=np.float32)\n", (13014, 13119), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((13927, 14071), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['growth_min_distance', "('core',)", '"""core_glm_distance"""'], {'long_name': '"""closest distance from core to GLM flash"""', 'dtype': 'np.float32'}), "(growth_min_distance, ('core',), 'core_glm_distance',\n long_name='closest distance from core to GLM flash', dtype=np.float32)\n", (13943, 14071), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((14190, 14307), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['growth_margin_flag', "('core',)", '"""core_margin_flag"""'], {'long_name': '"""margin flag for core"""', 'dtype': 'bool'}), "(growth_margin_flag, ('core',), 'core_margin_flag',\n long_name='margin flag for core', dtype=bool)\n", (14206, 14307), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((14426, 14551), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['growth_far_hist', "('bins',)", '"""core_far_histogram"""'], {'long_name': '"""FAR histogram for cores"""', 'dtype': 'np.float32'}), 
"(growth_far_hist, ('bins',), 'core_far_histogram',\n long_name='FAR histogram for cores', dtype=np.float32)\n", (14442, 14551), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((14670, 14795), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['growth_pod_hist', "('bins',)", '"""core_pod_histogram"""'], {'long_name': '"""POD histogram for cores"""', 'dtype': 'np.float32'}), "(growth_pod_hist, ('bins',), 'core_pod_histogram',\n long_name='POD histogram for cores', dtype=np.float32)\n", (14686, 14795), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((15582, 15596), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15594, 15596), False, 'from datetime import datetime, timedelta\n'), ((2068, 2159), 'pandas.date_range', 'pd.date_range', (['detection_ds.t.data[0]', 'detection_ds.t.data[-1]'], {'freq': '"""H"""', 'closed': '"""left"""'}), "(detection_ds.t.data[0], detection_ds.t.data[-1], freq='H',\n closed='left')\n", (2081, 2159), True, 'import pandas as pd\n'), ((2590, 2604), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2602, 2604), False, 'from datetime import datetime, timedelta\n'), ((2846, 2860), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2858, 2860), False, 'from datetime import datetime, timedelta\n'), ((3281, 3300), 'tobac_flow.io.get_goes_date', 'io.get_goes_date', (['i'], {}), '(i)\n', (3297, 3300), False, 'from tobac_flow import io, abi, glm\n'), ((3594, 3654), 'tobac_flow.glm.regrid_glm', 'glm.regrid_glm', (['glm_files', 'gridded_flash_ds'], {'corrected': '(False)'}), '(glm_files, gridded_flash_ds, corrected=False)\n', (3608, 3654), False, 'from tobac_flow import io, abi, glm\n'), ((3684, 3825), 'tobac_flow.dataset.create_dataarray', 'create_dataarray', (['glm_grid.data', "('t', 'y', 'x')", 
'"""glm_flashes"""'], {'long_name': '"""number of flashes detected by GLM"""', 'units': '""""""', 'dtype': 'np.int32'}), "(glm_grid.data, ('t', 'y', 'x'), 'glm_flashes', long_name=\n 'number of flashes detected by GLM', units='', dtype=np.int32)\n", (3700, 3825), False, 'from tobac_flow.dataset import get_datetime_from_coord, get_time_diff_from_coord, create_new_goes_ds, add_dataarray_to_ds, create_dataarray\n'), ((4206, 4220), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4218, 4220), False, 'from datetime import datetime, timedelta\n'), ((4785, 4818), 'numpy.full', 'np.full', (['marker_distance.shape', '(1)'], {}), '(marker_distance.shape, 1)\n', (4792, 4818), True, 'import numpy as np\n'), ((5832, 5870), 'numpy.sum', 'np.sum', (['(flash_distance_to_marker <= 10)'], {}), '(flash_distance_to_marker <= 10)\n', (5838, 5870), True, 'import numpy as np\n'), ((6051, 6086), 'numpy.sum', 'np.sum', (['(flash_distance_to_wvd <= 10)'], {}), '(flash_distance_to_wvd <= 10)\n', (6057, 6086), True, 'import numpy as np\n'), ((6260, 6297), 'numpy.sum', 'np.sum', (['(flash_distance_to_anvil <= 10)'], {}), '(flash_distance_to_anvil <= 10)\n', (6266, 6297), True, 'import numpy as np\n'), ((6761, 6846), 'tobac_flow.analysis.apply_func_to_labels', 'apply_func_to_labels', (['detection_ds.core_label.data', 'edge_filter_array', 'np.nanmin'], {}), '(detection_ds.core_label.data, edge_filter_array, np.nanmin\n )\n', (6781, 6846), False, 'from tobac_flow.analysis import filter_labels_by_length, filter_labels_by_length_and_mask, apply_func_to_labels\n'), ((7104, 7156), 'numpy.sum', 'np.sum', (['(growth_min_distance[growth_margin_flag] > 10)'], {}), '(growth_min_distance[growth_margin_flag] > 10)\n', (7110, 7156), True, 'import numpy as np\n'), ((7456, 7535), 'tobac_flow.analysis.apply_func_to_labels', 'apply_func_to_labels', (['detection_ds.wvd_label.data', 'edge_filter_array', 'np.nanmin'], {}), '(detection_ds.wvd_label.data, edge_filter_array, np.nanmin)\n', (7476, 7535), 
False, 'from tobac_flow.analysis import filter_labels_by_length, filter_labels_by_length_and_mask, apply_func_to_labels\n'), ((7779, 7825), 'numpy.sum', 'np.sum', (['(wvd_min_distance[wvd_margin_flag] > 10)'], {}), '(wvd_min_distance[wvd_margin_flag] > 10)\n', (7785, 7825), True, 'import numpy as np\n'), ((8099, 8190), 'tobac_flow.analysis.apply_func_to_labels', 'apply_func_to_labels', (['detection_ds.thick_anvil_label.data', 'edge_filter_array', 'np.nanmin'], {}), '(detection_ds.thick_anvil_label.data, edge_filter_array,\n np.nanmin)\n', (8119, 8190), False, 'from tobac_flow.analysis import filter_labels_by_length, filter_labels_by_length_and_mask, apply_func_to_labels\n'), ((8449, 8499), 'numpy.sum', 'np.sum', (['(anvil_min_distance[anvil_margin_flag] > 10)'], {}), '(anvil_min_distance[anvil_margin_flag] > 10)\n', (8455, 8499), True, 'import numpy as np\n'), ((3521, 3535), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3533, 3535), False, 'from datetime import datetime, timedelta\n'), ((3974, 3995), 'numpy.sum', 'np.sum', (['glm_grid.data'], {}), '(glm_grid.data)\n', (3980, 3995), True, 'import numpy as np\n'), ((5911, 5973), 'numpy.histogram', 'np.histogram', (['flash_distance_to_marker'], {'bins': '(40)', 'range': '[0, 40]'}), '(flash_distance_to_marker, bins=40, range=[0, 40])\n', (5923, 5973), True, 'import numpy as np\n'), ((6124, 6183), 'numpy.histogram', 'np.histogram', (['flash_distance_to_wvd'], {'bins': '(40)', 'range': '[0, 40]'}), '(flash_distance_to_wvd, bins=40, range=[0, 40])\n', (6136, 6183), True, 'import numpy as np\n'), ((6337, 6398), 'numpy.histogram', 'np.histogram', (['flash_distance_to_anvil'], {'bins': '(40)', 'range': '[0, 40]'}), '(flash_distance_to_anvil, bins=40, range=[0, 40])\n', (6349, 6398), True, 'import numpy as np\n'), ((7202, 7279), 'numpy.histogram', 'np.histogram', (['growth_min_distance[growth_margin_flag]'], {'bins': '(40)', 'range': '[0, 40]'}), '(growth_min_distance[growth_margin_flag], bins=40, range=[0, 
40])\n', (7214, 7279), True, 'import numpy as np\n'), ((7865, 7936), 'numpy.histogram', 'np.histogram', (['wvd_min_distance[wvd_margin_flag]'], {'bins': '(40)', 'range': '[0, 40]'}), '(wvd_min_distance[wvd_margin_flag], bins=40, range=[0, 40])\n', (7877, 7936), True, 'import numpy as np\n'), ((8543, 8618), 'numpy.histogram', 'np.histogram', (['anvil_min_distance[anvil_margin_flag]'], {'bins': '(40)', 'range': '[0, 40]'}), '(anvil_min_distance[anvil_margin_flag], bins=40, range=[0, 40])\n', (8555, 8618), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import warnings

import numpy as np
from pandas import Index
from xarray import Dataset, DataArray, set_options, concat

from .. import fun as ff
__all__ = ['apply_threshold', 'snht',
'adjust_mean', 'adjust_percentiles', 'adjust_percentiles_ref', 'adjust_reference_period',
'breakpoint_statistics', 'get_breakpoints']
def snht(data, dim='time', var=None, dep=None, suffix=None, window=1460, missing=600, **kwargs):
    """ Calculate a Standard Normal Homogeneity Test (SNHT)

    Args:
        data (DataArray, Dataset): input data
        dim (str): datetime dimension
        var (str): variable if Dataset
        dep (str, DataArray): departure variable
        suffix (str): add to name of new variables
        window (int): running window (timesteps)
        missing (int): allowed missing values in window

    Returns:
        Dataset : test statistics
        or
        DataArray
    """
    from .det import test
    if not isinstance(data, (DataArray, Dataset)):
        raise ValueError('Requires an xarray DataArray or Dataset', type(data))

    if isinstance(data, DataArray):
        idata = data.copy()
        var = ff.suche123(idata.name, var, 'var')
    else:
        ivars = list(data.data_vars)
        if len(ivars) == 1:
            var = ivars[0]
        elif var is None:
            raise ValueError("Dataset requires a var")
        else:
            pass
        idata = data[var].copy()

    if dim not in idata.dims:
        raise ValueError('requires a datetime dimension', dim)

    if suffix is not None:
        if suffix[0] != '_':
            suffix = '_' + suffix
            # BUG FIX: was `raise Warning(...)`, which aborted the call right
            # after repairing the suffix; now it only warns and continues.
            warnings.warn('suffix needs an _. Added: %s' % suffix)
    else:
        suffix = ''

    axis = idata.dims.index(dim)
    attrs = idata.attrs.copy()
    dep_add = False
    if dep is not None:
        if isinstance(dep, str) and isinstance(data, Dataset):
            dep = data[dep]
        elif isinstance(dep, DataArray):
            dep = dep
            dep_add = True  # DataArray given directly -> store it in the output
        else:
            raise ValueError("dep var not present")
        #
        # test statistics are computed on the departures (idata - dep)
        #
        with set_options(keep_attrs=True):
            idata = (idata - dep.reindex_like(idata))

        ff.xarray.set_attrs(idata, 'standard_name', add='_departure', default='departure')

    # apply the SNHT test along the datetime axis
    stest = np.apply_along_axis(test, axis, idata.values, window, missing)
    attrs.update({'units': '1', 'window': window, 'missing': missing})

    if isinstance(data, DataArray):
        data = data.to_dataset(name=var)

    if dep is not None:
        data[var + '_dep' + suffix] = idata
        if dep_add:
            # BUG FIX: guard dep.name for None in cell_method as well
            # (previously 'departure ' + None raised a TypeError)
            dep_name = dep.name if dep.name is not None else 'dep'
            data[dep_name] = dep
            attrs['cell_method'] = 'departure ' + dep_name + attrs.get('cell_method', '')

    data[var + '_snht' + suffix] = (list(idata.dims), stest)
    ff.xarray.set_attrs(data[var + '_snht' + suffix], 'standard_name', add='_snht', default='snht')
    data[var + '_snht' + suffix].attrs.update(attrs)
    return data
def apply_threshold(data, dim='time', var=None, name='breaks', suffix=None, thres=50, dist=730, min_levels=3,
                    ensemble=False, **kwargs):
    """ Apply threshold on SNHT to detect breakpoints

    Args:
        data (DataArray, Dataset): SNHT test statistics
        dim (str): datetime dimension
        var (str): variable if Dataset
        name (str): name of new variable with above threshold (breaks)
        suffix (str): add to name of new variables
        thres (int, float): threshold value
        dist (int): minimum distance between breaks (timesteps)
        min_levels (int): minimum significant levels for breaks
        ensemble (bool): run ensemble on thresholds, nthres=50

    Returns:
        Dataset
    """
    from xarray import DataArray, Dataset
    from .det import detector, detector_ensemble
    if not isinstance(data, (DataArray, Dataset)):
        raise ValueError('Requires an xarray DataArray or Dataset', type(data))

    if suffix is not None:
        if suffix[0] != '_':
            suffix = '_' + suffix
            # BUG FIX: was `raise Warning(...)`, which aborted the call right
            # after repairing the suffix; now it only warns and continues.
            warnings.warn('suffix needs an _. Added: %s' % suffix)
    else:
        suffix = ''

    if isinstance(data, DataArray):
        idata = data.copy()  # link
        var = idata.name if idata.name is not None else 'var'
    else:
        if var is None or var not in list(data.data_vars):
            raise ValueError('Requires a variable name: var=', list(data.data_vars))
        idata = data[var]  # link

    if idata.ndim > 2:
        raise ValueError("Maximum of 2 dimensions: ", idata.shape)
    if dim not in idata.dims:
        raise ValueError('requires a datetime dimension', dim)

    axis = idata.dims.index(dim)
    # CF-style flag metadata for the break variable
    # BUG FIX: attribute was misspelled 'flag_valus' (CF convention: flag_values)
    params = {'units': '1', 'thres': thres, 'dist': dist, 'min_levels': min_levels, 'standard_name': 'break_flag',
              'flag_values': [0, 1, 2, 3], 'valid_range': (0, 3),
              'flag_meanings': 'not_significant significant significant_at_other_level significant_at_level'}
    if ensemble:
        kwargs['nthres'] = kwargs.get('nthres', 50)
        breaks = detector_ensemble(idata.values, axis=axis, **kwargs)
        params['thres'] = 'ens%d' % kwargs.get('nthres')
    else:
        breaks = detector(idata.values, axis=axis, dist=dist, thres=thres, min_levels=min_levels,
                          **ff.levelup(**kwargs))

    name = var + '_' + name + suffix
    if isinstance(data, DataArray):
        data = idata.to_dataset(name=var)

    data[name] = (list(idata.dims), breaks)
    data[name].attrs.update(params)
    return data
def get_breakpoints(data, value=2, dim='time', return_startstop=False, startstop_min=0, **kwargs):
    """ Return breakpoints from a break flag variable

    Args:
        data (DataArray): break flags (e.g. from apply_threshold)
        value (int): breakpoint indicator value (minimum flag counted)
        dim (str): datetime dim
        startstop_min (int): flag-sum threshold delimiting a break region
        return_startstop (bool): also return end/start indices per break
        **kwargs: verbose, ... forwarded to ff.message

    Returns:
        list : breakpoint indices along dim
        or
        (list, list, list) : indices, ends, starts if return_startstop
    """
    if not isinstance(data, DataArray):
        raise ValueError("Require a DataArray / Dataset object", type(data))
    if dim not in data.dims:
        raise ValueError("Requires a datetime dimension", data.dims)
    if len(data.dims) > 2:
        # BUG FIX: RuntimeWarning was instantiated but never issued (no-op)
        warnings.warn("More than two dimensions found: %s" % str(data.dims), RuntimeWarning)
    #
    # Dimension of time
    #
    axis = data.dims.index(dim)
    #
    # Search Threshold
    #
    tmp = np.where(data.values >= value)
    i = list(map(int, np.unique(tmp[axis])))
    dates = np.datetime_as_string(data[dim].values, unit='D')
    e = []
    s = []
    #
    # multi-dimension / combine to only time axis
    #
    if data.ndim > 1:
        summe = data.values.sum(axis=1 if axis == 0 else 0)
    else:
        summe = data.values
    for k in i:
        # distance backwards/forwards to where the flag sum drops to startstop_min
        # NOTE(review): raises IndexError if no such index exists on either side
        # of a break — confirm inputs are always padded with zero flags.
        nleft = np.where(summe[:k][::-1] <= startstop_min)[0][0]
        nright = np.where(summe[k:] <= startstop_min)[0][0]
        e += [k - nleft]
        s += [k + nright]

    if kwargs.get('verbose', 0) > 0:
        if len(i) > 0:
            ff.message("Breakpoints for ", data.name, **kwargs)
            ff.message("[%8s] [%8s] [%8s] [%8s] [ #]" % ('idx', 'end', 'peak', 'start'), **kwargs)
            ff.message("\n".join(
                ["[%8s] %s %s %s %4d" % (j, dates[l], dates[j], dates[k], k - l) for j, k, l in zip(i, s, e)]),
                **kwargs)

    if return_startstop:
        return i, e, s
    return i
def adjust_mean(data, name, breakname, dim='time', suffix='_m', ratio=False, **kwargs):
    """Adjust a variable at detected breakpoints with a mean adjustment.

    Breakpoints are read from `breakname` and the adjustment is applied
    going back in time; the adjusted series is stored as `name + suffix`.

    Args:
        data (Dataset): Input Dataset with different variables
        name (str): Name of variable to adjust
        breakname (str): Name of variable with breakpoint information
        dim (str): datetime dimension
        suffix (str): add to name of new variables
        ratio (bool): use ratios instead of differences

    Optional Args:
        sample_size (int): minimum Sample size [130]
        borders (int): biased sample before and after a break [90]
        recent (bool): Use all recent Data to adjustments

    Returns:
        Dataset
    """
    from . import adj

    if not isinstance(data, Dataset):
        raise ValueError("Requires a Dataset object", type(data))
    if not isinstance(name, str):
        raise ValueError("Requires a string name", type(name))
    if name not in data.data_vars:
        raise ValueError("dataset var not present")
    if breakname not in data.data_vars:
        raise ValueError("requires a breaks dataset var")

    source = data[name].copy()
    raw = source.values
    #
    # breakpoint indices along the datetime dimension
    #
    breaks = get_breakpoints(data[breakname], dim=dim, **ff.levelup(**kwargs))
    taxis = source.dims.index(dim)
    meta = source.attrs.copy()  # deprecated (xr-patch)
    ff.message(name, str(raw.shape), 'A:', taxis, **kwargs)
    meta.update({'sample_size': kwargs.get('sample_size', 130),
                 'borders': kwargs.get('borders', 90),
                 'ratio': int(ratio)})
    ff.message(ff.dict2str(meta), **ff.levelup(**kwargs))
    stdn = data[name].attrs.get('standard_name', name)
    target = name + suffix
    data[target] = (source.dims, adj.mean(raw, breaks, axis=taxis, ratio=ratio, **kwargs))
    data[target].attrs.update(meta)
    data[target].attrs['biascor'] = 'mean'
    # count adjustment iterations across repeated applications
    data[target].attrs['niter'] = data[target].attrs.get('niter', 0) + 1
    data[target].attrs['standard_name'] = stdn + '_mean_adj'
    return data
def adjust_percentiles(data, name, breakname, dim='time', dep_var=None, suffix='_q', percentilen=None, **kwargs):
    """Adjust a variable at detected breakpoints with a percentile adjustment.

    Breakpoints are read from `breakname`; optionally the test is done on
    departures from `dep_var`. The adjusted series is stored as `name + suffix`.

    Args:
        data (Dataset): Input Dataset with different variables
        name (str): Name of variable to adjust
        breakname (str): Name of variable with breakpoint information
        dim (str): datetime dimension
        dep_var (str): Name of variable to use as a departure
        suffix (str): add to name of new variables
        percentilen (list): percentiles for percentile_cor

    Optional Args:
        sample_size (int): minimum Sample size [130]
        borders (int): biased sample before and after a break [90]
        bounded (tuple): limit correction to bounds
        recent (bool): Use all recent Data to adjustments
        ratio (bool): Use ratio instead of differences

    Returns:
        Dataset
    """
    from . import adj

    if not isinstance(data, Dataset):
        raise ValueError("Requires a Dataset object", type(data))
    if not isinstance(name, str):
        raise ValueError("Requires a string name", type(name))
    if name not in data.data_vars:
        raise ValueError("dataset var not present")
    if breakname not in data.data_vars:
        raise ValueError("requires a breaks dataset var")

    source = data[name].copy()
    if dep_var is not None:
        if dep_var not in data.data_vars:
            raise ValueError("dep var not present", data.data_vars)
        #
        # work on departures from the reference variable
        #
        with set_options(keep_attrs=True):
            source = (source - data[dep_var].reindex_like(source))

    percentilen = np.arange(0, 101, 10) if percentilen is None else percentilen
    raw = source.values
    breaks = get_breakpoints(data[breakname], dim=dim, **ff.levelup(**kwargs))
    taxis = source.dims.index(dim)
    meta = source.attrs.copy()  # deprecated (xr-patch)
    ff.message(name, str(raw.shape), 'A:', taxis, 'Q:', np.size(percentilen), "Dep:", str(dep_var), **kwargs)
    meta.update({'sample_size': kwargs.get('sample_size', 130),
                 'borders': kwargs.get('borders', 90)})
    ff.message(ff.dict2str(meta), **ff.levelup(**kwargs))
    stdn = data[name].attrs.get('standard_name', name)
    target = name + suffix
    adjusted = adj.percentile(raw, breaks, axis=taxis, percentilen=percentilen, **kwargs)
    data[target] = (source.dims, adjusted)
    data[target].attrs.update(meta)
    data[target].attrs['biascor'] = 'percentil'
    data[target].attrs['standard_name'] = stdn + '_percentil_adj'
    return data
def adjust_percentiles_ref(data, name, adjname, breakname, dim='time', suffix='_qa', percentilen=None,
                           adjust_reference=True, **kwargs):
    """Adjust a variable at breakpoints using percentiles of a reference variable.

    Optionally matches the reference (`adjname`) to the most recent homogeneous
    period first, then adjusts `name` via percentile adjustment of the
    departures from the (adjusted) reference.

    Args:
        data (Dataset): Input Dataset with different variables
        name (str): Name of variable to adjust
        adjname (str): Name of adjust (reference) variable
        breakname (str): Name of variable with breakpoint information
        dim (str): datetime dimension
        suffix (str): add to name of new variables
        percentilen (list): percentilen
        adjust_reference (bool): return adjusted reference?

    Optional Args:
        sample_size (int): minimum Sample size [130]
        borders (int): biased sample before and after a break [90]
        recent (bool): Use all recent Data to adjustments
        ratio (bool): Use ratio instead of differences
        ref_period (slice): period to use for quantile matching of reference

    Returns:
        Dataset
    """
    from . import adj
    if not isinstance(data, Dataset):
        raise ValueError("Requires a Dataset object", type(data))
    if not isinstance(name, str):
        raise ValueError("Requires a string name", type(name))
    if name not in data.data_vars:
        raise ValueError("dataset var not present")
    if adjname not in data.data_vars:
        raise ValueError("dataset var not present")
    if breakname not in data.data_vars:
        raise ValueError("requires a breaks dataset var")

    if suffix is not None:
        if suffix[0] != '_':
            suffix = '_' + suffix
            # BUG FIX: Warning was instantiated but never issued (no-op)
            warnings.warn('suffix needs an _. Added: %s' % suffix)
    else:
        suffix = ''

    if percentilen is None:
        percentilen = np.arange(0, 101, 10)

    values = data[name].values.copy()
    avalues = data[adjname].values.copy()
    breaks = get_breakpoints(data[breakname], dim=dim, **ff.levelup(**kwargs))
    axis = data[name].dims.index(dim)
    params = data[name].attrs.copy()  # deprecated (xr-patch)
    ff.message(name, str(values.shape), 'A:', axis, 'Q:', np.size(percentilen), "Adj:", adjname, **kwargs)
    params.update({'sample_size': kwargs.get('sample_size', 130), 'borders': kwargs.get('borders', 90)})
    ff.message(ff.dict2str(params), **ff.levelup(**kwargs))
    #
    # Adjust reference to a reference period?
    #
    if adjust_reference:
        avalues = adj.percentile_reference_period(values, avalues, breaks, axis=axis, percentilen=percentilen, **kwargs)
    #
    # Adjust according to reference dataset:
    # use adjusted reference (e.g. ERA) and calculate departures -> adjust departures
    # (dead `if False:` branch removed; the direct-reference alternative was:
    #  values = adj.percentile_reference(values, avalues, breaks, axis=axis, percentilen=percentilen, **kwargs))
    #
    values = values - avalues
    values = adj.percentile(values, breaks, axis=axis, percentilen=percentilen, **kwargs)
    values = values + avalues
    ff.message(name, 'using QA-adj departures', **kwargs)

    data[name + suffix] = (data[name].dims, values)
    data[name + suffix].attrs.update(params)
    data[name + suffix].attrs['biascor'] = 'percentil_ref'
    data[name + suffix].attrs['reference'] = adjname
    if adjust_reference:
        #
        # fix for no breakpoints
        #
        if len(breaks) > 0:
            ref_period = data[dim].values[breaks[-1]].astype('M8[M]').astype('str') + ' -'
        else:
            ref_period = '-'
        data[adjname + suffix] = (data[adjname].dims, avalues)
        data[adjname + suffix].attrs.update(params)
        data[adjname + suffix].attrs['ref_period'] = kwargs.get('ref_period', ref_period)
        data[adjname + suffix].attrs['reference'] = name
    return data
def adjust_reference_period(data, name, refname, breakname, dim='time', suffix='_qa', percentilen=None, **kwargs):
    """Quantile-match a variable to a reference over the last homogeneous period.

    Matches `name` (e.g. reanalysis) to `refname` (e.g. radiosonde) using
    percentiles within the period after the most recent breakpoint and stores
    the result as `name + suffix`.

    Args:
        data (Dataset): Input Dataset with different variables
        name (str): Name of variable to adjust (e.g. reanalysis)
        refname (str): Name of reference variable (e.g. radiosonde)
        breakname (str): Name of variable with breakpoint information
        dim (str): datetime dimension
        suffix (str): add to name of new variables
        percentilen (list): percentilen

    Returns:
        Dataset
    """
    from . import adj
    if not isinstance(data, Dataset):
        raise ValueError("Requires a Dataset object", type(data))
    if not isinstance(name, str):
        raise ValueError("Requires a string name", type(name))
    if name not in data.data_vars:
        raise ValueError("dataset var not present")
    if refname not in data.data_vars:
        raise ValueError("dataset var not present")
    if breakname not in data.data_vars:
        raise ValueError("requires a breaks dataset var")

    if suffix is not None:
        if suffix[0] != '_':
            suffix = '_' + suffix
            # BUG FIX: Warning was instantiated but never issued (no-op)
            warnings.warn('suffix needs an _. Added: %s' % suffix)
    else:
        suffix = ''

    if percentilen is None:
        percentilen = np.arange(0, 101, 10)

    values = data[refname].values.copy()  # RASO
    avalues = data[name].values.copy()  # Reanalysis (ERA)
    breaks = get_breakpoints(data[breakname], dim=dim, **ff.levelup(**kwargs))
    axis = data[name].dims.index(dim)
    params = data[name].attrs.copy()  # deprecated (xr-patch)
    ff.message(name, str(values.shape), 'A:', axis, 'Q:', np.size(percentilen), "Adj:", refname, **kwargs)
    params.update({'sample_size': kwargs.get('sample_size', 130), 'borders': kwargs.get('borders', 90)})
    ff.message(ff.dict2str(params), **ff.levelup(**kwargs))
    stdn = data[name].attrs.get('standard_name', name)
    #
    # Adjust name with refname in reference period
    #
    avalues = adj.percentile_reference_period(values, avalues, breaks, axis=axis, percentilen=percentilen, **kwargs)
    data[name + suffix] = (data[name].dims, avalues)
    data[name + suffix].attrs.update(params)
    data[name + suffix].attrs['standard_name'] = stdn + '_percentil_adj'
    return data
# def apply_bounds(data, name, other, lower, upper):
# "Apply bounds and replace"
# logic = data[name].values < lower
# n = np.sum(logic)
# data[name].values = np.where(logic, data[other].values, data[name].values)
# logic = data[name].values > upper
# n += np.sum(logic)
# data[name].values = np.where(logic, data[other].values, data[name].values)
# data[name].attrs['bounds'] = "[%d , %d]" % (lower, upper)
# print("Outside bounds [", lower, "|", upper, "] :", n)
#
#
# def correct_loop(dataset, dep_var=None, use_dep=False, mean_cor=False, percentile_cor=False, percentile_adj=None,
# percentilen=None, clim_ano=True, **kwargs):
# funcid = "[DC] Loop "
# if not isinstance(dataset, DataArray):
# raise ValueError(funcid + "Requires a DataArray class object")
#
# if not mean_cor and not percentile_cor and percentile_adj is None:
# raise RuntimeError(funcid + "Requires a correction: mean_cor, percentile_cor or percentile_adj")
#
# if np.array([mean_cor, percentile_cor, percentile_adj is not None]).sum() > 1:
# raise RuntimeError(funcid + "Only one Method at a time is allowed!")
#
# xdata = dataset.copy()
#
# # Make Large Arrays with all iterations ?
# dataset = dataset.copy()
# dims = dataset.get_dimension_values()
# dims['iter'] = [0]
# order = dataset.dims.list[:] + ['iter']
# dataset.update_values_dims_remove(np.expand_dims(dataset.values, axis=-1), order, dims)
# # dataset.dims['iter'].set_attrs({''}) # ?
# sdata = dataset.copy()
# sdata.values[:] = 0.
# sdata.name += '_snht'
# bdata = dataset.copy()
# bdata.values[:] = 0.
# bdata.name += '_breaks'
# status = True
# i = 1
# while status:
# status, stest, breaks, xdata = adjustments(xdata, dep_var=dep_var, use_dep=use_dep, mean_cor=mean_cor,
# percentile_cor=percentile_cor, percentile_adj=percentile_adj,
# percentilen=percentilen, clim_ano=clim_ano,
# **kwargs)
# # combine
# dataset.values = np.concatenate((dataset.values, np.expand_dims(xdata.values, axis=-1)), axis=-1)
# # dataset.update_values_dims()
# bdata.values = np.concatenate((bdata.values, np.expand_dims(breaks.values, axis=-1)), axis=-1)
# sdata.values = np.concatenate((sdata.values, np.expand_dims(stest.values, axis=-1)), axis=-1)
#
# # Does the adjustments still change anything ?
# test = np.abs(np.nansum(dataset.values[:, :, i - 1] - xdata.values)) # sum of differences
# if test < 0.1:
# break
# message(funcid + "%02d Breaks: \n" % i, **kwargs)
# i += 1
# # SAVE
# dataset.update_values_dims(dataset.values, {'iter': range(i + 1)})
# sdata.update_values_dims(sdata.values, {'iter': range(i + 1)})
# bdata.update_values_dims(bdata.values, {'iter': range(i + 1)})
# sdata.attrs['iterations'] = i
#
# params = {'sample_size': kwargs.get('sample_size', 730),
# 'borders': kwargs.get('borders', 180),
# 'bounded': str(kwargs.get('bounded', '')),
# 'recent': kwargs.get('recent', False),
# 'ratio': kwargs.get('ratio', True)}
#
# message(funcid + "Breaks: \n", **kwargs)
# # print_breaks(bdata.subset(dims={'iter': i - 1}), verbose)
#
# if mean_cor:
# dataset.name += '_m_iter'
# dataset.attrs['biascor'] = 'mean'
# dataset.attrs['standard_name'] += '_mean_adj'
# dataset.attrs.set_items(params)
#
# elif percentile_cor:
# dataset.name += '_q_iter'
# dataset.attrs['biascor'] = 'percentile'
# dataset.attrs['standard_name'] += '_percentile_adj'
# dataset.attrs.set_items(params)
#
# elif percentile_adj is not None:
# dataset.name += '_qe_iter'
# dataset.attrs['biascor'] = 'percentile_era_adjusted'
# dataset.attrs['standard_name'] += '_percentile_era_adj'
# dataset.attrs.set_items(params)
# else:
# pass
#
# return status, sdata, bdata, dataset
#
# def adjust_table(data, name, analysis, dim='time', **kwargs):
# """
# test
# Out[23]:
# {'dpd': dataset
# mean 2
# rmse 3
# var 2, 'era': M Q
# mean -3 -3
# rmse 2 2
# var 4 4}
#
# pd.concat(test, axis=1)
# Out[22]:
# dpd era
# dataset M Q
# mean 2 -3 -3
# rmse 3 2 2
# var 2 4 4
#
# Args:
# data:
# name:
# analysis:
# dim:
# **kwargs:
#
# Returns:
#
# """
# import pandas as pd
# from ..fun import rmse
#
# axis = data[name].dims.index(dim)
# # for all reanalysis
# out = {}
# out[name] = {'dataset': {'RMSE': rmse(data[name], np.nanmean(data[name], axis=axis)),
# 'MEAN': np.nanmean(data[name]),
# 'VAR': np.nanvar(data[name])}}
# for i, iana in enumerate(analysis):
# tmp = data[[name, iana]].copy()
# # snht
# tmp = snht(tmp, dim=dim, var=name, dep=iana, **kwargs)
# # threshold
# tmp = apply_threshold(tmp, var=name + '_snht', dim=dim)
# out[iana] = {}
# out[iana]['n'] = len(get_breakpoints(tmp, dim=dim, var=name + '_snht_breaks'))
# out[iana] = {'dataset': {'RMSE': rmse(tmp[name], tmp[iana]),
# 'MEAN': np.nanmean(tmp[name] - tmp[iana]),
# 'VAR': np.nanvar(tmp[name] - tmp[iana])}}
# # adjust Mean
# tmp = adjust_mean(tmp, name, name + '_snht_breaks', dim=dim, **kwargs)
# out[iana]['mdiff'] = {'RMSE': rmse(tmp[name + '_m'], tmp[iana]),
# 'MEAN': np.nanmean(tmp[name + '_m'] - tmp[iana]),
# 'VAR': np.nanvar(tmp[name + '_m'] - tmp[iana])}
# # adjust Percentiles
# tmp = adjust_percentiles(tmp, name, name + '_snht_breaks', dim=dim, **kwargs)
# out[iana]['qdiff'] = {'RMSE': rmse(tmp[name + '_q'], tmp[iana]),
# 'MEAN': np.nanmean(tmp[name + '_q'] - tmp[iana]),
# 'VAR': np.nanvar(tmp[name + '_q'] - tmp[iana])}
# # adjust Reference
# tmp = adjust_reference_period(tmp, iana, name, name + '_snht_breaks', dim=dim, **kwargs)
# out[iana]['qrdiff'] = {'RMSE': rmse(tmp[iana + '_qa'], tmp[iana]),
# 'MEAN': np.nanmean(tmp[iana + '_qa'] - tmp[iana]),
# 'VAR': np.nanvar(tmp[iana + '_qa'] - tmp[iana])}
# # adjust Percentiles using a Reference
# tmp = adjust_percentiles_ref(tmp, name, iana, name + '_snht_breaks', dim=dim, **kwargs)
# out[iana]['qadiff'] = {'RMSE': rmse(tmp[name + '_qa'], tmp[iana]),
# 'MEAN': np.nanmean(tmp[name + '_qa'] - tmp[iana]),
# 'VAR': np.nanvar(tmp[name + '_qa'] - tmp[iana])}
#
# for ikey, idata in out.items():
# out[ikey] = pd.DataFrame(idata)
#
# return pd.concat(out, axis=1)
#
# def correct_2var(xdata, ydata):
# # Make a 3D (time, var1, var2) per level Test Statistics
# # Use that to adjustments both variables at the same time
# # ? water vapor transform -> how to unsplit vp to t,rh ?
# # t, rh -> td (esatfunc) -> vp
# # large errors -> temperature problem ?
# # smaller errors -> humidity problem ?
# # t, rh percentage of contribution to vp
# # vp (esat_inv) -> td
# pass
def breakpoint_statistics(data, breakname, dim='time', variables=None, borders=None, inbetween=True, nmax=None,
                          **kwargs):
    """
    Summary statistics of all variables in the regions around every detected breakpoint.

    Args:
        data (Dataset): experiment data
        breakname (str): SNHT break variable
        dim (str): datetime dimension
        variables (list): variables to use (default: all non-SNHT data_vars)
        borders (int): number of timesteps next to a breakpoint excluded from A/B
        inbetween (bool): also calculate statistics of the bordered area (region I)
        nmax (int): maximum number of values to use per region
        **kwargs: forwarded to the statistics function and message()

    Returns:
        Dataset: statistics stacked along a new 'region' coordinate; regions are
            labelled 'A'+date (before, earlier), 'B'+date (after, later) and
            optionally 'I'+date (inside the borders) for each breakpoint date.
        None: when no breakpoints are found.
    """
    from .. import fun as ff
    if not isinstance(data, Dataset):
        raise ValueError("Requires a Dataset class object", type(data))
    if dim not in data.coords:
        raise ValueError("Requires a datetime dimension", data.coords)
    if breakname not in data.data_vars:
        raise ValueError("Variable breakname not present", breakname, data.data_vars)
    ibreaks = get_breakpoints(data[breakname], value=kwargs.pop('breakpoint_threshold', 2), dim=dim)
    nb = len(ibreaks)
    if nb == 0:
        ff.message("Warning no Breakpoints found", **ff.leveldown(**kwargs))  # Always print
        return
    #
    # Variables to use ?
    #
    if variables is None:
        variables = list(data.data_vars)
    else:
        variables = [i for i in variables if i in data.data_vars]
    # breakpoint dates as 'YYYY-MM-DD' strings, used as region labels
    ibreakdates = list(data[dim].values[ibreaks].astype('M8[D]').astype('str'))
    variables.remove(breakname)
    variables = [i for i in variables if 'snht' not in i]
    if borders is None:
        borders = 0
    if nmax is None:
        nmax = 100000
    data = data[variables].copy()
    gattrs = data.attrs.copy()
    axis = data[variables[0]].dims.index(dim)
    wfunc = kwargs.pop('wfunc', ff.cal.nanfunc)
    region = {}
    j = 0
    # append the final index so the last breakpoint has a right-hand boundary
    ibreaks = ibreaks + [data[dim].size - 1]
    for i, k in enumerate(ibreakdates):
        #
        # Region left of breakpoint (A)
        #
        m = ibreaks[i + 1]
        i = ibreaks[i]
        region['A' + k] = data.isel(**{dim: slice(j, i)}).apply(
            ff.xarray.xarray_function_wrapper, wfunc=wfunc, dim=dim, axis=axis,
            borders=borders, nmax=nmax, **kwargs)
        #
        # Region right of breakpoint (B)
        #
        region['B' + k] = data.isel(**{dim: slice(i, m)}).apply(
            ff.xarray.xarray_function_wrapper, wfunc=wfunc, dim=dim, axis=axis,
            borders=borders, nmax=nmax, **kwargs)
        #
        # Region between borders at breakpoint (I)
        #
        if borders > 0 and inbetween:
            # Area around breakpoint [bordered]
            region['I' + k] = data.isel(**{dim: slice(i - borders, i + borders)}).apply(
                ff.xarray.xarray_function_wrapper, wfunc=wfunc, dim=dim, axis=axis,
                borders=0, nmax=nmax, **kwargs)
        ff.message("Break", j, i, m, k, **kwargs)
        j = i + borders
    data = concat(region.values(), dim=Index(region.keys(), name='region'))
    # record how the statistics were computed in the global attributes
    if hasattr(wfunc, '__name__'):
        if wfunc.__name__ == 'nanfunc':
            gattrs['statistic'] = "nanfunc(" + kwargs.get('func', 'nanmean') + ")"
        else:
            gattrs['statistic'] = wfunc.__name__
    else:
        gattrs['statistic'] = str(wfunc)
    # BUG FIX: was `nmax is not 100000` -- identity comparison with an int
    # literal is implementation-dependent (SyntaxWarning since Python 3.8);
    # equality is what was intended here.
    if nmax != 100000:
        gattrs['max_sample'] = nmax
    if borders > 0:
        gattrs['borders'] = borders
    if inbetween:
        gattrs['inbetween'] = True
    data.attrs.update(gattrs)
    return data
def breakpoint_info(data, snhtname, breakname, dim='time', thres=50, **kwargs):
    """Print, for every detected breakpoint, its date together with the break
    flags and the SNHT test values at that time step.

    Args:
        data (Dataset): dataset containing both the SNHT and the break variable
        snhtname (str): name of the SNHT test-statistic variable
        breakname (str): name of the breakpoint flag variable
        dim (str): datetime dimension
        thres (int): unused here; kept for interface compatibility
        **kwargs: forwarded to get_breakpoints()
    """
    if not isinstance(data, Dataset):
        raise ValueError('Requires an xarray Dataset', type(data))
    if snhtname not in data.data_vars:
        raise ValueError('Requires a variable name: snhtname', list(data.data_vars))
    if breakname not in data.data_vars:
        raise ValueError('Requires a variable name: breakname', list(data.data_vars))
    if dim not in data.dims:
        raise ValueError('requires a datetime dimension', dim)
    # locate the breakpoints flagged in the break variable
    detected = get_breakpoints(data[breakname], dim=dim, **kwargs)
    for idx in detected:
        # flag meaning: 0 none, 1 significant, 2 significant at other level, 3 significant at level
        when = data[dim][idx]
        flags = data[breakname].isel({dim: idx}).values
        scores = data[snhtname].isel({dim: idx}).values
        print(when, flags, scores)
# message()
# look at snht and check how close
#
# shape = list(dataset[name].values.shape)
#
# dep = {getattr(ifunc, '__name__'): [] for ifunc in functions}
#
# dep['counts'] = []
# dates = dataset.coords[dim].values
# jbreaks = sorted(ibreaks, reverse=True)
# jbreaks.append(0)
# idims = list(dataset[name].dims)
# jdims = idims.copy()
# jdims.pop(axis)
# func_kwargs.update({'axis': axis})
# #
# # iterate from now to past breakpoints
# #
# for i, ib in enumerate(break_iterator(ibreaks, axis, shape, borders=borders, max_sample=max_sample)):
# period = vrange(dates[ib[axis]])
# idate = dates[jbreaks[i]]
# tmp = np.sum(np.isfinite(dataset[name][ib]), axis=axis) # is an DataArray
# tmp.coords[dim] = idate
# tmp.coords['start'] = period[0]
# tmp.coords['stop'] = period[1]
# dep['counts'].append(tmp.copy()) # counts
#
# for j, ifunc in enumerate(functions):
# iname = getattr(ifunc, '__name__')
# # Requires clear mapping of input and output dimensions
# tmp = apply_ufunc(ifunc, dataset[name][ib],
# input_core_dims=[idims],
# output_core_dims=[jdims],
# kwargs=func_kwargs)
# # tmp = ifunc(dataset[name][ib], axis=axis, **func_kwargs)
# # only for functions with ufunc capability
# tmp.coords[dim] = idate
# tmp.coords['start'] = period[0]
# tmp.coords['stop'] = period[1]
# dep[iname].append(tmp.copy())
#
# for ifunc, ilist in dep.items():
# dep[ifunc] = concat(ilist, dim=dim)
#
# dep = Dataset(dep)
# return dep
def reference_period(data, dim='time', dep_var=None, period=None, **kwargs):
    """Stub: find the best matching reference period of a time series.

    Validates the input and computes departures (either against ``dep_var`` or
    as anomalies over ``period``); the actual period search (SNHT, splitting,
    per-piece RMSE, selection) is not implemented yet, so this always returns
    None.

    Args:
        data (DataArray): input time series
        dim (str): datetime dimension
        dep_var (DataArray): optional reference series to difference against
        period: anomaly reference period when ``dep_var`` is not given
        **kwargs: unused
    """
    from ..met.time import anomaly
    if not isinstance(data, DataArray):
        raise ValueError("Requires a DataArray class object", type(data))
    if dim not in data.dims:
        raise ValueError("Requires a datetime dimension", data.dims)
    data = data.copy()
    if dep_var is None:
        departures, _ = anomaly(data, dim=dim, period=period)
    else:
        if not isinstance(dep_var, DataArray):
            raise ValueError("Requires a DataArray class object", type(dep_var))
        departures = data - dep_var  # departures (units assumed to match)
    # planned: run SNHT on the departures, split into pieces, score each piece
    # (e.g. RMSE), then return the best piece together with its index
    return None
def combine_metadata(data, dim='time', window=30,
                     lon=None, lat=None, distance_weight=1, distance_threshold=10,
                     read_igra=True, meta_ident=None, meta_weight=1,
                     stype=None, sonde_weight=1, **kwargs):
    """Collect station-metadata change indicators (location changes, sonde-type
    changes, IGRA metadata events) as weighted, windowed time series.

    NOTE(review): dinfo/sinfo/minfo are computed below but never merged into
    the returned dataset, and meta_weight/sonde_weight are unused -- this
    function looks unfinished; confirm intended behavior before relying on it.

    Args:
        data (Dataset): station data; only data[dim] is read here
        dim (str): datetime dimension
        window (int): rolling-window size (timesteps) for smoothing events
        lon, lat: longitude/latitude series used to detect location changes
        distance_weight: weight assigned where the moved distance exceeds
            distance_threshold
        distance_threshold: distance threshold (presumably km -- see comment
            below; TODO confirm against location_change)
        read_igra (bool): read IGRA metadata events (requires meta_ident)
        meta_ident: station identifier for the IGRA metadata lookup
        meta_weight: currently unused
        stype: sonde-type series for sondetype()
        sonde_weight: currently unused
        **kwargs: forwarded to the .meta helpers

    Returns:
        Dataset: the input ``data``, currently unchanged (see NOTE above)
    """
    from .meta import location_change, metadata, sondetype
    if not isinstance(data, Dataset):
        raise ValueError()
    # can be redundant
    if lon is not None and lat is not None:
        # distance in [km] of location changes
        dinfo = location_change(lon, lat, dim=dim, **kwargs)
        # binarize: weight where the station moved farther than the threshold
        dinfo.values = np.where(dinfo.values > distance_threshold, distance_weight, 0)
        # triangle shape
        dinfo = dinfo.rolling(**{dim: window}, min_periods=1, center=True).sum().rolling(**{dim: window},
                                                                                        min_periods=1,
                                                                                        center=True).mean()
    if stype is not None:
        sinfo = sondetype(stype, dim, window=window, **kwargs)
    if read_igra:
        if meta_ident is None:
            raise ValueError('')
        minfo = metadata(meta_ident, data[dim].values, dim=dim, window=window, **kwargs)
    return data
def apply_biasadjustments(adjdata, data, isodb=False, **kwargs):
    """Placeholder: apply bias adjustments derived from adjusted vs. raw data.

    Not implemented yet; the comments below sketch the intended steps
    (adjustments = adjdata - data at sounding times, interpolated between
    soundings and optionally mapped to table format).
    """
    # calculate the bias adjustmens and for sounding times
    # interpolate between sounding times
    # interpolate to table format?
    # todo finish that function
    # cal. adjustments adjdata - data = adj
    # interpolate?, unify across missing
    # quantile adjustments ? how to interpolate these across unknown
    pass
| [
"numpy.size",
"numpy.datetime_as_string",
"numpy.apply_along_axis",
"numpy.where",
"numpy.arange",
"xarray.set_options",
"numpy.unique"
] | [((2283, 2345), 'numpy.apply_along_axis', 'np.apply_along_axis', (['test', 'axis', 'idata.values', 'window', 'missing'], {}), '(test, axis, idata.values, window, missing)\n', (2302, 2345), True, 'import numpy as np\n'), ((6311, 6341), 'numpy.where', 'np.where', (['(data.values >= value)'], {}), '(data.values >= value)\n', (6319, 6341), True, 'import numpy as np\n'), ((6399, 6448), 'numpy.datetime_as_string', 'np.datetime_as_string', (['data[dim].values'], {'unit': '"""D"""'}), "(data[dim].values, unit='D')\n", (6420, 6448), True, 'import numpy as np\n'), ((11437, 11458), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(10)'], {}), '(0, 101, 10)\n', (11446, 11458), True, 'import numpy as np\n'), ((11714, 11734), 'numpy.size', 'np.size', (['percentilen'], {}), '(percentilen)\n', (11721, 11734), True, 'import numpy as np\n'), ((14207, 14228), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(10)'], {}), '(0, 101, 10)\n', (14216, 14228), True, 'import numpy as np\n'), ((14548, 14568), 'numpy.size', 'np.size', (['percentilen'], {}), '(percentilen)\n', (14555, 14568), True, 'import numpy as np\n'), ((17189, 17210), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(10)'], {}), '(0, 101, 10)\n', (17198, 17210), True, 'import numpy as np\n'), ((17559, 17579), 'numpy.size', 'np.size', (['percentilen'], {}), '(percentilen)\n', (17566, 17579), True, 'import numpy as np\n'), ((34199, 34262), 'numpy.where', 'np.where', (['(dinfo.values > distance_threshold)', 'distance_weight', '(0)'], {}), '(dinfo.values > distance_threshold, distance_weight, 0)\n', (34207, 34262), True, 'import numpy as np\n'), ((2094, 2122), 'xarray.set_options', 'set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (2105, 2122), False, 'from xarray import Dataset, DataArray, set_options, concat\n'), ((6364, 6384), 'numpy.unique', 'np.unique', (['tmp[axis]'], {}), '(tmp[axis])\n', (6373, 6384), True, 'import numpy as np\n'), ((11292, 11320), 'xarray.set_options', 'set_options', ([], 
{'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (11303, 11320), False, 'from xarray import Dataset, DataArray, set_options, concat\n'), ((6682, 6724), 'numpy.where', 'np.where', (['(summe[:k][::-1] <= startstop_min)'], {}), '(summe[:k][::-1] <= startstop_min)\n', (6690, 6724), True, 'import numpy as np\n'), ((6743, 6779), 'numpy.where', 'np.where', (['(summe[k:] <= startstop_min)'], {}), '(summe[k:] <= startstop_min)\n', (6751, 6779), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object, map, zip)
__author__ = "<NAME>"
# Standard library
# eg copy
# absolute import rg:from copy import deepcopy
# Dependencies
# eg numpy
# absolute import eg: import numpy as np
# Project
# relative import eg: from .mod import f
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported.
# BUG FIX: the `warn` keyword was deprecated in matplotlib 3.1 and removed in
# 3.3; passing it raises TypeError on modern matplotlib.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import mpld3
from mpld3 import plugins
import numpy as np
from astropy.io import fits as pf
def draw_fig(image_array, image_header, catalog=None, plot=False):
    """Render a sky image, optionally overlaid with catalog sources, as an
    mpld3 figure dictionary.

    Args:
        image_array: 2D pixel array to display.
        image_header: FITS header carrying the WCS of the image.
        catalog: optional record array with 'RA_FIN', 'DEC_FIN' and 'NAME'
            fields; sources are plotted as open circles and labelled.
        plot (bool): if True, also show the figure interactively.

    Returns:
        dict: mpld3 figure dictionary (for embedding in a web page).
    """
    from astropy import wcs
    from astropy.wcs import WCS
    from astropy import units as u
    import astropy.coordinates as coord
    fig, (ax) = plt.subplots(1, 1, figsize=(4, 3), subplot_kw={'projection': WCS(image_header)})
    im = ax.imshow(image_array, origin='lower', zorder=1, interpolation='none', aspect='equal')
    if catalog is not None:
        lon = coord.Angle(catalog['RA_FIN'] * u.deg)
        lat = coord.Angle(catalog['DEC_FIN'] * u.deg)
        w = wcs.WCS(image_header)
        # world (RA/DEC) -> pixel coordinates for the overlay
        pixcrd = w.wcs_world2pix(np.column_stack((lon, lat)), 1)
        ax.plot(pixcrd[:, 0], pixcrd[:, 1], 'o', mfc='none')
        # BUG FIX: xrange is Python-2 only (NameError under the Python-3
        # builtins this module imports); use range.
        for ID in range(catalog.size):
            ax.annotate('%s' % catalog[ID]['NAME'], xy=(pixcrd[:, 0][ID], pixcrd[:, 1][ID]), color='white')
    ax.set_xlabel('RA')
    ax.set_ylabel('DEC')
    fig.colorbar(im, ax=ax)
    if plot == True:
        plt.show()
    plugins.connect(fig, plugins.MousePosition(fontsize=14))
    return mpld3.fig_to_dict(fig)
def draw_spectrum(spectrum, dummy=False):
    """Plot a source count spectrum in log-log space as an mpld3 figure dict.

    The rate and its error are divided by the channel width (taken from the
    'rmf_62bands.fits' response file) and transformed to log10, with the
    error bars propagated via d(log10 y) = log10(e) * dy / y. Only channels
    with positive rate are drawn.

    Args:
        spectrum: FITS HDU list whose extension 8 holds RATE and STAT_ERR.
        dummy: unused; kept for interface compatibility.

    Returns:
        dict: mpld3 figure dictionary.
    """
    response = pf.open('rmf_62bands.fits')
    counts = spectrum[8].data
    e_lo = response[3].data['E_min']
    e_hi = response[3].data['E_max']
    positive = counts['RATE'] > 0.
    rate = counts['RATE'] / (e_hi - e_lo)
    rate_err = counts['STAT_ERR'] / (e_hi - e_lo)
    energy = (e_hi + e_lo)
    # log-space error propagation: d(log10 x) = log10(e) * dx / x
    log_de = np.log10(np.e) * (e_hi - e_lo) / energy
    log_e = np.log10(energy)
    log_rate_err = np.log10(np.e) * rate_err / rate
    log_rate = np.log10(rate)
    fig, ax = plt.subplots(figsize=(4, 2.8))
    ax.set_xlabel('log(E) keV')
    ax.set_ylabel('log(counts/s/keV)')
    ax.errorbar(log_e[positive], log_rate[positive],
                yerr=log_rate_err[positive] * 0.5, xerr=log_de[positive] * 0.5, fmt='o')
    plugins.connect(fig, plugins.MousePosition(fontsize=14))
    return mpld3.fig_to_dict(fig)
def draw_dummy(dummy=True):
    """Return an mpld3 figure dict with a synthetic RGBA test image (three
    Gaussian color blobs and a radial alpha mask); an empty axes when
    ``dummy`` is not True."""
    fig, ax = plt.subplots(figsize=(4, 3))
    if dummy == True:
        xs = np.linspace(-2, 2, 200)
        ys = xs[:, None]
        rgba = np.zeros((200, 200, 4))
        # one Gaussian blob per color channel, alpha fades with radius
        rgba[:, :, 0] = np.exp(-(xs - 1) ** 2 - ys ** 2)
        rgba[:, :, 1] = np.exp(-(xs + 0.71) ** 2 - (ys - 0.71) ** 2)
        rgba[:, :, 2] = np.exp(-(xs + 0.71) ** 2 - (ys + 0.71) ** 2)
        rgba[:, :, 3] = np.exp(-0.25 * (xs ** 2 + ys ** 2))
        im = ax.imshow(rgba, origin='lower', zorder=1, interpolation='none', aspect='equal')
        fig.colorbar(im, ax=ax)
    plugins.connect(fig, plugins.MousePosition(fontsize=14))
    return mpld3.fig_to_dict(fig)
| [
"matplotlib.pyplot.show",
"mpld3.plugins.MousePosition",
"numpy.column_stack",
"numpy.zeros",
"astropy.wcs.WCS",
"matplotlib.use",
"astropy.io.fits.open",
"numpy.linspace",
"numpy.exp",
"mpld3.fig_to_dict",
"numpy.log10",
"matplotlib.pyplot.subplots",
"astropy.coordinates.Angle"
] | [((433, 466), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {'warn': '(False)'}), "('Agg', warn=False)\n", (447, 466), False, 'import matplotlib\n'), ((1639, 1661), 'mpld3.fig_to_dict', 'mpld3.fig_to_dict', (['fig'], {}), '(fig)\n', (1656, 1661), False, 'import mpld3\n'), ((1714, 1741), 'astropy.io.fits.open', 'pf.open', (['"""rmf_62bands.fits"""'], {}), "('rmf_62bands.fits')\n", (1721, 1741), True, 'from astropy.io import fits as pf\n'), ((1992, 2003), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (2000, 2003), True, 'import numpy as np\n'), ((2084, 2095), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (2092, 2095), True, 'import numpy as np\n'), ((2111, 2141), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 2.8)'}), '(figsize=(4, 2.8))\n', (2123, 2141), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2407), 'mpld3.fig_to_dict', 'mpld3.fig_to_dict', (['fig'], {}), '(fig)\n', (2402, 2407), False, 'import mpld3\n'), ((2459, 2487), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (2471, 2487), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3086), 'mpld3.fig_to_dict', 'mpld3.fig_to_dict', (['fig'], {}), '(fig)\n', (3081, 3086), False, 'import mpld3\n'), ((1037, 1075), 'astropy.coordinates.Angle', 'coord.Angle', (["(catalog['RA_FIN'] * u.deg)"], {}), "(catalog['RA_FIN'] * u.deg)\n", (1048, 1075), True, 'import astropy.coordinates as coord\n'), ((1090, 1129), 'astropy.coordinates.Angle', 'coord.Angle', (["(catalog['DEC_FIN'] * u.deg)"], {}), "(catalog['DEC_FIN'] * u.deg)\n", (1101, 1129), True, 'import astropy.coordinates as coord\n'), ((1143, 1164), 'astropy.wcs.WCS', 'wcs.WCS', (['image_header'], {}), '(image_header)\n', (1150, 1164), False, 'from astropy import wcs\n'), ((1554, 1564), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1562, 1564), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1625), 'mpld3.plugins.MousePosition', 'plugins.MousePosition', ([], 
{'fontsize': '(14)'}), '(fontsize=14)\n', (1612, 1625), False, 'from mpld3 import plugins\n'), ((2337, 2371), 'mpld3.plugins.MousePosition', 'plugins.MousePosition', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (2358, 2371), False, 'from mpld3 import plugins\n'), ((2523, 2546), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(200)'], {}), '(-2, 2, 200)\n', (2534, 2546), True, 'import numpy as np\n'), ((2586, 2609), 'numpy.zeros', 'np.zeros', (['(200, 200, 4)'], {}), '((200, 200, 4))\n', (2594, 2609), True, 'import numpy as np\n'), ((2636, 2666), 'numpy.exp', 'np.exp', (['(-(x - 1) ** 2 - y ** 2)'], {}), '(-(x - 1) ** 2 - y ** 2)\n', (2642, 2666), True, 'import numpy as np\n'), ((2695, 2737), 'numpy.exp', 'np.exp', (['(-(x + 0.71) ** 2 - (y - 0.71) ** 2)'], {}), '(-(x + 0.71) ** 2 - (y - 0.71) ** 2)\n', (2701, 2737), True, 'import numpy as np\n'), ((2764, 2806), 'numpy.exp', 'np.exp', (['(-(x + 0.71) ** 2 - (y + 0.71) ** 2)'], {}), '(-(x + 0.71) ** 2 - (y + 0.71) ** 2)\n', (2770, 2806), True, 'import numpy as np\n'), ((2833, 2866), 'numpy.exp', 'np.exp', (['(-0.25 * (x ** 2 + y ** 2))'], {}), '(-0.25 * (x ** 2 + y ** 2))\n', (2839, 2866), True, 'import numpy as np\n'), ((3012, 3046), 'mpld3.plugins.MousePosition', 'plugins.MousePosition', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (3033, 3046), False, 'from mpld3 import plugins\n'), ((1198, 1225), 'numpy.column_stack', 'np.column_stack', (['(lon, lat)'], {}), '((lon, lat))\n', (1213, 1225), True, 'import numpy as np\n'), ((1947, 1961), 'numpy.log10', 'np.log10', (['np.e'], {}), '(np.e)\n', (1955, 1961), True, 'import numpy as np\n'), ((2058, 2072), 'numpy.log10', 'np.log10', (['np.e'], {}), '(np.e)\n', (2066, 2072), True, 'import numpy as np\n'), ((876, 893), 'astropy.wcs.WCS', 'WCS', (['image_header'], {}), '(image_header)\n', (879, 893), False, 'from astropy.wcs import WCS\n')] |
import os
import time
import logging
import sys
import torch
import numpy as np
import random
def adjust_learning_rate(optimizer, lr):
    """Set the learning rate of every parameter group of ``optimizer`` to ``lr``."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def setup_logger(root_folder, exp_id):
    """Create a DEBUG-level logger that writes to stdout (DEBUG+) and to the
    file ``root_folder/exp_id`` (INFO+).

    Args:
        root_folder (str): directory for log files; created if missing.
        exp_id (str): log file name inside root_folder.

    Returns:
        logging.Logger: the module logger (note: handlers accumulate if this
            is called more than once in a process).
    """
    # BUG FIX: was os.system(f"mkdir {root_folder}") -- shell-dependent,
    # injection-prone, and fails when parent directories are missing.
    os.makedirs(root_folder, exist_ok=True)
    log_path = os.path.join(root_folder, exp_id)
    logger = logging.getLogger(__name__)
    logger.setLevel(level=logging.DEBUG)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(level=logging.DEBUG)
    logger.addHandler(stream_handler)
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(level=logging.INFO)
    logger.addHandler(file_handler)
    return logger
def setup_seed(seed):
    """Seed every RNG source (torch CPU, torch CUDA, numpy, python random)
    with ``seed`` and force deterministic cuDNN kernels for reproducibility."""
    for seed_fn in (torch.manual_seed,
                    torch.cuda.manual_seed_all,
                    np.random.seed,
                    random.seed):
        seed_fn(seed)
    torch.backends.cudnn.deterministic = True
def getGrad(m):
    """Return the mean absolute gradient of every parameter of ``m`` that
    currently holds a gradient, keyed by parameter name; parameters without
    a gradient are skipped."""
    return {
        name: np.abs(params.grad.data.cpu().numpy()).mean()
        for name, params in m.named_parameters()
        if params.grad is not None
    }
"numpy.random.seed",
"logging.FileHandler",
"numpy.abs",
"torch.manual_seed",
"logging.StreamHandler",
"os.path.exists",
"os.system",
"torch.cuda.manual_seed_all",
"random.seed",
"os.path.join",
"logging.getLogger"
] | [((350, 383), 'os.path.join', 'os.path.join', (['root_folder', 'exp_id'], {}), '(root_folder, exp_id)\n', (362, 383), False, 'import os\n'), ((396, 423), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (413, 423), False, 'import logging\n'), ((486, 519), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (507, 519), False, 'import logging\n'), ((626, 655), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (645, 655), False, 'import logging\n'), ((783, 806), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (800, 806), False, 'import torch\n'), ((811, 843), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (837, 843), False, 'import torch\n'), ((848, 868), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (862, 868), True, 'import numpy as np\n'), ((873, 890), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (884, 890), False, 'import random\n'), ((264, 291), 'os.path.exists', 'os.path.exists', (['root_folder'], {}), '(root_folder)\n', (278, 291), False, 'import os\n'), ((301, 334), 'os.system', 'os.system', (['f"""mkdir {root_folder}"""'], {}), "(f'mkdir {root_folder}')\n", (310, 334), False, 'import os\n'), ((1155, 1173), 'numpy.abs', 'np.abs', (['grad_array'], {}), '(grad_array)\n', (1161, 1173), True, 'import numpy as np\n')] |
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tempfile
import numpy as np
from sklearn.preprocessing import StandardScaler
import fastestimator as fe
import tensorflow as tf
from fastestimator.op.tensorop import MeanSquaredError, ModelOp
from fastestimator.trace import ModelSaver
from tensorflow.keras import layers
def create_dnn():
    """Build the housing-price regressor: a fully connected network over the
    13 input features with three relu hidden layers (23/16/8 units), each
    followed by 50% dropout, and a single linear output unit."""
    layer_stack = [
        layers.Dense(23, activation="relu", input_shape=(13, )),
        layers.Dropout(0.5),
        layers.Dense(16, activation="relu"),
        layers.Dropout(0.5),
        layers.Dense(8, activation="relu"),
        layers.Dropout(0.5),
        layers.Dense(1, activation="linear"),
    ]
    model = tf.keras.Sequential()
    for layer in layer_stack:
        model.add(layer)
    return model
def get_estimator(epochs=50, batch_size=32, steps_per_epoch=None, validation_steps=None, model_dir=None):
    """Build a FastEstimator Estimator for Boston-housing price regression.

    Args:
        epochs (int): number of training epochs.
        batch_size (int): pipeline batch size.
        steps_per_epoch (int): optional cap on training steps per epoch.
        validation_steps (int): optional cap on validation steps.
        model_dir (str): directory for the best-model checkpoint; a fresh
            temporary directory is created when None.

    Returns:
        fe.Estimator: configured estimator, ready for .fit().
    """
    # BUG FIX: the default used to be model_dir=tempfile.mkdtemp() in the
    # signature, which is evaluated once at import time and shared by every
    # call; create the temporary directory lazily instead.
    if model_dir is None:
        model_dir = tempfile.mkdtemp()
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.boston_housing.load_data()
    # step 1. prepare data: standardize features with training-set statistics
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_eval = scaler.transform(x_eval)
    train_data = {"x": x_train, "y": np.expand_dims(y_train, -1)}
    eval_data = {"x": x_eval, "y": np.expand_dims(y_eval, -1)}
    data = {"train": train_data, "eval": eval_data}
    pipeline = fe.Pipeline(batch_size=batch_size, data=data)
    # step 2. prepare model and MSE training network
    model = fe.build(model_def=create_dnn, model_name="dnn", optimizer="adam", loss_name="loss")
    network = fe.Network(ops=[
        ModelOp(inputs="x", model=model, outputs="y_pred"), MeanSquaredError(inputs=("y", "y_pred"), outputs="loss")
    ])
    # step 3. prepare estimator; checkpoint the best model seen so far
    traces = [ModelSaver(model_name="dnn", save_dir=model_dir, save_best=True)]
    estimator = fe.Estimator(network=network,
                              pipeline=pipeline,
                              epochs=epochs,
                              steps_per_epoch=steps_per_epoch,
                              validation_steps=validation_steps,
                              log_steps=10,
                              traces=traces)
    return estimator
if __name__ == "__main__":
    # Train the housing-price regressor when run as a script.
    estimator = get_estimator()
    estimator.fit()
| [
"tensorflow.keras.datasets.boston_housing.load_data",
"sklearn.preprocessing.StandardScaler",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"fastestimator.Estimator",
"numpy.expand_dims",
"fastestimator.op.tensorop.ModelOp",
"tempfile.mkdtemp",
"tensorflow.keras.Sequential",
... | [((1005, 1026), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (1024, 1026), True, 'import tensorflow as tf\n'), ((1476, 1494), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1492, 1494), False, 'import tempfile\n'), ((1540, 1584), 'tensorflow.keras.datasets.boston_housing.load_data', 'tf.keras.datasets.boston_housing.load_data', ([], {}), '()\n', (1582, 1584), True, 'import tensorflow as tf\n'), ((1626, 1642), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1640, 1642), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1921, 1966), 'fastestimator.Pipeline', 'fe.Pipeline', ([], {'batch_size': 'batch_size', 'data': 'data'}), '(batch_size=batch_size, data=data)\n', (1932, 1966), True, 'import fastestimator as fe\n'), ((2008, 2096), 'fastestimator.build', 'fe.build', ([], {'model_def': 'create_dnn', 'model_name': '"""dnn"""', 'optimizer': '"""adam"""', 'loss_name': '"""loss"""'}), "(model_def=create_dnn, model_name='dnn', optimizer='adam',\n loss_name='loss')\n", (2016, 2096), True, 'import fastestimator as fe\n'), ((2376, 2544), 'fastestimator.Estimator', 'fe.Estimator', ([], {'network': 'network', 'pipeline': 'pipeline', 'epochs': 'epochs', 'steps_per_epoch': 'steps_per_epoch', 'validation_steps': 'validation_steps', 'log_steps': '(10)', 'traces': 'traces'}), '(network=network, pipeline=pipeline, epochs=epochs,\n steps_per_epoch=steps_per_epoch, validation_steps=validation_steps,\n log_steps=10, traces=traces)\n', (2388, 2544), True, 'import fastestimator as fe\n'), ((1042, 1096), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(23)'], {'activation': '"""relu"""', 'input_shape': '(13,)'}), "(23, activation='relu', input_shape=(13,))\n", (1054, 1096), False, 'from tensorflow.keras import layers\n'), ((1113, 1132), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (1127, 1132), False, 'from tensorflow.keras import layers\n'), ((1148, 1183), 
'tensorflow.keras.layers.Dense', 'layers.Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (1160, 1183), False, 'from tensorflow.keras import layers\n'), ((1199, 1218), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (1213, 1218), False, 'from tensorflow.keras import layers\n'), ((1234, 1268), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (1246, 1268), False, 'from tensorflow.keras import layers\n'), ((1284, 1303), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (1298, 1303), False, 'from tensorflow.keras import layers\n'), ((1319, 1355), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (1331, 1355), False, 'from tensorflow.keras import layers\n'), ((1762, 1789), 'numpy.expand_dims', 'np.expand_dims', (['y_train', '(-1)'], {}), '(y_train, -1)\n', (1776, 1789), True, 'import numpy as np\n'), ((1826, 1852), 'numpy.expand_dims', 'np.expand_dims', (['y_eval', '(-1)'], {}), '(y_eval, -1)\n', (1840, 1852), True, 'import numpy as np\n'), ((2294, 2358), 'fastestimator.trace.ModelSaver', 'ModelSaver', ([], {'model_name': '"""dnn"""', 'save_dir': 'model_dir', 'save_best': '(True)'}), "(model_name='dnn', save_dir=model_dir, save_best=True)\n", (2304, 2358), False, 'from fastestimator.trace import ModelSaver\n'), ((2132, 2182), 'fastestimator.op.tensorop.ModelOp', 'ModelOp', ([], {'inputs': '"""x"""', 'model': 'model', 'outputs': '"""y_pred"""'}), "(inputs='x', model=model, outputs='y_pred')\n", (2139, 2182), False, 'from fastestimator.op.tensorop import MeanSquaredError, ModelOp\n'), ((2184, 2240), 'fastestimator.op.tensorop.MeanSquaredError', 'MeanSquaredError', ([], {'inputs': "('y', 'y_pred')", 'outputs': '"""loss"""'}), "(inputs=('y', 'y_pred'), outputs='loss')\n", (2200, 2240), False, 'from fastestimator.op.tensorop import 
MeanSquaredError, ModelOp\n')] |
import numpy as np
import torch
from gym.spaces import Dict
from rllab.misc.instrument import VariantGenerator
import rlkit.torch.pytorch_util as ptu
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger, set_seed
from rlkit.torch.networks import MlpPolicy
from rlkit.envs import get_meta_env, get_meta_env_params_iters
from rlkit.torch.irl.np_bc import NeuralProcessBC
from rlkit.torch.irl.encoders.trivial_encoder import TrivialTrajEncoder, TrivialR2ZMap, TrivialNPEncoder
import yaml
import argparse
import importlib
import psutil
import os
from os import path
import argparse
import joblib
from time import sleep
EXPERT_LISTING_YAML_PATH = '/h/kamyar/oorl_rlkit/rlkit/torch/irl/experts.yaml'
def experiment(variant):
    """Train a NeuralProcessBC (behavioural cloning with a neural-process
    context encoder) on expert demonstration buffers selected by ``variant``.

    Args:
        variant (dict): experiment spec; reads 'expert_name',
            'expert_seed_run_idx', 'env_specs' and the 'algo_params' block
            (policy sizes, np_params, policy_uses_pixels, use_layer_norm, ...).

    Returns:
        int: always 1 (success flag for the launcher).
    """
    # Resolve the expert run on disk and load its saved replay buffers.
    with open(EXPERT_LISTING_YAML_PATH, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input (PyYAML >= 5.1); fine only if the listing
        # file is trusted.
        listings = yaml.load(f.read())
    expert_dir = listings[variant['expert_name']]['exp_dir']
    specific_run = listings[variant['expert_name']]['seed_runs'][variant['expert_seed_run_idx']]
    file_to_load = path.join(expert_dir, specific_run, 'extra_data.pkl')
    extra_data = joblib.load(file_to_load)
    # this script is for the non-meta-learning GAIL
    train_context_buffer, train_test_buffer = extra_data['meta_train']['context'], extra_data['meta_train']['test']
    test_context_buffer, test_test_buffer = extra_data['meta_test']['context'], extra_data['meta_test']['test']
    # set up the envs
    env_specs = variant['env_specs']
    meta_train_env, meta_test_env = get_meta_env(env_specs)
    # student policy should not have access to any task information
    print(variant['algo_params'].keys())
    meta_train_env.policy_uses_pixels = variant['algo_params']['policy_uses_pixels']
    meta_train_env.policy_uses_task_params = False
    meta_train_env.concat_task_params_to_policy_obs = False
    meta_test_env.policy_uses_pixels = variant['algo_params']['policy_uses_pixels']
    meta_test_env.policy_uses_task_params = False
    meta_test_env.concat_task_params_to_policy_obs = False
    # set up the policy and training algorithm
    # obs/action sizes are flattened products of the (non-pixel) space shapes
    if isinstance(meta_train_env.observation_space, Dict):
        if variant['algo_params']['policy_uses_pixels']:
            raise NotImplementedError('Not implemented pixel version of things!')
        else:
            obs_dim = int(np.prod(meta_train_env.observation_space.spaces['obs'].shape))
    else:
        obs_dim = int(np.prod(meta_train_env.observation_space.shape))
    action_dim = int(np.prod(meta_train_env.action_space.shape))
    print('obs dim: %d' % obs_dim)
    print('act dim: %d' % action_dim)
    sleep(3)  # pause so the printed dimensions can be read
    policy_net_size = variant['algo_params']['policy_net_size']
    policy_num_layers = variant['algo_params']['policy_num_layers']
    hidden_sizes = [policy_net_size] * policy_num_layers
    # policy = MlpPolicy(
    #     [policy_net_size, policy_net_size],
    #     action_dim,
    #     obs_dim + variant['algo_params']['np_params']['z_dim'],
    #     hidden_activation=torch.nn.functional.tanh,
    #     layer_norm=variant['algo_params']['use_layer_norm']
    # )
    # The policy conditions on the observation concatenated with the latent z.
    policy = MlpPolicy(
        hidden_sizes,
        action_dim,
        obs_dim + variant['algo_params']['np_params']['z_dim'],
        # hidden_activation=torch.nn.functional.relu,
        hidden_activation=torch.nn.functional.tanh,
        output_activation=torch.nn.functional.tanh,
        layer_norm=variant['algo_params']['use_layer_norm']
        # batch_norm=True
    )
    # Make the neural process
    # in the initial version we are assuming all trajectories have the same length
    timestep_enc_params = variant['algo_params']['np_params']['traj_enc_params']['timestep_enc_params']
    traj_enc_params = variant['algo_params']['np_params']['traj_enc_params']['traj_enc_params']
    timestep_enc_params['input_size'] = obs_dim + action_dim
    traj_samples, _ = train_context_buffer.sample_trajs(1, num_tasks=1)
    len_context_traj = traj_samples[0][0]['observations'].shape[0]
    # NOTE(review): the sampled trajectory length above is immediately
    # overridden with a hard-coded 5 -- confirm this is intentional.
    len_context_traj = 5
    traj_enc_params['input_size'] = timestep_enc_params['output_size'] * len_context_traj
    traj_enc = TrivialTrajEncoder(
        timestep_enc_params,
        traj_enc_params
    )
    # r -> z map: shared trunk plus split heads producing the z parameters
    trunk_params = variant['algo_params']['np_params']['r2z_map_params']['trunk_params']
    trunk_params['input_size'] = traj_enc.output_size
    split_params = variant['algo_params']['np_params']['r2z_map_params']['split_heads_params']
    split_params['input_size'] = trunk_params['output_size']
    split_params['output_size'] = variant['algo_params']['np_params']['z_dim']
    r2z_map = TrivialR2ZMap(
        trunk_params,
        split_params
    )
    np_enc = TrivialNPEncoder(
        variant['algo_params']['np_params']['np_enc_params']['agg_type'],
        traj_enc,
        r2z_map
    )
    train_task_params_sampler, test_task_params_sampler = get_meta_env_params_iters(env_specs)
    algorithm = NeuralProcessBC(
        meta_test_env, # env is the test env, training_env is the training env (following rlkit original setup)
        policy,
        train_context_buffer,
        train_test_buffer,
        test_context_buffer,
        test_test_buffer,
        np_enc,
        train_task_params_sampler=train_task_params_sampler,
        test_task_params_sampler=test_task_params_sampler,
        training_env=meta_train_env, # the env used for generating trajectories
        **variant['algo_params']
    )
    if ptu.gpu_enabled():
        algorithm.cuda()
    algorithm.train()
    return 1
if __name__ == '__main__':
    # Parse the path of the experiment specification file.
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--experiment', help='experiment specification file')
    args = parser.parse_args()
    with open(args.experiment, 'r') as spec_file:
        spec_string = spec_file.read()
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input (PyYAML >= 5.1); yaml.safe_load would be
        # safer if the spec files never rely on python-object tags.
        exp_specs = yaml.load(spec_string)
    if exp_specs['use_gpu']:
        ptu.set_gpu_mode(True)
    # Seed the RNGs and set up logging before launching the experiment.
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
    experiment(exp_specs)
| [
"yaml.load",
"argparse.ArgumentParser",
"rlkit.torch.irl.encoders.trivial_encoder.TrivialNPEncoder",
"rlkit.torch.pytorch_util.gpu_enabled",
"rlkit.envs.get_meta_env",
"rlkit.envs.get_meta_env_params_iters",
"rlkit.launchers.launcher_util.set_seed",
"time.sleep",
"numpy.prod",
"rlkit.torch.irl.enc... | [((1045, 1098), 'os.path.join', 'path.join', (['expert_dir', 'specific_run', '"""extra_data.pkl"""'], {}), "(expert_dir, specific_run, 'extra_data.pkl')\n", (1054, 1098), False, 'from os import path\n'), ((1116, 1141), 'joblib.load', 'joblib.load', (['file_to_load'], {}), '(file_to_load)\n', (1127, 1141), False, 'import joblib\n'), ((1519, 1542), 'rlkit.envs.get_meta_env', 'get_meta_env', (['env_specs'], {}), '(env_specs)\n', (1531, 1542), False, 'from rlkit.envs import get_meta_env, get_meta_env_params_iters\n'), ((2616, 2624), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (2621, 2624), False, 'from time import sleep\n'), ((3112, 3358), 'rlkit.torch.networks.MlpPolicy', 'MlpPolicy', (['hidden_sizes', 'action_dim', "(obs_dim + variant['algo_params']['np_params']['z_dim'])"], {'hidden_activation': 'torch.nn.functional.tanh', 'output_activation': 'torch.nn.functional.tanh', 'layer_norm': "variant['algo_params']['use_layer_norm']"}), "(hidden_sizes, action_dim, obs_dim + variant['algo_params'][\n 'np_params']['z_dim'], hidden_activation=torch.nn.functional.tanh,\n output_activation=torch.nn.functional.tanh, layer_norm=variant[\n 'algo_params']['use_layer_norm'])\n", (3121, 3358), False, 'from rlkit.torch.networks import MlpPolicy\n'), ((4129, 4185), 'rlkit.torch.irl.encoders.trivial_encoder.TrivialTrajEncoder', 'TrivialTrajEncoder', (['timestep_enc_params', 'traj_enc_params'], {}), '(timestep_enc_params, traj_enc_params)\n', (4147, 4185), False, 'from rlkit.torch.irl.encoders.trivial_encoder import TrivialTrajEncoder, TrivialR2ZMap, TrivialNPEncoder\n'), ((4611, 4652), 'rlkit.torch.irl.encoders.trivial_encoder.TrivialR2ZMap', 'TrivialR2ZMap', (['trunk_params', 'split_params'], {}), '(trunk_params, split_params)\n', (4624, 4652), False, 'from rlkit.torch.irl.encoders.trivial_encoder import TrivialTrajEncoder, TrivialR2ZMap, TrivialNPEncoder\n'), ((4693, 4799), 'rlkit.torch.irl.encoders.trivial_encoder.TrivialNPEncoder', 'TrivialNPEncoder', 
(["variant['algo_params']['np_params']['np_enc_params']['agg_type']", 'traj_enc', 'r2z_map'], {}), "(variant['algo_params']['np_params']['np_enc_params'][\n 'agg_type'], traj_enc, r2z_map)\n", (4709, 4799), False, 'from rlkit.torch.irl.encoders.trivial_encoder import TrivialTrajEncoder, TrivialR2ZMap, TrivialNPEncoder\n'), ((4884, 4920), 'rlkit.envs.get_meta_env_params_iters', 'get_meta_env_params_iters', (['env_specs'], {}), '(env_specs)\n', (4909, 4920), False, 'from rlkit.envs import get_meta_env, get_meta_env_params_iters\n'), ((4937, 5239), 'rlkit.torch.irl.np_bc.NeuralProcessBC', 'NeuralProcessBC', (['meta_test_env', 'policy', 'train_context_buffer', 'train_test_buffer', 'test_context_buffer', 'test_test_buffer', 'np_enc'], {'train_task_params_sampler': 'train_task_params_sampler', 'test_task_params_sampler': 'test_task_params_sampler', 'training_env': 'meta_train_env'}), "(meta_test_env, policy, train_context_buffer,\n train_test_buffer, test_context_buffer, test_test_buffer, np_enc,\n train_task_params_sampler=train_task_params_sampler,\n test_task_params_sampler=test_task_params_sampler, training_env=\n meta_train_env, **variant['algo_params'])\n", (4952, 5239), False, 'from rlkit.torch.irl.np_bc import NeuralProcessBC\n'), ((5461, 5478), 'rlkit.torch.pytorch_util.gpu_enabled', 'ptu.gpu_enabled', ([], {}), '()\n', (5476, 5478), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5599, 5624), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5622, 5624), False, 'import argparse\n'), ((6042, 6056), 'rlkit.launchers.launcher_util.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (6050, 6056), False, 'from rlkit.launchers.launcher_util import setup_logger, set_seed\n'), ((6061, 6130), 'rlkit.launchers.launcher_util.setup_logger', 'setup_logger', ([], {'exp_prefix': 'exp_prefix', 'exp_id': 'exp_id', 'variant': 'exp_specs'}), '(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)\n', (6073, 6130), False, 'from 
rlkit.launchers.launcher_util import setup_logger, set_seed\n'), ((2494, 2536), 'numpy.prod', 'np.prod', (['meta_train_env.action_space.shape'], {}), '(meta_train_env.action_space.shape)\n', (2501, 2536), True, 'import numpy as np\n'), ((5849, 5871), 'yaml.load', 'yaml.load', (['spec_string'], {}), '(spec_string)\n', (5858, 5871), False, 'import yaml\n'), ((5914, 5936), 'rlkit.torch.pytorch_util.set_gpu_mode', 'ptu.set_gpu_mode', (['(True)'], {}), '(True)\n', (5930, 5936), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((2424, 2471), 'numpy.prod', 'np.prod', (['meta_train_env.observation_space.shape'], {}), '(meta_train_env.observation_space.shape)\n', (2431, 2471), True, 'import numpy as np\n'), ((2329, 2390), 'numpy.prod', 'np.prod', (["meta_train_env.observation_space.spaces['obs'].shape"], {}), "(meta_train_env.observation_space.spaces['obs'].shape)\n", (2336, 2390), True, 'import numpy as np\n')] |
# Copyright 2013-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from . import * # noqa
from stella.intrinsics.python import zeros
import stella
from .basicmath import addition, subtraction
from . import basicmath
def direct_assignment(x, y):
    # Stella compiler fixture: the *shape* of this code (binding a parameter
    # to a local before using it) is what the test suite compiles and
    # compares against the pure-Python result — do not "simplify" it.
    a = x
    return a + y
def simple_assignment(x, y):
a = x + y
return a
def return_const():
return 41
def assign_const():
r = 42
return r
def double_assignment(x, y):
a = x
b = 5 + y
a += b
return a
def double_cast(x, y):
a = x / y
b = y // x
return a + b
def simple_if(x):
if x:
return 0
else:
return 42
def simple_ifeq(x, y):
if x == y:
return 0
else:
return 42
def simple_ifeq_const(x):
if x == False: # noqa TODO: support `is' here!
return 0
else:
return 42
def op_not(x):
return not x
def for1(x):
r = 0
for i in range(x):
r += i
return r
def for2(x):
r = 0
s = 1
for i in range(x):
r += i
s *= 2
return r + s
def for_loop_var(x):
for i in range(x):
x = i
return x
def for3(a):
r = 0
for x in a:
r += x
return r
def while1(x):
r = 0
while x > 0:
r += x
x -= 1
return r
def recursive(x):
if x <= 0:
return 1
else:
return x + recursive(x - 1)
def fib(x):
    # Stella fixture: naive doubly-recursive Fibonacci. Returns 1 for any
    # x <= 2, including the non-positive inputs used by test7.
    if x <= 2:
        return 1
    return fib(x - 1) + fib(x - 2)
def fib_nonrecursive(n):
    # Stella fixture: iterative Fibonacci.
    # NOTE(review): fib_nonrecursive(2) returns 0 while fib(2) returns 1;
    # the tests only compare the Python and compiled runs of the *same*
    # function, so this discrepancy is harmless here.
    if n == 0:
        return 1
    if n == 1:
        return 1
    grandparent = 1
    parent = 1
    me = 0  # required for stella only (must be bound before the loop)
    for i in range(2, n):
        me = parent + grandparent
        grandparent = parent
        parent = me
    return me
def hof_f(n):
    # Stella fixture: "F" of the mutually recursive hof_f/hof_m pair —
    # exercises mutual recursion in the compiler.
    if n == 0:
        return 1
    else:
        return n - hof_m(hof_f(n - 1))
def hof_m(n):
    # Stella fixture: "M" counterpart of hof_f (mutual recursion); the base
    # case returns 0 where hof_f returns 1.
    if n == 0:
        return 0
    else:
        return n - hof_f(hof_m(n - 1))
def and_(a, b):
return a and b
def or_(a, b):
return a or b
some_global = 0
def use_global():
    # Stella fixture for reading and writing a module-level global: loops,
    # updating x via global_test_worker(), until the worker sets
    # some_global to 1 (which happens once x has gone negative).
    global some_global
    some_global = 0
    x = 5
    while some_global == 0:
        x = global_test_worker(x)
    return x
def global_test_worker(x):
    # Helper for use_global(): raises the module-level stop flag once x is
    # negative, and always returns x - 1.
    global some_global
    if x < 0:
        some_global = 1
    return x - 1
def new_global_const():
global prev_undefined
prev_undefined = 1
def new_global_var(x):
global prev_undefined
prev_undefined = x
return prev_undefined # TODO: / 2 fails!
def kwargs(a=0, b=1):
return a + b
def kwargs_call1(x):
return kwargs(a=x)
def kwargs_call2(x):
return kwargs(b=x)
def kwargs_call3(x):
return kwargs(a=1, b=x)
def kwargs_call4(x):
return kwargs(a=x, b=x)
def return_without_init(x, y):
if y > 0:
return addition(x, y)
else:
return subtraction(x, y)
def ext_call(x):
return basicmath.subtraction(0, x)
def array_allocation():
a = zeros(5, dtype=int) # noqa
return 0
def array_allocation_reg():
"""
Since memory allocation is not a focus right now,
this test will be skipped indefinitely.
"""
l = 2
a = zeros(l, dtype=int) # noqa
return 0
def array_alloc_assignment():
a = zeros(5, dtype=int)
i = 0
a[0] = i
def array_alloc_assignment2():
a = zeros(5, dtype=int)
for i in range(5):
a[i] = 42
def array_alloc_assignment3():
a = zeros(5, dtype=int)
for i in range(5):
a[i] = i + 1
def void():
pass
def call_void():
void()
return 1
def array_alloc_use():
a = zeros(5, dtype=int)
a[0] = 1
return a[0]
def array_alloc_use2():
a = zeros(5, dtype=int)
for i in range(5):
a[i] = i ** 2
r = 0
for i in range(5):
r += a[i]
return r
def array_len():
a = zeros(5, dtype=int)
return len(a)
def numpy_array(a):
a[1] = 4
a[2] = 2
a[3] = -1
def numpy_assign(a):
b = a
b[1] = 4
def numpy_len_indirect(a):
l = len(a)
for i in range(l):
a[i] = i + 1
def numpy_len_direct(a):
for i in range(len(a)):
a[i] = i + 1
def numpy_passing(a):
a[0] = 3
a[2] = 1
numpy_receiving(a)
def numpy_receiving(a):
l = len(a)
for i in range(l):
if a[i] > 0:
a[i] += 1
def numpy_global():
global numpy_global_var
numpy_global_var[3] = 4
numpy_global_var[4] = 2
def numpy_array2d1(a):
a[0, 0] = 1
a[0, 1] = 2
a[1, 0] = 3
a[1, 1] = 4
def numpy_array2d2(a):
return a[0, 0] * a[1, 1] + a[1, 0] * a[0, 1]
def numpy_array2d_for1(a):
r = 0
for i in range(2):
for j in range(2):
r += a[i, j]
return r
def numpy_array2d_shape(a):
return a.shape
def numpy_array2d_for2(a):
maxx = a.shape[0]
maxy = a.shape[1]
r = 0
for i in range(maxx):
for j in range(maxy):
r += 1
return r
def numpy_array2d_for3(a, b):
maxx = a.shape[0]
maxy = a.shape[1]
r = 0
for i in range(maxx):
for j in range(maxy):
r += 1
b[i, j] += r
return r
def numpy_array2d_for4(a):
maxx = a.shape[0]
maxy = a.shape[1]
r = 0
for i in range(maxx):
for j in range(maxy):
r += a[i, j]
return r
def return_2():
return 2
def if_func_call():
return return_2() > 1
def numpy_func_limit(a):
for i in range(return_2()):
a[i] = i + 1
def return_tuple():
return (4, 2)
def first(t):
return t[0]
def callFirst():
t = (4, 2)
return first(t)
def second(t):
return t[1]
def firstPlusSecond():
t = (4, 2)
return first(t) + second(t)
def getReturnedTuple1():
t = return_tuple()
return first(t)
def getReturnedTuple2():
x, _ = return_tuple()
return x
def switchTuple():
    # Stella fixture: tuple unpacking followed by a simultaneous-assignment
    # swap; after the swap x == 2 and y == 1, so the result is 1.
    x, y = (1, 2)
    y, x = x, y
    return x - y
def createTuple1():
x = 1
t1 = (x, -1)
return t1
def createTuple2():
x = 7
t2 = (-2, x)
return t2
def createTuple3():
x = 1
t1 = (x, -1)
t2 = (t1[1], x)
return t1[0], t2[0]
def iterateTuple():
    # Stella fixture: direct iteration over a tuple literal, summing its
    # elements; the corresponding test is marked @unimplemented.
    t = (4, 6, 8, 10)
    r = 0
    for i in t:
        r += i
    return r
def addTuple(t):
return t[0] + t[1]
def bitwise_and(a, b):
return a & b
def bitwise_or(a, b):
return a | b
def bitwise_xor(a, b):
return a ^ b
def tuple_me(a):
return tuple(a)
def lt(x, y):
return x < y
def gt(x, y):
return x > y
def le(x, y):
return x <= y
def ge(x, y):
return x >= y
def ne(x, y):
return x != y
def eq(x, y):
return x == y
###
@mark.parametrize('args', [(40, 2), (43, -1), (41, 1)])
@mark.parametrize('f', [direct_assignment, simple_assignment, double_assignment, double_cast,
return_without_init])
def test1(f, args):
make_eq_test(f, args)
@mark.parametrize('args', [(True, True), (True, False), (False, True), (False, False)])
@mark.parametrize('f', [and_, or_])
def test2(f, args):
make_eq_test(f, args)
@mark.parametrize('arg', single_args([True, False]))
@mark.parametrize('f', [simple_if, simple_ifeq_const, op_not])
def test3(f, arg):
make_eq_test(f, arg)
@mark.parametrize('args', [(True, False), (True, True), (4, 2), (4.0, 4.0)])
@mark.parametrize('f', [simple_ifeq])
def test4(f, args):
make_eq_test(f, args)
@mark.parametrize('f', [return_const, assign_const, use_global, array_allocation,
array_alloc_assignment, array_alloc_assignment2, array_alloc_assignment3,
void, call_void, array_alloc_use, array_alloc_use2, array_len,
if_func_call])
def test5(f):
make_eq_test(f, ())
@mark.parametrize('f', [array_allocation_reg])
@unimplemented
def test5b(f):
make_eq_test(f, ())
@mark.parametrize('arg', single_args([0, 1, 2, 3, 42, -1, -42]))
@mark.parametrize('f', [for1, for2, for_loop_var, while1, recursive, ext_call, kwargs_call1,
kwargs_call2, kwargs_call3, kwargs_call4, op_not])
def test6(f, arg):
make_eq_test(f, arg)
@mark.parametrize('arg', single_args([0, 1, 2, 5, 8, -1, -3]))
@mark.parametrize('f', [fib, fib_nonrecursive])
def test7(f, arg):
make_eq_test(f, arg)
@mark.parametrize('f', [kwargs])
def test8(f):
make_eq_test(f, (1, 30))
@mark.parametrize('arg', single_args([0, 1, 2, 5, 8, 12]))
@mark.parametrize('f', [hof_f])
def test9(f, arg):
make_eq_test(f, arg)
@mark.parametrize('args', [{'a': 1}, {'b': 2}, {'a': 1, 'b': 0}, {'b': 1, 'a': 0}, {'a': 1.2},
{'b': -3}, {}])
def test10(args):
make_eq_kw_test(kwargs, args)
@mark.parametrize('args', [{'c': 5}, {'b': -1, 'c': 5}])
@mark.xfail()
def test11(args):
make_eq_kw_test(kwargs, args)
@mark.parametrize('arg', single_args([np.zeros(5, dtype=int)]))
@mark.parametrize('f', [numpy_array, numpy_len_indirect, numpy_receiving, numpy_passing,
numpy_len_direct, numpy_assign])
def test12(f, arg):
make_eq_test(f, arg)
@mark.parametrize('arg', single_args([np.zeros(5, dtype=int)]))
@mark.parametrize('f', [])
@unimplemented
def test12u(f, arg):
make_eq_test(f, arg)
def test13():
    # Compare Python vs stella-compiled mutation of a *global* numpy array:
    # each run gets its own copy of numpy_global_var, and both the return
    # values and the resulting array contents must agree.
    global numpy_global_var
    orig = np.zeros(5, dtype=int)
    numpy_global_var = np.array(orig)  # independent copy for the Python run
    py = numpy_global()
    py_res = numpy_global_var
    numpy_global_var = orig  # fresh zeros for the compiled run
    st = stella.wrap(numpy_global)()
    st_res = numpy_global_var
    assert py == st
    assert all(py_res == st_res)
def test13b():
    """Global scalars are currently not updated in Python when their value changes in Stella"""
    global some_global
    some_global = 0
    py = use_global()
    # The pure-Python run really flips the flag ...
    assert some_global == 1
    some_global = 0
    st = stella.wrap(use_global)()
    # ... but the compiled run leaves the Python-side global untouched.
    assert some_global == 0
    assert py == st
def test13c():
"""Defining a new (i.e. not in Python initialized) global variable
and initialize it with a constant
"""
global prev_undefined
assert 'prev_undefined' not in globals()
py = new_global_const()
assert 'prev_undefined' in globals()
del prev_undefined
assert 'prev_undefined' not in globals()
st = stella.wrap(new_global_const)()
# Note: currently no variable updates are transfered back to Python
assert 'prev_undefined' not in globals()
assert py == st
def test13d():
"""Defining a new (i.e. not in Python initialized) global variable
and initialize it with another variable
"""
global prev_undefined
assert 'prev_undefined' not in globals()
py = new_global_var(42)
assert 'prev_undefined' in globals()
del prev_undefined
assert 'prev_undefined' not in globals()
st = stella.wrap(new_global_var)(42)
# Note: currently no variable updates are transfered back to Python
assert 'prev_undefined' not in globals()
assert py == st
@mark.parametrize('f', [callFirst, firstPlusSecond, getReturnedTuple1,
getReturnedTuple2, return_tuple, switchTuple,
createTuple1, createTuple2, createTuple3])
def test14(f):
make_eq_test(f, ())
@mark.parametrize('f', [iterateTuple])
@unimplemented
def test14_u(f):
make_eq_test(f, ())
@mark.parametrize('arg', single_args([(10, 20), (4.0, 2.0), (13.0, 14)]))
@mark.parametrize('f', [addTuple])
def test15(f, arg):
make_eq_test(f, arg)
@mark.parametrize('arg', single_args([np.array([1, 2, 5, 7]), np.array([-1, -2, 0, 45]),
np.array([1.0, 9.0, -3.14, 0.0001, 11111.0])]))
@mark.parametrize('f', [for3])
def test16(f, arg):
make_numpy_eq_test(f, arg)
array2d_args = single_args([np.zeros((2, 2), dtype=int),
np.array([[4, 3], [2, -1]]),
np.array([[1.5, 2.5, 5.5], [-3.3, -5.7, 1.1]]),
np.array([[42.0, 4.2], [5, 7], [0, 123]])
])
@mark.parametrize('arg', array2d_args)
@mark.parametrize('f', [numpy_array2d1, numpy_array2d2, numpy_array2d_for1, numpy_array2d_for2,
numpy_array2d_for4])
def test17(f, arg):
make_numpy_eq_test(f, arg)
@mark.parametrize('arg', array2d_args)
@mark.parametrize('f', [])
@unimplemented
def test17u(f, arg):
make_numpy_eq_test(f, arg)
@mark.parametrize('arg', array2d_args)
@mark.parametrize('f', [numpy_array2d_for3])
def test18(f, arg):
arg2 = np.zeros(arg[0].shape)
make_numpy_eq_test(f, (arg[0], arg2))
@mark.parametrize('args', [(40, 2), (43, 1), (42, 3), (0, 0), (2, 2), (3, 3), (3, 4), (4, 7),
(True, True), (True, False), (False, False), (False, True)])
@mark.parametrize('f', [bitwise_and, bitwise_or, bitwise_xor])
def test19(f, args):
make_eq_test(f, args)
# TODO Who needs arrays longer than 2?
# @mark.parametrize('arg', single_args([np.zeros(5, dtype=int), np.zeros(3), np.array([1, 2, 42]),
#                                        np.array([0.0, 3.0])]))
@mark.parametrize('arg', single_args([np.zeros(2, dtype=int), np.zeros(2), np.array([1, 42]),
np.array([0.0, 3.0])]))
@mark.parametrize('f', [tuple_me])
def test20(f, arg):
make_numpy_eq_test(f, arg)
@mark.parametrize('args', [(40, 2), (43, 1), (42, 3), (0, 0), (2, 2), (3, 3), (3, 4), (4, 7),
                           (1.0, 0), (1.2, 2.0), (1, 2.3)])
@mark.parametrize('f', [lt, gt, eq, le, ge, ne])
def test21(f, args):
    # Renamed from `test19`: the original name collided with the
    # bitwise-operator test19 defined above, so this definition silently
    # shadowed it and pytest never collected the bitwise tests.
    make_eq_test(f, args)
| [
"numpy.array",
"numpy.zeros",
"stella.intrinsics.python.zeros",
"stella.wrap"
] | [((3469, 3488), 'stella.intrinsics.python.zeros', 'zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3474, 3488), False, 'from stella.intrinsics.python import zeros\n'), ((3672, 3691), 'stella.intrinsics.python.zeros', 'zeros', (['l'], {'dtype': 'int'}), '(l, dtype=int)\n', (3677, 3691), False, 'from stella.intrinsics.python import zeros\n'), ((3753, 3772), 'stella.intrinsics.python.zeros', 'zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3758, 3772), False, 'from stella.intrinsics.python import zeros\n'), ((3837, 3856), 'stella.intrinsics.python.zeros', 'zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3842, 3856), False, 'from stella.intrinsics.python import zeros\n'), ((3939, 3958), 'stella.intrinsics.python.zeros', 'zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3944, 3958), False, 'from stella.intrinsics.python import zeros\n'), ((4102, 4121), 'stella.intrinsics.python.zeros', 'zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4107, 4121), False, 'from stella.intrinsics.python import zeros\n'), ((4185, 4204), 'stella.intrinsics.python.zeros', 'zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4190, 4204), False, 'from stella.intrinsics.python import zeros\n'), ((4341, 4360), 'stella.intrinsics.python.zeros', 'zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4346, 4360), False, 'from stella.intrinsics.python import zeros\n'), ((9777, 9799), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (9785, 9799), True, 'import numpy as np\n'), ((9824, 9838), 'numpy.array', 'np.array', (['orig'], {}), '(orig)\n', (9832, 9838), True, 'import numpy as np\n'), ((12948, 12970), 'numpy.zeros', 'np.zeros', (['arg[0].shape'], {}), '(arg[0].shape)\n', (12956, 12970), True, 'import numpy as np\n'), ((9931, 9956), 'stella.wrap', 'stella.wrap', (['numpy_global'], {}), '(numpy_global)\n', (9942, 9956), False, 'import stella\n'), ((10280, 10303), 'stella.wrap', 'stella.wrap', (['use_global'], 
{}), '(use_global)\n', (10291, 10303), False, 'import stella\n'), ((10709, 10738), 'stella.wrap', 'stella.wrap', (['new_global_const'], {}), '(new_global_const)\n', (10720, 10738), False, 'import stella\n'), ((11239, 11266), 'stella.wrap', 'stella.wrap', (['new_global_var'], {}), '(new_global_var)\n', (11250, 11266), False, 'import stella\n'), ((12200, 12227), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'int'}), '((2, 2), dtype=int)\n', (12208, 12227), True, 'import numpy as np\n'), ((12257, 12284), 'numpy.array', 'np.array', (['[[4, 3], [2, -1]]'], {}), '([[4, 3], [2, -1]])\n', (12265, 12284), True, 'import numpy as np\n'), ((12314, 12360), 'numpy.array', 'np.array', (['[[1.5, 2.5, 5.5], [-3.3, -5.7, 1.1]]'], {}), '([[1.5, 2.5, 5.5], [-3.3, -5.7, 1.1]])\n', (12322, 12360), True, 'import numpy as np\n'), ((12390, 12431), 'numpy.array', 'np.array', (['[[42.0, 4.2], [5, 7], [0, 123]]'], {}), '([[42.0, 4.2], [5, 7], [0, 123]])\n', (12398, 12431), True, 'import numpy as np\n'), ((9350, 9372), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (9358, 9372), True, 'import numpy as np\n'), ((9607, 9629), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (9615, 9629), True, 'import numpy as np\n'), ((11951, 11973), 'numpy.array', 'np.array', (['[1, 2, 5, 7]'], {}), '([1, 2, 5, 7])\n', (11959, 11973), True, 'import numpy as np\n'), ((11975, 12000), 'numpy.array', 'np.array', (['[-1, -2, 0, 45]'], {}), '([-1, -2, 0, 45])\n', (11983, 12000), True, 'import numpy as np\n'), ((12040, 12084), 'numpy.array', 'np.array', (['[1.0, 9.0, -3.14, 0.0001, 11111.0]'], {}), '([1.0, 9.0, -3.14, 0.0001, 11111.0])\n', (12048, 12084), True, 'import numpy as np\n'), ((13536, 13558), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'int'}), '(2, dtype=int)\n', (13544, 13558), True, 'import numpy as np\n'), ((13560, 13571), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (13568, 13571), True, 'import numpy as np\n'), ((13573, 13590), 
'numpy.array', 'np.array', (['[1, 42]'], {}), '([1, 42])\n', (13581, 13590), True, 'import numpy as np\n'), ((13629, 13649), 'numpy.array', 'np.array', (['[0.0, 3.0]'], {}), '([0.0, 3.0])\n', (13637, 13649), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import contextlib
import difflib
import functools
import inspect
import json
import os
import signal
import sys
import textwrap
import traceback
import unicodedata
import warnings
from .exceptions import AstropyDeprecationWarning, AstropyPendingDeprecationWarning
from ..extern import six
from ..extern.six.moves import urllib
__all__ = ['find_current_module', 'isiterable', 'deprecated', 'lazyproperty',
'deprecated_attribute', 'silence', 'format_exception',
'NumpyRNGContext', 'find_api_page', 'is_path_hidden',
'walk_skip_hidden', 'JsonCustomEncoder', 'indent']
__doctest_skip__ = ['find_current_module']
def find_current_module(depth=1, finddiff=False):
    """ Determines the module/package from which this function is called.

    This function has two modes, determined by the `finddiff` option. It
    will either simply go the requested number of frames up the call
    stack (if `finddiff` is False), or it will go up the call stack until
    it reaches a module that is *not* in a specified set.

    Parameters
    ----------
    depth : int
        Specifies how far back to go in the call stack (0-indexed, so that
        passing in 0 gives back `astropy.utils.misc`).
    finddiff : bool or list
        If False, the returned `mod` will just be `depth` frames up from
        the current frame. Otherwise, the function will start at a frame
        `depth` up from current, and continue up the call stack to the
        first module that is *different* from those in the provided list.
        In this case, `finddiff` can be a list of modules or module
        names. Alternatively, it can be True, which will use the module
        `depth` call stack frames up as the module the returned module
        must be different from.

    Returns
    -------
    mod : module or None
        The module object or None if the package cannot be found. The name of
        the module is available as the ``__name__`` attribute of the returned
        object (if it isn't None).

    Raises
    ------
    ValueError
        If `finddiff` is a list with an invalid entry.

    Examples
    --------
    The examples below assume that there are two modules in a package named
    `pkg`. ``mod1.py``::

        def find1():
            from astropy.utils import find_current_module
            print find_current_module(1).__name__
        def find2():
            from astropy.utils import find_current_module
            cmod = find_current_module(2)
            if cmod is None:
                print 'None'
            else:
                print cmod.__name__
        def find_diff():
            from astropy.utils import find_current_module
            print find_current_module(0,True).__name__

    ``mod2.py``::

        def find():
            from .mod1 import find2
            find2()

    With these modules in place, the following occurs::

        >>> from pkg import mod1, mod2
        >>> from astropy.utils import find_current_module
        >>> mod1.find1()
        pkg.mod1
        >>> mod1.find2()
        None
        >>> mod2.find()
        pkg.mod2
        >>> find_current_module(0)
        <module 'astropy.utils.misc' from 'astropy/utils/misc.py'>
        >>> mod1.find_diff()
        pkg.mod1
    """
    # using a patched version of getmodule because the py 3.1 and 3.2 stdlib
    # is broken if the list of modules changes during import
    from .compat import inspect_getmodule
    # Walk `depth` frames up the stack; give up with None if the stack is
    # shallower than requested.
    frm = inspect.currentframe()
    for i in range(depth):
        frm = frm.f_back
        if frm is None:
            return None
    if finddiff:
        currmod = inspect_getmodule(frm)
        if finddiff is True:
            # Differ from the module `depth` frames up (i.e. the current one).
            diffmods = [currmod]
        else:
            # Normalize each entry to a module object; strings are imported,
            # a literal True stands in for the current module.
            diffmods = []
            for fd in finddiff:
                if inspect.ismodule(fd):
                    diffmods.append(fd)
                elif isinstance(fd, six.string_types):
                    diffmods.append(__import__(fd))
                elif fd is True:
                    diffmods.append(currmod)
                else:
                    raise ValueError('invalid entry in finddiff')
        # Keep climbing until a frame belongs to a module outside `diffmods`.
        # NOTE(review): if the stack is exhausted the loop falls through and
        # the function implicitly returns None.
        while frm:
            frmb = frm.f_back
            modb = inspect_getmodule(frmb)
            if modb not in diffmods:
                return modb
            frm = frmb
    else:
        return inspect_getmodule(frm)
def find_mod_objs(modname, onlylocals=False):
    """ Returns all the public attributes of a module referenced by name.

    .. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).

    Parameters
    ----------
    modname : str
        The name of the module to search.
    onlylocals : bool
        If True, only attributes that are either members of `modname` OR one of
        its modules or subpackages will be included.

    Returns
    -------
    localnames : list of str
        A list of the names of the attributes as they are named in the
        module `modname` .
    fqnames : list of str
        A list of the full qualified names of the attributes (e.g.,
        ``astropy.utils.misc.find_mod_objs``). For attributes that are
        simple variables, this is based on the local name, but for
        functions or classes it can be different if they are actually
        defined elsewhere and just referenced in `modname`.
    objs : list of objects
        A list of the actual attributes themselves (in the same order as
        the other arguments)
    """
    # Import (if necessary) and fetch the module object.
    __import__(modname)
    mod = sys.modules[modname]
    # Respect an explicit __all__; otherwise take every non-underscore name.
    if hasattr(mod, '__all__'):
        pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
    else:
        pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
    # filter out modules and pull the names and objs out
    ismodule = inspect.ismodule
    localnames = [k for k, v in pkgitems if not ismodule(v)]
    objs = [v for k, v in pkgitems if not ismodule(v)]
    # fully qualified names can be determined from the object's module
    fqnames = []
    for obj, lnm in zip(objs, localnames):
        if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
            fqnames.append(obj.__module__ + '.' + obj.__name__)
        else:
            # Simple variables carry no module info; qualify by local name.
            fqnames.append(modname + '.' + lnm)
    if onlylocals:
        # Keep only entries whose qualified name lives under `modname`.
        valids = [fqn.startswith(modname) for fqn in fqnames]
        localnames = [e for i, e in enumerate(localnames) if valids[i]]
        fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
        objs = [e for i, e in enumerate(objs) if valids[i]]
    return localnames, fqnames, objs
def isiterable(obj):
    """Return `True` when ``obj`` supports iteration, `False` otherwise."""
    try:
        iter(obj)
    except TypeError:
        return False
    else:
        return True
def indent(s, shift=1, width=4):
    """Indent a block of text.  The indentation is applied to each line.

    Parameters
    ----------
    s : str
        The text to indent.
    shift : int
        Number of indentation levels to apply.
    width : int
        Number of spaces per indentation level.

    Returns
    -------
    str
        The indented text.  Empty lines stay empty (no trailing spaces
        are added) and a trailing newline in `s` is preserved.
    """
    pad = ' ' * (width * shift)
    indented = '\n'.join(pad + l if l else ''
                         for l in s.splitlines())
    # Use endswith() rather than indexing s[-1]: the original raised
    # IndexError when given an empty string.
    if s.endswith('\n'):
        indented += '\n'
    return indented
class lazyproperty(object):
    """
    Works similarly to property(), but computes the value only once.

    This essentially memoizes the value of the property by storing the result
    of its computation in the ``__dict__`` of the object instance.  This is
    useful for computing the value of some property that should otherwise be
    invariant.  For example::

        >>> class LazyTest(object):
        ...     @lazyproperty
        ...     def complicated_property(self):
        ...         print('Computing the value for complicated_property...')
        ...         return 42
        ...
        >>> lt = LazyTest()
        >>> lt.complicated_property
        Computing the value for complicated_property...
        42
        >>> lt.complicated_property
        42

    If a setter for this property is defined, it will still be possible to
    manually update the value of the property, if that capability is desired.

    Adapted from the recipe at
    http://code.activestate.com/recipes/363602-lazy-property-evaluation
    """
    def __init__(self, fget, fset=None, fdel=None, doc=None):
        # Store the accessors; fall back to the getter's docstring.
        self._fget = fget
        self._fset = fset
        self._fdel = fdel
        if doc is None:
            self.__doc__ = fget.__doc__
        else:
            self.__doc__ = doc
    def __get__(self, obj, owner=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        # Cache the computed value in the instance __dict__ under the
        # getter's name; subsequent reads hit the cached entry.
        key = self._fget.__name__
        if key not in obj.__dict__:
            val = self._fget(obj)
            obj.__dict__[key] = val
            return val
        else:
            return obj.__dict__[key]
    def __set__(self, obj, val):
        obj_dict = obj.__dict__
        func_name = self._fget.__name__
        if self._fset:
            ret = self._fset(obj, val)
            if ret is not None and obj_dict.get(func_name) is ret:
                # By returning the value set the setter signals that it took
                # over setting the value in obj.__dict__; this mechanism allows
                # it to override the input value
                return
        # Default behavior: store the assigned value directly in the cache.
        obj_dict[func_name] = val
    def __delete__(self, obj):
        # Run the user deleter (if any), then drop the cached value so the
        # next access recomputes it.
        if self._fdel:
            self._fdel(obj)
        key = self._fget.__name__
        if key in obj.__dict__:
            del obj.__dict__[key]
    def getter(self, fget):
        # Mirror property(): replace the getter (slot 0 of __ter's args).
        return self.__ter(fget, 0)
    def setter(self, fset):
        # Replace the setter (slot 1).
        return self.__ter(fset, 1)
    def deleter(self, fdel):
        # Replace the deleter (slot 2).
        return self.__ter(fdel, 2)
    def __ter(self, f, arg):
        # Rebuild the lazyproperty with one accessor swapped out (`arg`
        # selects fget/fset/fdel), then rebind it under the property's name
        # in the *calling* class body, located by scanning the caller's
        # frame locals for this descriptor.
        args = [self._fget, self._fset, self._fdel, self.__doc__]
        args[arg] = f
        cls_ns = sys._getframe(1).f_locals
        for k, v in six.iteritems(cls_ns):
            if v is self:
                property_name = k
                break
        # NOTE(review): if this descriptor is not found in the caller's
        # namespace, `property_name` is unbound and a NameError follows —
        # getter/setter/deleter must be used inside the class body.
        cls_ns[property_name] = lazyproperty(*args)
        return cls_ns[property_name]
# TODO: Provide a class deprecation marker as well.
def deprecated(since, message='', name='', alternative='', pending=False,
               obj_type='function'):
    """
    Used to mark a function as deprecated.

    To mark an attribute as deprecated, use `deprecated_attribute`.

    Parameters
    ----------
    since : str
        The release at which this API became deprecated.  This is
        required.
    message : str, optional
        Override the default deprecation message.  The format
        specifier `%(func)s` may be used for the name of the function,
        and `%(alternative)s` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function.  `%(obj_type)s` may be used to insert a friendly name
        for the type of object being deprecated.
    name : str, optional
        The name of the deprecated function; if not provided the name
        is automatically determined from the passed in function,
        though this is useful in the case of renamed functions, where
        the new function is just assigned to the name of the
        deprecated function.  For example::

            def new_function():
                ...
            oldFunction = new_function

    alternative : str, optional
        An alternative function that the user may use in place of the
        deprecated function.  The deprecation warning will tell the user about
        this alternative if provided.
    pending : bool, optional
        If True, uses a AstropyPendingDeprecationWarning instead of a
        AstropyDeprecationWarning.
    """
    def deprecate(func, message=message, name=name, alternative=alternative,
                  pending=pending):
        # Unwrap classmethods so the underlying function can be wrapped;
        # the classmethod decorator is re-applied at the end.
        if isinstance(func, classmethod):
            try:
                func = func.__func__
            except AttributeError:
                # classmethods in Python2.6 and below lack the __func__
                # attribute so we need to hack around to get it
                method = func.__get__(None, object)
                if hasattr(method, '__func__'):
                    func = method.__func__
                elif hasattr(method, 'im_func'):
                    func = method.im_func
                else:
                    # Nothing we can do really...  just return the original
                    # classmethod
                    return func
            is_classmethod = True
        else:
            is_classmethod = False
        if not name:
            name = func.__name__
        altmessage = ''
        # Fall back to the default message when none was supplied (or when
        # `message` actually carried the function being decorated — see the
        # bare-function dispatch at the bottom).
        if not message or type(message) == type(deprecate):
            if pending:
                message = ('The %(func)s %(obj_type)s will be deprecated in a '
                           'future version.')
            else:
                message = ('The %(func)s %(obj_type)s is deprecated and may '
                           'be removed in a future version.')
        if alternative:
            altmessage = '\n        Use %s instead.' % alternative
        # Fill in the %-placeholders (works for custom messages too).
        message = ((message % {
            'func': name,
            'name': name,
            'alternative': alternative,
            'obj_type': obj_type}) +
            altmessage)
        @functools.wraps(func)
        def deprecated_func(*args, **kwargs):
            if pending:
                category = AstropyPendingDeprecationWarning
            else:
                category = AstropyDeprecationWarning
            warnings.warn(message, category, stacklevel=2)
            return func(*args, **kwargs)
        # Prepend a Sphinx `.. deprecated::` note to the wrapped docstring.
        old_doc = deprecated_func.__doc__
        if not old_doc:
            old_doc = ''
        old_doc = textwrap.dedent(old_doc).strip('\n')
        altmessage = altmessage.strip()
        if not altmessage:
            altmessage = message.strip()
        new_doc = (('\n.. deprecated:: %(since)s'
                    '\n    %(message)s\n\n' %
                    {'since': since, 'message': altmessage.strip()}) + old_doc)
        if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
            # docutils when the original docstring was blank.
            new_doc += r'\ '
        deprecated_func.__doc__ = new_doc
        if is_classmethod:
            deprecated_func = classmethod(deprecated_func)
        return deprecated_func
    # If a function was passed in place of `message`, it is the target to
    # deprecate: decorate it immediately with the default message.
    if type(message) == type(deprecate):
        return deprecate(message)
    return deprecate
def deprecated_attribute(name, since, message=None, alternative=None,
                         pending=False):
    """
    Used to mark a public attribute as deprecated.  This creates a
    property that will warn when the given attribute name is accessed.
    To prevent the warning (i.e. for internal code), use the private
    name for the attribute by prepending an underscore
    (i.e. `self._name`).

    Parameters
    ----------
    name : str
        The name of the deprecated attribute.
    since : str
        The release at which this API became deprecated.  This is
        required.
    message : str, optional
        Override the default deprecation message.  The format
        specifier `%(name)s` may be used for the name of the attribute,
        and `%(alternative)s` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function.
    alternative : str, optional
        An alternative attribute that the user may use in place of the
        deprecated attribute.  The deprecation warning will tell the
        user about this alternative if provided.
    pending : bool, optional
        If True, uses a AstropyPendingDeprecationWarning instead of a
        AstropyDeprecationWarning.

    Examples
    --------
    ::

        class MyClass:
            # Mark the old_name as deprecated
            old_name = misc.deprecated_attribute('old_name', '0.1')

            def method(self):
                self._old_name = 42
    """
    private_name = '_' + name

    # Forward message/alternative/pending to `deprecated`: the original
    # implementation accepted these arguments but silently ignored them.
    # The accessors are also named fget/fset/fdel so the builtin `set`
    # is not shadowed.
    @deprecated(since, name=name, obj_type='attribute', message=message,
                alternative=alternative, pending=pending)
    def fget(self):
        return getattr(self, private_name)

    @deprecated(since, name=name, obj_type='attribute', message=message,
                alternative=alternative, pending=pending)
    def fset(self, val):
        setattr(self, private_name, val)

    @deprecated(since, name=name, obj_type='attribute', message=message,
                alternative=alternative, pending=pending)
    def fdel(self):
        delattr(self, private_name)

    return property(fget, fset, fdel)
class _DummyFile(object):
"""A noop writeable object."""
def write(self, s):
pass
@contextlib.contextmanager
def silence():
    """A context manager that silences sys.stdout and sys.stderr.

    Both streams are replaced with no-op writers for the duration of the
    ``with`` block.  The original streams are restored on exit even when
    the body raises (the original implementation skipped restoration on
    exception, leaving stdout/stderr silenced for the rest of the run).
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    try:
        yield
    finally:
        # Always restore the real streams, even if the with-body raised.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
def format_exception(msg, *args, **kwargs):
    """
    Fill an exception-message template with details of the active exception.

    Given an exception message string, uses new-style formatting arguments
    ``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in
    information about the exception that occurred.  For example::

        try:
            1/0
        except:
            raise ZeroDivisionError(
                format_except('A divide by zero occurred in {filename} at '
                              'line {lineno} of function {func}.'))

    Any additional positional or keyword arguments passed to this function
    are also used to format the message.

    .. note::
        This uses `sys.exc_info` to gather up the information needed to
        fill in the formatting arguments, so it's not wise to use this
        outside of an `except` clause - if it is, this will substitute
        '<unknown>' for the 4 formatting arguments.
    """
    frames = traceback.extract_tb(sys.exc_info()[2], limit=1)
    if frames:
        filename, lineno, func, text = frames[0]
    else:
        filename = lineno = func = text = '<unknown>'
    return msg.format(*args, filename=filename, lineno=lineno, func=func,
                      text=text, **kwargs)
class NumpyRNGContext(object):
    """
    Context manager that temporarily seeds the global numpy RNG.

    On entry the current random-generator state is saved and the generator
    is seeded with ``seed``; on exit the saved state is restored, so code
    after the ``with`` block behaves exactly as if the block never ran.
    Primarily intended for the astropy testing suite, but also useful for
    ensuring reproducibility of Monte Carlo simulations in a science
    context.

    Parameters
    ----------
    seed : int
        The value to use to seed the numpy RNG

    Examples
    --------
    A typical use case might be::

        with NumpyRNGContext(<some seed value you pick>):
            from numpy import random
            randarr = random.randn(100)
            ... run your test using `randarr` ...

        # Any code using numpy.random at this indent level will act just
        # as it would have if it had been before the with statement -
        # e.g. whatever the default seed is.
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        from numpy import random

        # Snapshot the global RNG state so __exit__ can put it back.
        self.startstate = random.get_state()
        random.seed(self.seed)

    def __exit__(self, exc_type, exc_value, traceback):
        from numpy import random

        random.set_state(self.startstate)
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
    """
    Determines the URL of the API page for the specified object, and
    optionally open that page in a web browser.
    .. note::
        You must be connected to the internet for this to function even
        if `openinbrowser` is False, unless you provide a local version of
        the documentation to `version` (e.g., ``file:///path/to/docs``).
    Parameters
    ----------
    obj
        The object to open the docs for or its fully-qualified name
        (as a str).
    version : str
        The doc version - either a version number like '0.1', 'dev' for
        the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults to
        latest if you are on aren't on a release, otherwise, the version you
        are on.
    openinbrowser : bool
        If True, the `webbrowser` package will be used to open the doc
        page in a new web browser window.
    timeout : number, optional
        The number of seconds to wait before timing-out the query to
        the astropy documentation. If not given, the default python
        stdlib timeout will be used.
    Returns
    -------
    url : str
        The loaded URL
    Raises
    ------
    ValueError
        If the documentation can't be found
    """
    import webbrowser
    from zlib import decompress
    # Resolve ``obj`` to a fully-qualified dotted-name string.
    if (not isinstance(obj, six.string_types) and
            hasattr(obj, '__module__') and
            hasattr(obj, '__name__')):
        obj = obj.__module__ + '.' + obj.__name__
    elif inspect.ismodule(obj):
        obj = obj.__name__
    # Default the version to the running astropy's own version string
    # ('vX.Y' on a release, 'dev' otherwise).
    if version is None:
        from .. import version
        if version.release:
            version = 'v' + version.version
        else:
            version = 'dev'
    # ``version`` may also be a full base URL; normalize to a trailing '/'.
    if '://' in version:
        if version.endswith('index.html'):
            baseurl = version[:-10]
        elif version.endswith('/'):
            baseurl = version
        else:
            baseurl = version + '/'
    elif version == 'dev' or version == 'latest':
        baseurl = 'http://devdocs.astropy.org/'
    else:
        baseurl = 'http://docs.astropy.org/en/{vers}/'.format(vers=version)
    # Download the Sphinx intersphinx inventory for that documentation build.
    if timeout is None:
        uf = urllib.request.urlopen(baseurl + 'objects.inv')
    else:
        uf = urllib.request.urlopen(baseurl + 'objects.inv', timeout=timeout)
    try:
        # we read these lines so that `oistr` only gets the compressed
        # contents, not the header information
        isvers = uf.readline().rstrip().decode('utf-8')  # intersphinx version line
        proj = uf.readline().rstrip().decode('utf-8')  # project name
        vers = uf.readline().rstrip().decode('utf-8')  # project version
        uf.readline().rstrip().decode('utf-8')
        oistr = uf.read()
    finally:
        uf.close()
    oistr = decompress(oistr).decode('utf-8')
    resurl = None
    # Each inventory line looks like: "<name> <domain:role> <prio> <loc> ...".
    for l in oistr.strip().splitlines():
        ls = l.split()
        name = ls[0]
        loc = ls[3]
        if loc.endswith('$'):
            # '$' is intersphinx shorthand for "location ends with the name".
            loc = loc[:-1] + name
        if name == obj:
            resurl = baseurl + loc
            break
    if resurl is None:
        raise ValueError('Could not find the docs for the object {obj}'.format(obj=obj))
    elif openinbrowser:
        webbrowser.open(resurl)
    return resurl
def signal_number_to_name(signum):
    """
    Given an OS signal number, returns a signal name. If the signal
    number is unknown, returns ``'UNKNOWN'``.

    Parameters
    ----------
    signum : int
        The OS-level signal number to look up.

    Returns
    -------
    name : str
        The name of a matching ``SIG*`` constant from the `signal`
        module (e.g. ``'SIGINT'``), or ``'UNKNOWN'``.
    """
    # Since these numbers and names are platform specific, we use the
    # builtin signal module and build a reverse mapping.
    # Bug fix: ``dict.iteritems`` only exists on Python 2; ``items`` works
    # on both Python 2 and 3.
    signal_to_name_map = dict(
        (k, v) for v, k in signal.__dict__.items() if v.startswith('SIG'))
    return signal_to_name_map.get(signum, 'UNKNOWN')
# Platform-specific helper: on MS-Windows files carry an explicit "hidden"
# attribute; everywhere else hiddenness is purely the leading-dot convention
# (handled by the caller), so the fallback always returns False.
if sys.platform == 'win32':
    import ctypes
    def _has_hidden_attribute(filepath):
        """
        Returns True if the given filepath has the hidden attribute on
        MS-Windows. Based on a post here:
        http://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection
        """
        if isinstance(filepath, bytes):
            # The wide-char Win32 API needs str, not bytes.
            filepath = filepath.decode(sys.getfilesystemencoding())
        try:
            attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
            # GetFileAttributesW returns INVALID_FILE_ATTRIBUTES (-1) on error.
            assert attrs != -1
            # 0x2 is FILE_ATTRIBUTE_HIDDEN.
            result = bool(attrs & 2)
        except (AttributeError, AssertionError):
            # AttributeError: no ctypes.windll (non-Windows interpreter);
            # AssertionError: the attribute query failed for this path.
            result = False
        return result
else:
    def _has_hidden_attribute(filepath):
        """Non-Windows platforms have no hidden file attribute."""
        return False
def is_path_hidden(filepath):
    """
    Determines if a given file or directory is hidden.

    Parameters
    ----------
    filepath : str
        The path to a file or directory

    Returns
    -------
    hidden : bool
        Returns `True` if the file is hidden
    """
    base = os.path.basename(os.path.abspath(filepath))
    # ``base`` may be ``bytes`` or ``str``; compare against a dot of the
    # matching type.
    dot = b'.' if isinstance(base, bytes) else '.'
    return base.startswith(dot) or _has_hidden_attribute(filepath)
def walk_skip_hidden(top, onerror=None, followlinks=False):
    """
    A wrapper for `os.walk` that skips hidden files and directories.

    This function does not have the parameter `topdown` from
    `os.walk`: the directories must always be recursed top-down when
    using this function.

    See also
    --------
    os.walk : For a description of the parameters
    """
    walker = os.walk(
        top, topdown=True, onerror=onerror,
        followlinks=followlinks)
    for root, dirs, files in walker:
        # Prune in place: os.walk only honors removals from ``dirs`` when
        # the very same list object is mutated (hence the slice assignment).
        dirs[:] = [entry for entry in dirs if not is_path_hidden(entry)]
        files[:] = [entry for entry in files if not is_path_hidden(entry)]
        yield root, dirs, files
class JsonCustomEncoder(json.JSONEncoder):
    """Support for data types that JSON default encoder
    does not do.

    This includes:

        * Numpy array or number
        * Complex number
        * Set
        * Bytes (Python 3)

    Examples
    --------
    >>> import json
    >>> import numpy as np
    >>> from astropy.utils.misc import JsonCustomEncoder
    >>> json.dumps(np.arange(3), cls=JsonCustomEncoder)
    '[0, 1, 2]'
    """
    def default(self, obj):
        import numpy as np
        if isinstance(obj, (np.ndarray, np.number)):
            # ``tolist`` converts to (possibly nested) builtin Python types.
            return obj.tolist()
        elif isinstance(obj, (complex, np.complexfloating)):
            # Bug fix: ``np.complex`` was merely an alias of the builtin
            # ``complex`` and was removed in NumPy 1.24; referencing it
            # raised AttributeError.  ``np.complexfloating`` covers the
            # numpy complex scalar types instead.
            return [obj.real, obj.imag]
        elif isinstance(obj, set):
            return list(obj)
        elif isinstance(obj, bytes):  # pragma: py3
            return obj.decode()
        return json.JSONEncoder.default(self, obj)
def strip_accents(s):
    """
    Remove accents from a Unicode string.

    This helps with matching "ångström" to "angstrom", for example.
    """
    # Decompose each accented character into base char + combining marks,
    # then drop the combining marks (Unicode category 'Mn').
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed
                   if unicodedata.category(ch) != 'Mn')
def did_you_mean(s, candidates, n=3, cutoff=0.8):
    """
    When a string isn't found in a set of candidates, we can be nice
    to provide a list of alternatives in the exception. This
    convenience function helps to format that part of the exception.

    Parameters
    ----------
    s : str
    candidates : sequence of str or dict of str keys
    n : int
        The maximum number of results to include. See
        `difflib.get_close_matches`.
    cutoff : float
        In the range [0, 1]. Possibilities that don't score at least
        that similar to word are ignored. See
        `difflib.get_close_matches`.

    Returns
    -------
    message : str
        Returns the string "Did you mean X, Y, or Z?", or the empty
        string if no alternatives were found.
    """
    if isinstance(s, six.text_type):
        s = strip_accents(s)
    s_lower = s.lower()
    # Map each lowercased candidate to all of its original capitalizations.
    lower_to_originals = {}
    for cand in candidates:
        lower_to_originals.setdefault(cand.lower(), []).append(cand)
    # Heuristic: first try "singularizing" the word; if that doesn't match
    # anything, fall back to difflib's fuzzy matching on the lowered names.
    if s_lower.endswith('s') and s_lower[:-1] in lower_to_originals:
        matches = [s_lower[:-1]]
    else:
        matches = difflib.get_close_matches(
            s_lower, lower_to_originals, n=n, cutoff=cutoff)
    if not matches:
        return ''
    originals = set()
    for m in matches:
        originals.update(lower_to_originals[m])
    originals = sorted(originals)
    if len(originals) == 1:
        display = originals[0]
    else:
        display = ', '.join(originals[:-1]) + ' or ' + originals[-1]
    return 'Did you mean {0}?'.format(display)
| [
"signal.__dict__.iteritems",
"numpy.random.seed",
"os.walk",
"numpy.random.set_state",
"sys.exc_info",
"unicodedata.normalize",
"os.path.abspath",
"sys.getfilesystemencoding",
"zlib.decompress",
"ctypes.windll.kernel32.GetFileAttributesW",
"json.JSONEncoder.default",
"difflib.get_close_matches... | [((3789, 3811), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (3809, 3811), False, 'import inspect\n'), ((25253, 25321), 'os.walk', 'os.walk', (['top'], {'topdown': '(True)', 'onerror': 'onerror', 'followlinks': 'followlinks'}), '(top, topdown=True, onerror=onerror, followlinks=followlinks)\n', (25260, 25321), False, 'import os\n'), ((13538, 13559), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (13553, 13559), False, 'import functools\n'), ((19547, 19565), 'numpy.random.get_state', 'random.get_state', ([], {}), '()\n', (19563, 19565), False, 'from numpy import random\n'), ((19574, 19596), 'numpy.random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (19585, 19596), False, 'from numpy import random\n'), ((19696, 19729), 'numpy.random.set_state', 'random.set_state', (['self.startstate'], {}), '(self.startstate)\n', (19712, 19729), False, 'from numpy import random\n'), ((21363, 21384), 'inspect.ismodule', 'inspect.ismodule', (['obj'], {}), '(obj)\n', (21379, 21384), False, 'import inspect\n'), ((24636, 24661), 'os.path.abspath', 'os.path.abspath', (['filepath'], {}), '(filepath)\n', (24651, 24661), False, 'import os\n'), ((26446, 26481), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (26470, 26481), False, 'import json\n'), ((28267, 28339), 'difflib.get_close_matches', 'difflib.get_close_matches', (['s_lower', 'candidates_lower'], {'n': 'n', 'cutoff': 'cutoff'}), '(s_lower, candidates_lower, n=n, cutoff=cutoff)\n', (28292, 28339), False, 'import difflib\n'), ((10068, 10084), 'sys._getframe', 'sys._getframe', (['(1)'], {}), '(1)\n', (10081, 10084), False, 'import sys\n'), ((13774, 13820), 'warnings.warn', 'warnings.warn', (['message', 'category'], {'stacklevel': '(2)'}), '(message, category, stacklevel=2)\n', (13787, 13820), False, 'import warnings\n'), ((18171, 18185), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (18183, 18185), 
False, 'import sys\n'), ((22636, 22653), 'zlib.decompress', 'decompress', (['oistr'], {}), '(oistr)\n', (22646, 22653), False, 'from zlib import decompress\n'), ((23082, 23105), 'webbrowser.open', 'webbrowser.open', (['resurl'], {}), '(resurl)\n', (23097, 23105), False, 'import webbrowser\n'), ((24041, 24092), 'ctypes.windll.kernel32.GetFileAttributesW', 'ctypes.windll.kernel32.GetFileAttributesW', (['filepath'], {}), '(filepath)\n', (24082, 24092), False, 'import ctypes\n'), ((4124, 4144), 'inspect.ismodule', 'inspect.ismodule', (['fd'], {}), '(fd)\n', (4140, 4144), False, 'import inspect\n'), ((13973, 13997), 'textwrap.dedent', 'textwrap.dedent', (['old_doc'], {}), '(old_doc)\n', (13988, 13997), False, 'import textwrap\n'), ((23479, 23506), 'signal.__dict__.iteritems', 'signal.__dict__.iteritems', ([], {}), '()\n', (23504, 23506), False, 'import signal\n'), ((23979, 24006), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (24004, 24006), False, 'import sys\n'), ((26672, 26703), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 's'], {}), "('NFD', s)\n", (26693, 26703), False, 'import unicodedata\n'), ((26715, 26738), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (26735, 26738), False, 'import unicodedata\n')] |
#%%
import os
import pickle
import warnings
from operator import itemgetter
from pathlib import Path
from timeit import default_timer as timer
import leidenalg as la
import colorcet as cc
import community as cm
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from matplotlib.cm import ScalarMappable
from sklearn.model_selection import ParameterGrid
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed
from graspy.plot import gridplot, heatmap, pairplot
from graspy.utils import symmetrize
from src.data import load_everything, load_metagraph, load_networkx
from src.embed import lse, preprocess_graph
from src.graph import MetaGraph, preprocess
from src.io import savefig, saveobj, saveskels, savecsv
from src.visualization import random_names
from src.block import run_leiden
# Script name (without the ".py" suffix); used as the output folder name
# for figures, CSVs and saved objects below.
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
# %% [markdown]
# # Parameters
BRAIN_VERSION = "2020-03-02"  # connectome data version to load
BLIND = True
SAVEFIGS = False
SAVESKELS = False  # skeleton output disabled by default
SAVEOBJS = True
# Seed the global NumPy RNG so the run is reproducible.
np.random.seed(9812343)
sns.set_context("talk")
def stashfig(name, **kws):
    """Save the current matplotlib figure into this script's folder, then close it."""
    savefig(name, foldername=FNAME, save_on=True, **kws)
    plt.close()
def stashcsv(df, name, **kws):
    """Write ``df`` as a CSV into this script's output folder."""
    savecsv(df, name, foldername=FNAME, save_on=True, **kws)
def stashskel(name, ids, labels, colors=None, palette=None, **kws):
    """Save skeleton annotations into this script's output folder.

    Parameters
    ----------
    name : str
        Base filename for the saved skeletons.
    ids, labels : array-like
        Skeleton ids and their corresponding labels.
    colors : optional
        Explicit per-skeleton colors, forwarded to ``saveskels``.
    palette : optional
        Palette used to map labels to colors.  Bug fix: this argument was
        previously accepted but silently discarded (``palette=None`` was
        hard-coded in the call); it is now forwarded.
    **kws
        Additional keyword arguments forwarded to ``saveskels``.
    """
    saveskels(
        name,
        ids,
        labels,
        colors=colors,
        palette=palette,
        foldername=FNAME,
        save_on=SAVESKELS,
        **kws,
    )
def stashobj(obj, name, **kws):
    """Persist ``obj`` into this script's output folder (gated by SAVEOBJS)."""
    saveobj(obj, name, foldername=FNAME, save_on=SAVEOBJS, **kws)
def run_experiment(
    graph_type=None,
    threshold=None,
    binarize=None,
    seed=None,
    param_key=None,
    objective_function=None,
    implementation="leidenalg",
    **kws,
):
    """Run one Leiden community-detection trial on the preprocessed graph.

    Parameters
    ----------
    graph_type : str
        Which graph to load (passed to ``load_metagraph``).
    threshold, binarize
        Edge-weight preprocessing options (passed to ``preprocess``).
    seed : int
        RNG seed; also used as the temp-file tag for ``run_leiden``.
    param_key : str
        Name attached to the returned partition series.
    objective_function : str
        "CPM" or "modularity" (only used by the "leidenalg" backend).
    implementation : str
        "leidenalg" or "igraph".
    **kws
        Extra keyword arguments forwarded to ``run_leiden``.

    Returns
    -------
    (partition, modularity)

    Raises
    ------
    ValueError
        If ``objective_function`` or ``implementation`` is unrecognized
        (previously this surfaced as a confusing NameError on an unbound
        local variable).
    """
    np.random.seed(seed)
    # load and preprocess the data
    mg = load_metagraph(graph_type, version=BRAIN_VERSION)
    mg = preprocess(
        mg,
        threshold=threshold,
        sym_threshold=True,
        remove_pdiff=True,
        binarize=binarize,
    )
    if implementation == "leidenalg":
        if objective_function == "CPM":
            partition_type = la.CPMVertexPartition
        elif objective_function == "modularity":
            partition_type = la.ModularityVertexPartition
        else:
            # Fail fast instead of hitting a NameError on partition_type.
            raise ValueError(
                f"Unknown objective_function: {objective_function!r}"
            )
        partition, modularity = run_leiden(
            mg,
            temp_loc=seed,
            implementation=implementation,
            partition_type=partition_type,
            **kws,
        )
    elif implementation == "igraph":
        partition, modularity = run_leiden(
            mg, temp_loc=seed, implementation=implementation, **kws
        )
    else:
        raise ValueError(f"Unknown implementation: {implementation!r}")
    partition.name = param_key
    return partition, modularity
# %% [markdown]
# #
np.random.seed(89888)
n_replicates = 5
# Grid of settings swept below; each combination is repeated n_replicates
# times with independent seeds.
param_grid = {
    "graph_type": ["G"],
    "threshold": [0, 1, 2, 3],
    "resolution_parameter": np.geomspace(0.0005, 0.05, 10),
    "binarize": [True, False],
    "objective_function": ["CPM"],
    "n_iterations": [2],
}
params = list(ParameterGrid(param_grid))
# One unique seed and one human-readable key per (replicate, param) job.
seeds = np.random.choice(int(1e8), size=n_replicates * len(params), replace=False)
param_keys = random_names(len(seeds))
rep_params = []
for i, seed in enumerate(seeds):
    # Cycle through the grid so each setting gets n_replicates seeds.
    p = params[i % len(params)].copy()
    p["seed"] = seed
    p["param_key"] = param_keys[i]
    rep_params.append(p)
# %% [markdown]
# #
print("\n\n\n\n")
print(f"Running {len(rep_params)} jobs in total")
print("\n\n\n\n")
# Fan the jobs out across all-but-one CPU core.
outs = Parallel(n_jobs=-2, verbose=50)(delayed(run_experiment)(**p) for p in rep_params)
partitions, modularities = list(zip(*outs))
# %% [markdown]
# #
# One column of block labels per job, keyed by param_key.
block_df = pd.concat(partitions, axis=1, ignore_index=False)
stashcsv(block_df, "block-labels")
param_df = pd.DataFrame(rep_params)
param_df["modularity"] = modularities
stashcsv(param_df, "parameters")
| [
"pandas.DataFrame",
"src.graph.preprocess",
"numpy.random.seed",
"os.path.basename",
"src.io.savefig",
"matplotlib.pyplot.close",
"src.io.saveskels",
"numpy.geomspace",
"src.block.run_leiden",
"seaborn.set_context",
"joblib.Parallel",
"src.data.load_metagraph",
"sklearn.model_selection.Param... | [((1117, 1140), 'numpy.random.seed', 'np.random.seed', (['(9812343)'], {}), '(9812343)\n', (1131, 1140), True, 'import numpy as np\n'), ((1141, 1164), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (1156, 1164), True, 'import seaborn as sns\n'), ((2853, 2874), 'numpy.random.seed', 'np.random.seed', (['(89888)'], {}), '(89888)\n', (2867, 2874), True, 'import numpy as np\n'), ((3719, 3768), 'pandas.concat', 'pd.concat', (['partitions'], {'axis': '(1)', 'ignore_index': '(False)'}), '(partitions, axis=1, ignore_index=False)\n', (3728, 3768), True, 'import pandas as pd\n'), ((3815, 3839), 'pandas.DataFrame', 'pd.DataFrame', (['rep_params'], {}), '(rep_params)\n', (3827, 3839), True, 'import pandas as pd\n'), ((945, 971), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (961, 971), False, 'import os\n'), ((1198, 1250), 'src.io.savefig', 'savefig', (['name'], {'foldername': 'FNAME', 'save_on': '(True)'}), '(name, foldername=FNAME, save_on=True, **kws)\n', (1205, 1250), False, 'from src.io import savefig, saveobj, saveskels, savecsv\n'), ((1255, 1266), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1264, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1360), 'src.io.savecsv', 'savecsv', (['df', 'name'], {'foldername': 'FNAME', 'save_on': '(True)'}), '(df, name, foldername=FNAME, save_on=True, **kws)\n', (1311, 1360), False, 'from src.io import savefig, saveobj, saveskels, savecsv\n'), ((1435, 1540), 'src.io.saveskels', 'saveskels', (['name', 'ids', 'labels'], {'colors': 'colors', 'palette': 'None', 'foldername': 'FNAME', 'save_on': 'SAVESKELS'}), '(name, ids, labels, colors=colors, palette=None, foldername=FNAME,\n save_on=SAVESKELS, **kws)\n', (1444, 1540), False, 'from src.io import savefig, saveobj, saveskels, savecsv\n'), ((1646, 1707), 'src.io.saveobj', 'saveobj', (['obj', 'name'], {'foldername': 'FNAME', 'save_on': 'SAVEOBJS'}), '(obj, name, 
foldername=FNAME, save_on=SAVEOBJS, **kws)\n', (1653, 1707), False, 'from src.io import savefig, saveobj, saveskels, savecsv\n'), ((1904, 1924), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1918, 1924), True, 'import numpy as np\n'), ((1970, 2019), 'src.data.load_metagraph', 'load_metagraph', (['graph_type'], {'version': 'BRAIN_VERSION'}), '(graph_type, version=BRAIN_VERSION)\n', (1984, 2019), False, 'from src.data import load_everything, load_metagraph, load_networkx\n'), ((2029, 2126), 'src.graph.preprocess', 'preprocess', (['mg'], {'threshold': 'threshold', 'sym_threshold': '(True)', 'remove_pdiff': '(True)', 'binarize': 'binarize'}), '(mg, threshold=threshold, sym_threshold=True, remove_pdiff=True,\n binarize=binarize)\n', (2039, 2126), False, 'from src.graph import MetaGraph, preprocess\n'), ((2991, 3021), 'numpy.geomspace', 'np.geomspace', (['(0.0005)', '(0.05)', '(10)'], {}), '(0.0005, 0.05, 10)\n', (3003, 3021), True, 'import numpy as np\n'), ((3130, 3155), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['param_grid'], {}), '(param_grid)\n', (3143, 3155), False, 'from sklearn.model_selection import ParameterGrid\n'), ((3562, 3593), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-2)', 'verbose': '(50)'}), '(n_jobs=-2, verbose=50)\n', (3570, 3593), False, 'from joblib import Parallel, delayed\n'), ((2438, 2541), 'src.block.run_leiden', 'run_leiden', (['mg'], {'temp_loc': 'seed', 'implementation': 'implementation', 'partition_type': 'partition_type'}), '(mg, temp_loc=seed, implementation=implementation, partition_type\n =partition_type, **kws)\n', (2448, 2541), False, 'from src.block import run_leiden\n'), ((2677, 2744), 'src.block.run_leiden', 'run_leiden', (['mg'], {'temp_loc': 'seed', 'implementation': 'implementation'}), '(mg, temp_loc=seed, implementation=implementation, **kws)\n', (2687, 2744), False, 'from src.block import run_leiden\n'), ((3594, 3617), 'joblib.delayed', 'delayed', (['run_experiment'], {}), 
'(run_experiment)\n', (3601, 3617), False, 'from joblib import Parallel, delayed\n')] |
import numpy as np
from fe import topology
from timemachine.lib import potentials
from timemachine.lib import LangevinIntegrator, custom_ops
from ff.handlers import openmm_deserializer
from ff import Forcefield
def get_romol_conf(mol):
    """Coordinates of mol's 0th conformer, in nanometers"""
    positions = mol.GetConformer(0).GetPositions()
    coords_angstroms = np.array(positions, dtype=np.float64)
    # RDKit reports positions in angstroms; convert to nanometers.
    return coords_angstroms / 10
def minimize_host_4d(romol, host_system, host_coords, ff, box):
    """
    Insert romol into a host system via 4D decoupling under a Langevin thermostat.
    The ligand coordinates are fixed during this, and only host_coordinates are minimized.
    Parameters
    ----------
    romol: ROMol
        Ligand to be inserted. It must be embedded.
    host_system: openmm.System
        OpenMM System representing the host
    host_coords: np.ndarray
        N x 3 coordinates of the host. units of nanometers.
    ff: ff.Forcefield
        Wrapper class around a list of handlers
    box: np.ndarray [3,3]
        Box matrix for periodic boundary conditions. units of nanometers.
    Returns
    -------
    np.ndarray
        This returns minimized host_coords.
    """
    host_bps, host_masses = openmm_deserializer.deserialize_system(host_system, cutoff=1.2)
    # keep the ligand rigid
    # (inflating the ligand masses by 1e5 effectively freezes the ligand
    # under the Langevin dynamics below, so only the host relaxes)
    ligand_masses = [a.GetMass()*100000 for a in romol.GetAtoms()]
    combined_masses = np.concatenate([host_masses, ligand_masses])
    ligand_coords = get_romol_conf(romol)
    combined_coords = np.concatenate([host_coords, ligand_coords])
    num_host_atoms = host_coords.shape[0]
    final_potentials = []
    # Split out the host nonbonded term: it is re-parameterized jointly with
    # the guest below; every other host potential is kept unchanged.
    for bp in host_bps:
        if isinstance(bp, potentials.Nonbonded):
            # NOTE(review): assumes deserialize_system always yields exactly
            # one Nonbonded term; otherwise host_p is unbound here — confirm.
            host_p = bp
        else:
            final_potentials.append(bp)
    gbt = topology.BaseTopology(romol, ff)
    hgt = topology.HostGuestTopology(host_p, gbt)
    # setup the parameter handlers for the ligand
    tuples = [
        [hgt.parameterize_harmonic_bond, [ff.hb_handle]],
        [hgt.parameterize_harmonic_angle, [ff.ha_handle]],
        [hgt.parameterize_proper_torsion, [ff.pt_handle]],
        [hgt.parameterize_improper_torsion, [ff.it_handle]],
        [hgt.parameterize_nonbonded, [ff.q_handle, ff.lj_handle]],
    ]
    for fn, handles in tuples:
        params, potential = fn(*[h.params for h in handles])
        final_potentials.append(potential.bind(params))
    seed = 2020
    # Langevin thermostat at 300 K, 1.5 fs timestep, 1/ps friction.
    intg = LangevinIntegrator(
        300.0,
        1.5e-3,
        1.0,
        combined_masses,
        seed
    ).impl()
    x0 = combined_coords
    v0 = np.zeros_like(x0)
    u_impls = []
    for bp in final_potentials:
        fn = bp.bound_impl(precision=np.float32)
        u_impls.append(fn)
    # context components: positions, velocities, box, integrator, energy fxns
    ctxt = custom_ops.Context(
        x0,
        v0,
        box,
        intg,
        u_impls
    )
    # Anneal the 4D decoupling parameter from fully decoupled (lambda=1)
    # down to fully coupled (lambda=0) over 1000 steps.
    for lamb in np.linspace(1.0, 0, 1000):
        ctxt.step(lamb)
    # Return only the relaxed host coordinates; the ligand was held rigid.
    return ctxt.get_x_t()[:num_host_atoms]
| [
"numpy.zeros_like",
"ff.handlers.openmm_deserializer.deserialize_system",
"fe.topology.HostGuestTopology",
"timemachine.lib.custom_ops.Context",
"timemachine.lib.LangevinIntegrator",
"fe.topology.BaseTopology",
"numpy.linspace",
"numpy.concatenate"
] | [((1261, 1324), 'ff.handlers.openmm_deserializer.deserialize_system', 'openmm_deserializer.deserialize_system', (['host_system'], {'cutoff': '(1.2)'}), '(host_system, cutoff=1.2)\n', (1299, 1324), False, 'from ff.handlers import openmm_deserializer\n'), ((1443, 1487), 'numpy.concatenate', 'np.concatenate', (['[host_masses, ligand_masses]'], {}), '([host_masses, ligand_masses])\n', (1457, 1487), True, 'import numpy as np\n'), ((1552, 1596), 'numpy.concatenate', 'np.concatenate', (['[host_coords, ligand_coords]'], {}), '([host_coords, ligand_coords])\n', (1566, 1596), True, 'import numpy as np\n'), ((1828, 1860), 'fe.topology.BaseTopology', 'topology.BaseTopology', (['romol', 'ff'], {}), '(romol, ff)\n', (1849, 1860), False, 'from fe import topology\n'), ((1871, 1910), 'fe.topology.HostGuestTopology', 'topology.HostGuestTopology', (['host_p', 'gbt'], {}), '(host_p, gbt)\n', (1897, 1910), False, 'from fe import topology\n'), ((2615, 2632), 'numpy.zeros_like', 'np.zeros_like', (['x0'], {}), '(x0)\n', (2628, 2632), True, 'import numpy as np\n'), ((2850, 2896), 'timemachine.lib.custom_ops.Context', 'custom_ops.Context', (['x0', 'v0', 'box', 'intg', 'u_impls'], {}), '(x0, v0, box, intg, u_impls)\n', (2868, 2896), False, 'from timemachine.lib import LangevinIntegrator, custom_ops\n'), ((2960, 2985), 'numpy.linspace', 'np.linspace', (['(1.0)', '(0)', '(1000)'], {}), '(1.0, 0, 1000)\n', (2971, 2985), True, 'import numpy as np\n'), ((2465, 2526), 'timemachine.lib.LangevinIntegrator', 'LangevinIntegrator', (['(300.0)', '(0.0015)', '(1.0)', 'combined_masses', 'seed'], {}), '(300.0, 0.0015, 1.0, combined_masses, seed)\n', (2483, 2526), False, 'from timemachine.lib import LangevinIntegrator, custom_ops\n')] |
'''
Quaternion based methods and objects
'''
import numpy as np
from math import sin, cos, asin, atan2, degrees, radians, acos
import ctypes
from auv_python_helpers import load_library
# Native quaternion routines (hpr_to_quat, quat_quat_mult, quat_vector_mult).
quat_lib = load_library("libquat.so")
# ctypes array types used to marshal quaternions / 3-vectors to the C library.
quat_t = ctypes.c_double * 4
vect_t = ctypes.c_double * 3
double_t = ctypes.c_double
# Module-level scratch buffers reused as output parameters for every call.
# NOTE(review): shared mutable state — concurrent calls would race; confirm
# this module is only used single-threaded.
ret_quat = quat_t()
ret_vect = vect_t()
def quat_from_axis_angle(axis, angle):
    """ Axis is normalized and angle is in radians in the range [-pi, pi] """
    half = angle / 2.0
    # q = (cos(theta/2), axis * sin(theta/2))
    x, y, z = axis * sin(half)
    return Quaternion(q=[cos(half), x, y, z])
class Quaternion(object):
    """ A Quaternion

    Stored as ``self.q = [w, x, y, z]`` (scalar-first) in a numpy array;
    ``self.imag`` is a view of the vector part.  Construction either takes
    the four components directly (``q=...``) or heading/pitch/roll in
    degrees (``hpr=...``, converted by the native libquat routine).
    """
    def __init__(self, q=None, hpr=None, unit=True):
        # q : 4-sequence [w, x, y, z], takes precedence over hpr.
        # hpr : (heading, pitch, roll); converted via the C library.
        # unit : normalize to a unit quaternion unless explicitly disabled.
        if q is not None:
            self.q = np.array(q)
        else:
            quat_lib.hpr_to_quat(double_t(hpr[0]), double_t(hpr[1]),
                                 double_t(hpr[2]), ret_quat)
            self.q = np.array((ret_quat))
        if unit:
            self.normalize()
        self.imag = self.q[1:]
    def conjugate(self):
        """Return the conjugate (w, -x, -y, -z); the inverse for unit quaternions."""
        return Quaternion(q=[self.q[0], -self.q[1], -self.q[2], -self.q[3]])
    def norm(self):
        """Euclidean norm of the 4-vector."""
        return np.linalg.norm(self.q)
    def normalize(self):
        """Scale to unit norm in place; raises on (near-)zero quaternions."""
        norm = self.norm()
        if norm < 1e-20:
            raise Exception("Quaternion of norm 0!")
        self.q = self.q / norm
        # self.q was rebound above, so the imag view must be refreshed too.
        self.imag = self.q[1:]
    def __mul__(self, other):
        """Quaternion*Quaternion -> Quaternion; Quaternion*vec3 -> rotated vec3."""
        if isinstance(other, Quaternion):
            quat_lib.quat_quat_mult(quat_t(*self.q), quat_t(*other.q), ret_quat)
            # unit=False: the product of two unit quaternions is already unit.
            return Quaternion(q=ret_quat, unit=False)
        else:
            quat_lib.quat_vector_mult(quat_t(*self.q), vect_t(*other), ret_vect)
            return np.array(ret_vect)
    def __getitem__(self, index):
        return self.q[index]
    def __repr__(self):
        return "Quaternion: (%f, %f, %f, %f)" % (self.q[0], self.q[1],
                                                 self.q[2], self.q[3])
    def __iter__(self):
        for q in self.q:
            yield q
    def axis(self):
        """Rotation axis (unnormalized if angle is 0; undefined for identity)."""
        return self.imag / sin(self.angle() / 2.0)
    def angle(self):
        """ Returns angle in the range [0, 2pi] """
        return acos(self.q[0]) * 2
    def roll(self):
        """Roll in degrees (rotation about the x axis), from the Tait-Bryan formula."""
        q0, q1, q2, q3 = self.q
        return degrees(atan2(2 * (q0*q1 + q2*q3), 1 - 2 * (q1**2 + q2**2)))
    def pitch(self):
        """Pitch in degrees; the asin argument is clamped to [-1, 1] to
        guard against floating-point drift at the gimbal-lock poles."""
        q0, q1, q2, q3 = self.q
        term = 2 * (q0*q2 - q1*q3)
        if term > 1.0:
            term = 1.0
        elif term < -1.0:
            term = -1.0
        return degrees(asin(term))
    def heading(self):
        """Heading (yaw) in degrees, rotation about the z axis."""
        q0, q1, q2, q3 = self.q
        return degrees(atan2(2 * (q0*q3 + q1*q2), 1 - 2 * (q2**2 + q3**2)))
    def hpr(self):
        """
        Returns heading, pitch, and roll as a 3-tuple in degrees.
        """
        return [self.heading(), self.pitch(), self.roll()]
    def matrix(self):
        """3x3 rotation matrix equivalent of this (unit) quaternion."""
        qw, qx, qy, qz = self.q
        ret = np.empty((3, 3))
        ret[0][0] = 1 - 2*qy**2 - 2*qz**2
        ret[0][1] = 2*qx*qy - 2*qz*qw
        ret[0][2] = 2*qx*qz + 2*qy*qw
        ret[1][0] = 2*qx*qy + 2*qz*qw
        ret[1][1] = 1 - 2*qx**2 - 2*qz**2
        ret[1][2] = 2*qy*qz - 2*qx*qw
        ret[2][0] = 2*qx*qz - 2*qy*qw
        ret[2][1] = 2*qy*qz + 2*qx*qw
        ret[2][2] = 1 - 2*qx**2 - 2*qy**2
        return ret
| [
"auv_python_helpers.load_library",
"math.asin",
"math.atan2",
"numpy.empty",
"math.sin",
"math.acos",
"numpy.array",
"numpy.linalg.norm",
"math.cos"
] | [((198, 224), 'auv_python_helpers.load_library', 'load_library', (['"""libquat.so"""'], {}), "('libquat.so')\n", (210, 224), False, 'from auv_python_helpers import load_library\n'), ((484, 500), 'math.sin', 'sin', (['(angle / 2.0)'], {}), '(angle / 2.0)\n', (487, 500), False, 'from math import sin, cos, asin, atan2, degrees, radians, acos\n'), ((1128, 1150), 'numpy.linalg.norm', 'np.linalg.norm', (['self.q'], {}), '(self.q)\n', (1142, 1150), True, 'import numpy as np\n'), ((2893, 2909), 'numpy.empty', 'np.empty', (['(3, 3)'], {}), '((3, 3))\n', (2901, 2909), True, 'import numpy as np\n'), ((711, 722), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (719, 722), True, 'import numpy as np\n'), ((889, 907), 'numpy.array', 'np.array', (['ret_quat'], {}), '(ret_quat)\n', (897, 907), True, 'import numpy as np\n'), ((1668, 1686), 'numpy.array', 'np.array', (['ret_vect'], {}), '(ret_vect)\n', (1676, 1686), True, 'import numpy as np\n'), ((2149, 2164), 'math.acos', 'acos', (['self.q[0]'], {}), '(self.q[0])\n', (2153, 2164), False, 'from math import sin, cos, asin, atan2, degrees, radians, acos\n'), ((2245, 2304), 'math.atan2', 'atan2', (['(2 * (q0 * q1 + q2 * q3))', '(1 - 2 * (q1 ** 2 + q2 ** 2))'], {}), '(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 ** 2 + q2 ** 2))\n', (2250, 2304), False, 'from math import sin, cos, asin, atan2, degrees, radians, acos\n'), ((2507, 2517), 'math.asin', 'asin', (['term'], {}), '(term)\n', (2511, 2517), False, 'from math import sin, cos, asin, atan2, degrees, radians, acos\n'), ((2598, 2657), 'math.atan2', 'atan2', (['(2 * (q0 * q3 + q1 * q2))', '(1 - 2 * (q2 ** 2 + q3 ** 2))'], {}), '(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 ** 2 + q3 ** 2))\n', (2603, 2657), False, 'from math import sin, cos, asin, atan2, degrees, radians, acos\n'), ((524, 540), 'math.cos', 'cos', (['(angle / 2.0)'], {}), '(angle / 2.0)\n', (527, 540), False, 'from math import sin, cos, asin, atan2, degrees, radians, acos\n')] |
import sys
sys.path.append('util')
import os
import time
import math
import numpy as np
import mnist_loader
from sklearn.preprocessing import OneHotEncoder
class NN:
    def __init__(self, file_name, retrain = False):
        """Build (or reload) a one-hidden-layer MNIST classifier.

        Parameters
        ----------
        file_name : str
            Path of the ``.npy`` file used to persist/restore the weights.
        retrain : bool
            When True, ignore any saved weights and reinitialize.
        """
        self.file_name = file_name
        self.retrain = retrain
        #useful values
        # m,n=np.shape(X)
        self.K = 10  # number of output classes (digits 0-9)
        self.num_of_hidden = 1
        self.init_epsilon = .12
        self.num_of_neurons = 25  # hidden-layer width
        self.n_of_features = 784  # 28x28 input pixels, flattened
        #hyperparameters
        self.lmbda = .001
        self.alpha = .001
        #initialization
        self.theta={}
        self.a={}
        self.z={}
        # Reinitialize the weights unless a saved set can be loaded.
        if retrain or not self.load():
            self.__initialize_Theta(1, self.n_of_features, self.num_of_neurons)
            self.__initialize_Theta(2, self.num_of_neurons, 10)
        # NOTE(review): this seeds the *global* NumPy RNG as a side effect of
        # constructing the object — confirm that is intended.
        np.random.seed(0)
def load(self):
if os.path.exists(self.file_name):
thetas_trained = np.load(self.file_name, allow_pickle = True)
self.theta = {1:thetas_trained.item().get(1),2:thetas_trained.item().get(2)}
return True
return False
def save(self):
np.save(self.file_name, self.theta)
return True
def get_test(self):
training_data, validation_data, test_data = mnist_loader.load_data_wrapper('db/')
test_data = np.array(list(test_data))
X_test=np.reshape(np.concatenate(test_data[:,0]),(10000,28,28))
return X_test
def predict(self, x):
x = x / 255.0
x = np.reshape(x, (784))
x = np.insert(x,0,1,axis=0)
h1 = self.__sigmoid(np.matmul(x, self.theta[1].T))
h1 = np.insert(h1,0,1,axis=0)
h2 = self.__sigmoid(np.matmul(h1, self.theta[2].T))
return np.argmax(h2), h2
def train(self, epochs = 50, alpha = 0.001, lmbda = 0.001, batch_size=10):
print('Training...')
start = time.time()
training_data, validation_data, test_data = mnist_loader.load_data_wrapper('db/')
training_data = np.array(list(training_data))
test_data=np.array(list(test_data))
X = np.reshape(np.concatenate(training_data[:,0]),(50000,784))
y_matrix=np.reshape(np.concatenate(training_data[:,1]),(50000,10))
X_test=np.reshape(np.concatenate(test_data[:,0]),(10000,784))
y_test=np.reshape(np.concatenate(np.array([test_data[:,1]]).T),(10000,1))
encoder = OneHotEncoder(sparse=False)
y_test = encoder.fit_transform(y_test)
Xc = X
acc = 0
for j in range(epochs):
for i in range( int( ( np.size(X,0) / batch_size ) ) ):
J, Theta1_grad, Theta2_grad = self.__feedForward(Xc[(batch_size*i):(batch_size*i+batch_size)],self.theta,y_matrix[(batch_size*i):(batch_size*i+batch_size)],lmbda)
self.theta[1] = self.theta[1]-alpha*Theta1_grad
self.theta[2] = self.theta[2]-alpha*Theta2_grad
J_total = self.__computeCost(Xc,self.theta,y_matrix,lmbda)
J_test = self.__computeCost(X_test,self.theta,y_test,lmbda)
print(f'Epoch: {j+1}, alpha: {alpha}, lmbda: {lmbda}, J_batch = : {J}, J_total={J_total}, J_test={J_test}, J_diff={(J_total-J_test)}')
acc = self.__accuracy(X, self.theta, y_test=y_matrix)
print(f'Training accuracy is {acc}')
print(f'Test accuracy is {self.__accuracy(X_test, self.theta, y_test)}')
end = time.time()
elapsed = end - start
return acc, elapsed
def __computeCost(self, X,thetaC,y_matrix,lmbda):
self.a[1] = np.insert(X,0,1,axis=1)
self.z[2] = np.matmul(self.a[1],thetaC[1].T)
self.a[2] = np.insert(self.__sigmoid(self.z[2]),0,1,axis=1)
self.z[3] = np.matmul(self.a[2],(thetaC[2].T))
self.a[3] = self.__sigmoid(self.z[3])
regCost = (lmbda/(2*len(X)))*(np.sum(thetaC[1][:,1:(self.n_of_features+1)])**2 + np.sum(thetaC[2][:,1:(self.num_of_neurons+1)])**2)
J=(1/len(X))*np.sum((-y_matrix*np.log(self.a[3])-(1-y_matrix)*np.log(1-self.a[3]))) + regCost
return J
def __accuracy(self, X, thetaC, y_test):
count = 0
badCount=[]
for i,x in enumerate(X):
output = self.__predict(x,thetaC)
if np.argmax(output) == np.argmax(y_test[i]):
count += 1
else:
badCount.append(i)
return count/len(X)
def __predict(self, x, thetaC):
x = np.insert(x,0,1,axis=0)
h1 = self.__sigmoid(np.matmul(x,thetaC[1].T))
h1 = np.insert(h1,0,1,axis=0)
h2 = self.__sigmoid(np.matmul(h1,thetaC[2].T))
return h2
# takes numpy array
def __sigmoid(self, z):
s = 1 / ( 1 + np.exp(-z) )
return s
def __sigmoidGrad(self, z):
g = self.__sigmoid(z) * ( 1 - self.__sigmoid(z) )
return g
def __initialize_Theta(self, lNum, l1_size, l2_size):
self.theta[lNum] = np.random.random( (l2_size, (l1_size+1) ) ) * ( 2*self.init_epsilon ) - self.init_epsilon
def __feedForward(self, X, thetaC, y_matrix, lmbda):
self.a[1] = np.insert(X, 0, 1, axis=1)
self.z[2] = np.matmul(self.a[1], thetaC[1].T)
self.a[2] = np.insert(self.__sigmoid(self.z[2]), 0, 1, axis=1)
self.z[3] = np.matmul(self.a[2], (thetaC[2].T))
self.a[3] = self.__sigmoid(self.z[3])
regCost = (lmbda / ( 2 * len(X))) * ( np.sum( thetaC[1][:, 1:(self.n_of_features+1) ] ) ** 2 + np.sum(thetaC[2][:, 1:(self.num_of_neurons+1)]) ** 2)
J= (1/len(X)) * np.sum( (-y_matrix * np.log(self.a[3]) - (1-y_matrix) * np.log(1 - self.a[3]))) + regCost
#Back Propagation
d3 = self.a[3] - y_matrix
d2 = np.matmul(d3, thetaC[2][:,1:(self.num_of_neurons+1)]) * self.__sigmoidGrad(self.z[2])
Delta1 = np.matmul(d2.T, self.a[1])
Delta2 = np.matmul(d3.T, self.a[2])
#unregularized gradient
Theta1_grad = (1/len(X)) * Delta1
Theta2_grad = (1/len(X)) * Delta2
#regularized gradient
self.theta[1][:,0] = 0
self.theta[2][:,0] = 0
Theta1_grad = Theta1_grad + thetaC[1] * (lmbda/len(X))
Theta2_grad = Theta2_grad + thetaC[2] * (lmbda/len(X))
return J,Theta1_grad,Theta2_grad
| [
"numpy.load",
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"mnist_loader.load_data_wrapper",
"numpy.exp",
"sys.path.append",
"os.path.exists",
"numpy.insert",
"numpy.reshape",
"numpy.size",
"numpy.save",
"sklearn.preprocessing.OneHotEncoder",
"numpy.concatenate",
"numpy.log",
"tim... | [((12, 35), 'sys.path.append', 'sys.path.append', (['"""util"""'], {}), "('util')\n", (27, 35), False, 'import sys\n'), ((843, 860), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (857, 860), True, 'import numpy as np\n'), ((893, 923), 'os.path.exists', 'os.path.exists', (['self.file_name'], {}), '(self.file_name)\n', (907, 923), False, 'import os\n'), ((1162, 1197), 'numpy.save', 'np.save', (['self.file_name', 'self.theta'], {}), '(self.file_name, self.theta)\n', (1169, 1197), True, 'import numpy as np\n'), ((1295, 1332), 'mnist_loader.load_data_wrapper', 'mnist_loader.load_data_wrapper', (['"""db/"""'], {}), "('db/')\n", (1325, 1332), False, 'import mnist_loader\n'), ((1534, 1552), 'numpy.reshape', 'np.reshape', (['x', '(784)'], {}), '(x, 784)\n', (1544, 1552), True, 'import numpy as np\n'), ((1567, 1593), 'numpy.insert', 'np.insert', (['x', '(0)', '(1)'], {'axis': '(0)'}), '(x, 0, 1, axis=0)\n', (1576, 1593), True, 'import numpy as np\n'), ((1663, 1690), 'numpy.insert', 'np.insert', (['h1', '(0)', '(1)'], {'axis': '(0)'}), '(h1, 0, 1, axis=0)\n', (1672, 1690), True, 'import numpy as np\n'), ((1911, 1922), 'time.time', 'time.time', ([], {}), '()\n', (1920, 1922), False, 'import time\n'), ((1976, 2013), 'mnist_loader.load_data_wrapper', 'mnist_loader.load_data_wrapper', (['"""db/"""'], {}), "('db/')\n", (2006, 2013), False, 'import mnist_loader\n'), ((2432, 2459), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (2445, 2459), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((3465, 3476), 'time.time', 'time.time', ([], {}), '()\n', (3474, 3476), False, 'import time\n'), ((3611, 3637), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (3620, 3637), True, 'import numpy as np\n'), ((3655, 3688), 'numpy.matmul', 'np.matmul', (['self.a[1]', 'thetaC[1].T'], {}), '(self.a[1], thetaC[1].T)\n', (3664, 3688), True, 'import numpy as np\n'), 
((3776, 3809), 'numpy.matmul', 'np.matmul', (['self.a[2]', 'thetaC[2].T'], {}), '(self.a[2], thetaC[2].T)\n', (3785, 3809), True, 'import numpy as np\n'), ((4495, 4521), 'numpy.insert', 'np.insert', (['x', '(0)', '(1)'], {'axis': '(0)'}), '(x, 0, 1, axis=0)\n', (4504, 4521), True, 'import numpy as np\n'), ((4586, 4613), 'numpy.insert', 'np.insert', (['h1', '(0)', '(1)'], {'axis': '(0)'}), '(h1, 0, 1, axis=0)\n', (4595, 4613), True, 'import numpy as np\n'), ((5156, 5182), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (5165, 5182), True, 'import numpy as np\n'), ((5204, 5237), 'numpy.matmul', 'np.matmul', (['self.a[1]', 'thetaC[1].T'], {}), '(self.a[1], thetaC[1].T)\n', (5213, 5237), True, 'import numpy as np\n'), ((5347, 5380), 'numpy.matmul', 'np.matmul', (['self.a[2]', 'thetaC[2].T'], {}), '(self.a[2], thetaC[2].T)\n', (5356, 5380), True, 'import numpy as np\n'), ((5922, 5948), 'numpy.matmul', 'np.matmul', (['d2.T', 'self.a[1]'], {}), '(d2.T, self.a[1])\n', (5931, 5948), True, 'import numpy as np\n'), ((5966, 5992), 'numpy.matmul', 'np.matmul', (['d3.T', 'self.a[2]'], {}), '(d3.T, self.a[2])\n', (5975, 5992), True, 'import numpy as np\n'), ((954, 996), 'numpy.load', 'np.load', (['self.file_name'], {'allow_pickle': '(True)'}), '(self.file_name, allow_pickle=True)\n', (961, 996), True, 'import numpy as np\n'), ((1405, 1436), 'numpy.concatenate', 'np.concatenate', (['test_data[:, 0]'], {}), '(test_data[:, 0])\n', (1419, 1436), True, 'import numpy as np\n'), ((1619, 1648), 'numpy.matmul', 'np.matmul', (['x', 'self.theta[1].T'], {}), '(x, self.theta[1].T)\n', (1628, 1648), True, 'import numpy as np\n'), ((1716, 1746), 'numpy.matmul', 'np.matmul', (['h1', 'self.theta[2].T'], {}), '(h1, self.theta[2].T)\n', (1725, 1746), True, 'import numpy as np\n'), ((1763, 1776), 'numpy.argmax', 'np.argmax', (['h2'], {}), '(h2)\n', (1772, 1776), True, 'import numpy as np\n'), ((2137, 2172), 'numpy.concatenate', 'np.concatenate', 
(['training_data[:, 0]'], {}), '(training_data[:, 0])\n', (2151, 2172), True, 'import numpy as np\n'), ((2213, 2248), 'numpy.concatenate', 'np.concatenate', (['training_data[:, 1]'], {}), '(training_data[:, 1])\n', (2227, 2248), True, 'import numpy as np\n'), ((2287, 2318), 'numpy.concatenate', 'np.concatenate', (['test_data[:, 0]'], {}), '(test_data[:, 0])\n', (2301, 2318), True, 'import numpy as np\n'), ((4547, 4572), 'numpy.matmul', 'np.matmul', (['x', 'thetaC[1].T'], {}), '(x, thetaC[1].T)\n', (4556, 4572), True, 'import numpy as np\n'), ((4639, 4665), 'numpy.matmul', 'np.matmul', (['h1', 'thetaC[2].T'], {}), '(h1, thetaC[2].T)\n', (4648, 4665), True, 'import numpy as np\n'), ((5810, 5864), 'numpy.matmul', 'np.matmul', (['d3', 'thetaC[2][:, 1:self.num_of_neurons + 1]'], {}), '(d3, thetaC[2][:, 1:self.num_of_neurons + 1])\n', (5819, 5864), True, 'import numpy as np\n'), ((4294, 4311), 'numpy.argmax', 'np.argmax', (['output'], {}), '(output)\n', (4303, 4311), True, 'import numpy as np\n'), ((4315, 4335), 'numpy.argmax', 'np.argmax', (['y_test[i]'], {}), '(y_test[i])\n', (4324, 4335), True, 'import numpy as np\n'), ((4764, 4774), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (4770, 4774), True, 'import numpy as np\n'), ((4988, 5028), 'numpy.random.random', 'np.random.random', (['(l2_size, l1_size + 1)'], {}), '((l2_size, l1_size + 1))\n', (5004, 5028), True, 'import numpy as np\n'), ((2372, 2399), 'numpy.array', 'np.array', (['[test_data[:, 1]]'], {}), '([test_data[:, 1]])\n', (2380, 2399), True, 'import numpy as np\n'), ((3895, 3941), 'numpy.sum', 'np.sum', (['thetaC[1][:, 1:self.n_of_features + 1]'], {}), '(thetaC[1][:, 1:self.n_of_features + 1])\n', (3901, 3941), True, 'import numpy as np\n'), ((3946, 3993), 'numpy.sum', 'np.sum', (['thetaC[2][:, 1:self.num_of_neurons + 1]'], {}), '(thetaC[2][:, 1:self.num_of_neurons + 1])\n', (3952, 3993), True, 'import numpy as np\n'), ((5493, 5539), 'numpy.sum', 'np.sum', (['thetaC[1][:, 1:self.n_of_features + 1]'], {}), 
'(thetaC[1][:, 1:self.n_of_features + 1])\n', (5499, 5539), True, 'import numpy as np\n'), ((5550, 5597), 'numpy.sum', 'np.sum', (['thetaC[2][:, 1:self.num_of_neurons + 1]'], {}), '(thetaC[2][:, 1:self.num_of_neurons + 1])\n', (5556, 5597), True, 'import numpy as np\n'), ((2608, 2621), 'numpy.size', 'np.size', (['X', '(0)'], {}), '(X, 0)\n', (2615, 2621), True, 'import numpy as np\n'), ((4036, 4053), 'numpy.log', 'np.log', (['self.a[3]'], {}), '(self.a[3])\n', (4042, 4053), True, 'import numpy as np\n'), ((4067, 4088), 'numpy.log', 'np.log', (['(1 - self.a[3])'], {}), '(1 - self.a[3])\n', (4073, 4088), True, 'import numpy as np\n'), ((5650, 5667), 'numpy.log', 'np.log', (['self.a[3]'], {}), '(self.a[3])\n', (5656, 5667), True, 'import numpy as np\n'), ((5685, 5706), 'numpy.log', 'np.log', (['(1 - self.a[3])'], {}), '(1 - self.a[3])\n', (5691, 5706), True, 'import numpy as np\n')] |
import math
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from sklearn.preprocessing import StandardScaler
import cython
class DataLoader():
    """Builds sliding-window sequences from a per-asset market dataframe.

    Parameters
    ----------
    cols : list[str]
        Columns used for training windows; the FIRST column is the target
        from which the binary label is derived.
    testcols : list[str]
        Columns used for the per-asset test windows.
    """
    def __init__(self, cols, testcols):
        self.cols = cols
        self.testcols = testcols

    def get_train_data(self, df, assetCode=None, assetName=None, seq_len=5, normalize=True):
        '''
        Create x, y train sliding sequence windows
        *not generative, so only use with one company*
        '''
        if assetName:
            data_train = df.loc[df.assetName==assetName, self.cols].values
        else:
            # BUG FIX: the original referenced the undefined name
            # `assetCossde`, raising NameError whenever the assetCode
            # branch was taken.
            data_train = df.loc[df.assetCode==assetCode, self.cols].values
        self.len_train = len(data_train)

        x_train = []
        y_train = []
        for i in range(self.len_train - seq_len):
            x, y = self._next_window(data_train, i, seq_len, normalize)
            x_train.append(x)
            y_train.append(y)
        return np.array(x_train), np.array(y_train)

    def generate_test_batch(self, df, assetCodes, seq_len, normalize=True):
        """Build one window per asset, zero-padded at the front if short."""
        data_windows = []
        for asset in assetCodes:
            window = df.loc[df.assetCode==asset, self.testcols].tail(seq_len).values
            window = np.array(window).astype(float)
            if window.shape[0] != seq_len:
                # Pad missing leading rows with zeros so every asset
                # contributes a full-length window.
                pad = np.zeros((seq_len-window.shape[0],len(self.testcols)))
                window = np.vstack((pad,window))
            data_windows.append(window)
        data_windows = np.array(data_windows).astype(float)
        print(f'normalizing the days data of shape {data_windows.shape}')
        data_windows = self.normalize_windows(data_windows, single_window=False) if normalize else data_windows
        return np.array(data_windows)

    def _next_window(self, data, i, seq_len, normalize):
        '''Generates the next data window from the given index location i'''
        window = data[i:i+seq_len]
        window = self.normalize_windows(window, single_window=True)[0] if normalize else window
        x = window[:,1:]
        # Binary label: 1 when the window's final target value is positive.
        y = np.where(window[-1, [0]]>0,1,0)
        return x, y

    def normalize_windows(self, window_data, single_window=False):
        '''normalize window with a base value of zero'''
        normalized_data = []
        window_data = [window_data] if single_window else window_data
        for window in window_data:
            scaler = StandardScaler()
            normalized_window = scaler.fit_transform(window)
            normalized_data.append(normalized_window)
        return np.array(normalized_data)
# No CLI behavior: this module is intended to be imported for its
# DataLoader class.
if __name__ == '__main__':
    pass
| [
"numpy.where",
"numpy.array",
"sklearn.preprocessing.StandardScaler",
"numpy.vstack"
] | [((1762, 1784), 'numpy.array', 'np.array', (['data_windows'], {}), '(data_windows)\n', (1770, 1784), True, 'import numpy as np\n'), ((2088, 2123), 'numpy.where', 'np.where', (['(window[-1, [0]] > 0)', '(1)', '(0)'], {}), '(window[-1, [0]] > 0, 1, 0)\n', (2096, 2123), True, 'import numpy as np\n'), ((2567, 2592), 'numpy.array', 'np.array', (['normalized_data'], {}), '(normalized_data)\n', (2575, 2592), True, 'import numpy as np\n'), ((969, 986), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (977, 986), True, 'import numpy as np\n'), ((988, 1005), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (996, 1005), True, 'import numpy as np\n'), ((2420, 2436), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2434, 2436), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1437, 1461), 'numpy.vstack', 'np.vstack', (['(pad, window)'], {}), '((pad, window))\n', (1446, 1461), True, 'import numpy as np\n'), ((1524, 1546), 'numpy.array', 'np.array', (['data_windows'], {}), '(data_windows)\n', (1532, 1546), True, 'import numpy as np\n'), ((1261, 1277), 'numpy.array', 'np.array', (['window'], {}), '(window)\n', (1269, 1277), True, 'import numpy as np\n')] |
import tensorflow as tf
import train
import numpy as np
import pprint
import copy
# Command-line flag definitions (TensorFlow v1 flags API).
flags = tf.app.flags
# Data source and training schedule.
flags.DEFINE_string("datafile", "data/cosmo_primary_64_1k_train.npy", "Input data file for cosmo")
flags.DEFINE_integer("epoch", 1, "Epochs to train [1]")
# Optimizer (Adam) settings and label noise.
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("flip_labels", 0, "Probability of flipping labels [0]")
# Network architecture.
flags.DEFINE_integer("z_dim", 100, "Dimension of noise vector z [100]")
flags.DEFINE_integer("nd_layers", 4, "Number of discriminator conv2d layers. [4]")
flags.DEFINE_integer("ng_layers", 4, "Number of generator conv2d_transpose layers. [4]")
flags.DEFINE_integer("gf_dim", 64, "Dimension of gen filters in last conv layer. [64]")
flags.DEFINE_integer("df_dim", 64, "Dimension of discrim filters in first conv layer. [64]")
flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
flags.DEFINE_integer("output_size", 64, "The size of the output images to produce [64]")
flags.DEFINE_integer("c_dim", 1, "Dimension of image color. [1]")
# Memory layout / performance knobs.
flags.DEFINE_string("data_format", "NHWC", "data format [NHWC]")
flags.DEFINE_boolean("transpose_matmul_b", False, "Transpose matmul B matrix for performance [False]")
# Checkpointing and logging.
flags.DEFINE_string("checkpoint_dir", "checkpoints", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("experiment", "run_0", "Tensorboard run directory name [run_0]")
flags.DEFINE_boolean("save_checkpoint", False, "Save a checkpoint every epoch [False]")
flags.DEFINE_boolean("verbose", True, "print loss on every step [False]")
flags.DEFINE_string("arch", "default", "Architecture, default, KNL or HSW")

config = flags.FLAGS

# Benchmark sweeps iterated by main().
# NOTE: these module-level lists intentionally shadow the flag names above.
data_format = ["NHWC"]
batch_size = [32, 64, 128, 256]
def main(_):
    """Benchmark DCGAN training across the configured sweeps.

    Runs one training session per entry of the module-level
    ``data_format`` and ``batch_size`` lists, printing timing statistics
    and throughput after each run.
    """
    pprint.PrettyPrinter().pprint(config.__flags)

    def _run_and_report():
        # Train once with the current `config` and print the timing summary.
        avg_time = train.train_dcgan(get_data(), config)
        print("\ndata_format = %s. batch_size = %i"%(config.data_format, config.batch_size))
        print("Average time per batch = %3.3f +- %3.5f (s)"%(avg_time[0], avg_time[1]))
        print("\nImages/sec = %i\n"%(config.batch_size/avg_time[0]))

    for fmt in data_format:
        config.data_format = fmt
        _run_and_report()

    for size in batch_size:
        config.batch_size = size
        _run_and_report()
def get_data():
    """Load the training data (memory-mapped) and append a channel axis.

    The channel axis position follows config.data_format: last axis for
    'NHWC', axis 1 otherwise (NCHW-style).
    """
    raw = np.load(config.datafile, mmap_mode='r')
    channel_axis = -1 if config.data_format == 'NHWC' else 1
    return np.expand_dims(raw, axis=channel_axis)
# Hand control to the TF v1 application runner, which parses the flags
# defined above and then invokes main().
if __name__ == '__main__':
    tf.app.run()
| [
"pprint.PrettyPrinter",
"numpy.load",
"tensorflow.app.run",
"numpy.expand_dims"
] | [((2633, 2672), 'numpy.load', 'np.load', (['config.datafile'], {'mmap_mode': '"""r"""'}), "(config.datafile, mmap_mode='r')\n", (2640, 2672), True, 'import numpy as np\n'), ((2868, 2880), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2878, 2880), True, 'import tensorflow as tf\n'), ((2726, 2755), 'numpy.expand_dims', 'np.expand_dims', (['data'], {'axis': '(-1)'}), '(data, axis=-1)\n', (2740, 2755), True, 'import numpy as np\n'), ((2790, 2818), 'numpy.expand_dims', 'np.expand_dims', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (2804, 2818), True, 'import numpy as np\n'), ((1827, 1849), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (1847, 1849), False, 'import pprint\n')] |
# McDermott
# 25 March 2021
# power_spectrum.py
import sys
# sys.path.append('<path to macfp-db>/macfp-db/Utilities/')
sys.path.append('../../../../../../macfp-db/Utilities/')
import macfp
import importlib
importlib.reload(macfp)
import matplotlib.pyplot as plt
from scipy import signal
import pandas as pd
import numpy as np
# get the model results
Baseline_Dir = '../../../../NIST_Pool_Fires/Computational_Results/2021/NIST/Baseline/'

# One devc file per grid resolution (2 cm, 1 cm, 0.5 cm).  For each case:
# estimate the mean sampling rate, take the vertical velocity signal after
# the 20 s spin-up, and compute its power spectral density.
case_files = ['NIST_Methanol_Prescribed_2cm_devc.csv',
              'NIST_Methanol_Prescribed_1cm_devc.csv',
              'NIST_Methanol_Prescribed_0p5cm_devc.csv']
case_results = []
for fname in case_files:
    M = pd.read_csv(Baseline_Dir+fname, header=1, sep=' *, *', engine='python')
    fs = len(M['Time'])/max(M['Time'])      # mean sampling frequency [Hz]
    x = M['w'][M['Time']>20.]               # vertical velocity, t > 20 s
    f, Pxx = signal.periodogram(x, fs)
    case_results.append((fs, f, Pxx))
fs1, f1, Pxx_den_1 = case_results[0]
fs2, f2, Pxx_den_2 = case_results[1]
fs3, f3, Pxx_den_3 = case_results[2]

# plot experimental result
fmeas = np.array([2.64, 2.64])
PSDmeas = np.array([0., 12.])

# Shared axis settings for the linear-scale figure.
linear_axes = dict(plot_type='linear',
                   x_min=0.5, x_max=4, y_min=0, y_max=15,
                   x_label='frequency [Hz]',
                   y_label='PSD [V**2/Hz]')

fh = macfp.plot_to_fig(fmeas, PSDmeas,
                       line_style='--',
                       line_width=2,
                       line_color='black',
                       institute_label='NIST baseline prescribed MLR',
                       data_label='Exp',
                       plot_title='Waterloo Methanol 30 cm Puffing Frequency',
                       show_legend=True,
                       legend_location='right',
                       **linear_axes)

# add error to measuered puffing freq
plt.fill_betweenx(PSDmeas, np.array([2.58, 2.58]), np.array([2.70, 2.70]), color='lightgrey', figure=fh)

# FDS spectra for the three resolutions on the same linear axes.
fds_cases = [(f1, Pxx_den_1, 'FDS $\Delta x=2$ cm',   '-',  'black',   'o'),
             (f2, Pxx_den_2, 'FDS $\Delta x=1$ cm',   '-',  'magenta', '^'),
             (f3, Pxx_den_3, 'FDS $\Delta x=0.5$ cm', '-.', 'red',     's')]
for f, Pxx, label, ls, color, marker in fds_cases:
    fh = macfp.plot_to_fig(f, Pxx,
                           data_label=label,
                           line_style=ls, line_width=1, line_color=color,
                           marker_style=marker, marker_size=4,
                           marker_edge_color=color, marker_fill_color='None',
                           figure_handle=fh,
                           show_legend=True, legend_location='center left',
                           **linear_axes)

plt.savefig('./Baseline/Plots/NIST_Waterloo_Methanol_puffing_frequency.pdf')

# loglog spectrum
loglog_axes = dict(plot_type='loglog',
                   x_min=0.5, x_max=1000, y_min=.00001, y_max=100,
                   x_label='frequency [Hz]',
                   y_label='PSD [V**2/Hz]')

fh2 = macfp.plot_to_fig(f3, Pxx_den_3,
                        plot_title='Waterloo Methanol 30 cm Power Spectrum',
                        data_label='FDS $\Delta x=0.5$ cm',
                        line_style='-', line_width=1, line_color='black',
                        show_legend=True, legend_location='lower left',
                        legend_framealpha=1.,
                        institute_label='NIST baseline prescribed MLR',
                        **loglog_axes)

# Kolmogorov -5/3 slope for reference.
macfp.plot_to_fig(f3, f3**(-5./3.),
                  data_label='f**-5/3',
                  line_style='--', line_width=2, line_color='black',
                  show_legend=True, legend_location='lower left',
                  legend_framealpha=1., figure_handle=fh2,
                  **loglog_axes)

# Mark the Nyquist frequency of the finest grid and the measured puffing
# frequency on the log-log figure.
fnyquist = np.array([0.5*fs3, 0.5*fs3])
macfp.plot_to_fig(fnyquist, PSDmeas,
                  data_label='f Nyquist',
                  line_style='--', line_width=1, line_color='red',
                  show_legend=True, legend_location='lower left',
                  legend_framealpha=1., figure_handle=fh2,
                  **loglog_axes)
macfp.plot_to_fig(fmeas, PSDmeas,
                  data_label='f puffing',
                  line_style='--', line_width=1, line_color='green',
                  show_legend=True, legend_location='lower left',
                  legend_framealpha=1., figure_handle=fh2,
                  **loglog_axes)

plt.savefig('./Baseline/Plots/NIST_Waterloo_Methanol_Power_Spectrum.pdf')
| [
"sys.path.append",
"pandas.read_csv",
"macfp.plot_to_fig",
"importlib.reload",
"numpy.array",
"scipy.signal.periodogram",
"matplotlib.pyplot.savefig"
] | [((120, 176), 'sys.path.append', 'sys.path.append', (['"""../../../../../../macfp-db/Utilities/"""'], {}), "('../../../../../../macfp-db/Utilities/')\n", (135, 176), False, 'import sys\n'), ((208, 231), 'importlib.reload', 'importlib.reload', (['macfp'], {}), '(macfp)\n', (224, 231), False, 'import importlib\n'), ((445, 557), 'pandas.read_csv', 'pd.read_csv', (["(Baseline_Dir + 'NIST_Methanol_Prescribed_2cm_devc.csv')"], {'header': '(1)', 'sep': '""" *, *"""', 'engine': '"""python"""'}), "(Baseline_Dir + 'NIST_Methanol_Prescribed_2cm_devc.csv', header=\n 1, sep=' *, *', engine='python')\n", (456, 557), True, 'import pandas as pd\n'), ((556, 668), 'pandas.read_csv', 'pd.read_csv', (["(Baseline_Dir + 'NIST_Methanol_Prescribed_1cm_devc.csv')"], {'header': '(1)', 'sep': '""" *, *"""', 'engine': '"""python"""'}), "(Baseline_Dir + 'NIST_Methanol_Prescribed_1cm_devc.csv', header=\n 1, sep=' *, *', engine='python')\n", (567, 668), True, 'import pandas as pd\n'), ((667, 780), 'pandas.read_csv', 'pd.read_csv', (["(Baseline_Dir + 'NIST_Methanol_Prescribed_0p5cm_devc.csv')"], {'header': '(1)', 'sep': '""" *, *"""', 'engine': '"""python"""'}), "(Baseline_Dir + 'NIST_Methanol_Prescribed_0p5cm_devc.csv',\n header=1, sep=' *, *', engine='python')\n", (678, 780), True, 'import pandas as pd\n'), ((995, 1022), 'scipy.signal.periodogram', 'signal.periodogram', (['x1', 'fs1'], {}), '(x1, fs1)\n', (1013, 1022), False, 'from scipy import signal\n'), ((1039, 1066), 'scipy.signal.periodogram', 'signal.periodogram', (['x2', 'fs2'], {}), '(x2, fs2)\n', (1057, 1066), False, 'from scipy import signal\n'), ((1083, 1110), 'scipy.signal.periodogram', 'signal.periodogram', (['x3', 'fs3'], {}), '(x3, fs3)\n', (1101, 1110), False, 'from scipy import signal\n'), ((1147, 1169), 'numpy.array', 'np.array', (['[2.64, 2.64]'], {}), '([2.64, 2.64])\n', (1155, 1169), True, 'import numpy as np\n'), ((1180, 1201), 'numpy.array', 'np.array', (['[0.0, 12.0]'], {}), '([0.0, 12.0])\n', (1188, 1201), True, 
'import numpy as np\n'), ((1203, 1584), 'macfp.plot_to_fig', 'macfp.plot_to_fig', (['fmeas', 'PSDmeas'], {'plot_type': '"""linear"""', 'x_min': '(0.5)', 'x_max': '(4)', 'y_min': '(0)', 'y_max': '(15)', 'x_label': '"""frequency [Hz]"""', 'y_label': '"""PSD [V**2/Hz]"""', 'line_style': '"""--"""', 'line_width': '(2)', 'line_color': '"""black"""', 'institute_label': '"""NIST baseline prescribed MLR"""', 'data_label': '"""Exp"""', 'plot_title': '"""Waterloo Methanol 30 cm Puffing Frequency"""', 'show_legend': '(True)', 'legend_location': '"""right"""'}), "(fmeas, PSDmeas, plot_type='linear', x_min=0.5, x_max=4,\n y_min=0, y_max=15, x_label='frequency [Hz]', y_label='PSD [V**2/Hz]',\n line_style='--', line_width=2, line_color='black', institute_label=\n 'NIST baseline prescribed MLR', data_label='Exp', plot_title=\n 'Waterloo Methanol 30 cm Puffing Frequency', show_legend=True,\n legend_location='right')\n", (1220, 1584), False, 'import macfp\n'), ((1924, 2325), 'macfp.plot_to_fig', 'macfp.plot_to_fig', (['f1', 'Pxx_den_1'], {'plot_type': '"""linear"""', 'x_min': '(0.5)', 'x_max': '(4)', 'y_min': '(0)', 'y_max': '(15)', 'x_label': '"""frequency [Hz]"""', 'y_label': '"""PSD [V**2/Hz]"""', 'data_label': '"""FDS $\\\\Delta x=2$ cm"""', 'line_style': '"""-"""', 'line_width': '(1)', 'line_color': '"""black"""', 'marker_style': '"""o"""', 'marker_size': '(4)', 'marker_edge_color': '"""black"""', 'marker_fill_color': '"""None"""', 'figure_handle': 'fh', 'show_legend': '(True)', 'legend_location': '"""center left"""'}), "(f1, Pxx_den_1, plot_type='linear', x_min=0.5, x_max=4,\n y_min=0, y_max=15, x_label='frequency [Hz]', y_label='PSD [V**2/Hz]',\n data_label='FDS $\\\\Delta x=2$ cm', line_style='-', line_width=1,\n line_color='black', marker_style='o', marker_size=4, marker_edge_color=\n 'black', marker_fill_color='None', figure_handle=fh, show_legend=True,\n legend_location='center left')\n", (1941, 2325), False, 'import macfp\n'), ((2295, 2699), 'macfp.plot_to_fig', 
'macfp.plot_to_fig', (['f2', 'Pxx_den_2'], {'plot_type': '"""linear"""', 'x_min': '(0.5)', 'x_max': '(4)', 'y_min': '(0)', 'y_max': '(15)', 'x_label': '"""frequency [Hz]"""', 'y_label': '"""PSD [V**2/Hz]"""', 'data_label': '"""FDS $\\\\Delta x=1$ cm"""', 'line_style': '"""-"""', 'line_width': '(1)', 'line_color': '"""magenta"""', 'marker_style': '"""^"""', 'marker_size': '(4)', 'marker_edge_color': '"""magenta"""', 'marker_fill_color': '"""None"""', 'figure_handle': 'fh', 'show_legend': '(True)', 'legend_location': '"""center left"""'}), "(f2, Pxx_den_2, plot_type='linear', x_min=0.5, x_max=4,\n y_min=0, y_max=15, x_label='frequency [Hz]', y_label='PSD [V**2/Hz]',\n data_label='FDS $\\\\Delta x=1$ cm', line_style='-', line_width=1,\n line_color='magenta', marker_style='^', marker_size=4,\n marker_edge_color='magenta', marker_fill_color='None', figure_handle=fh,\n show_legend=True, legend_location='center left')\n", (2312, 2699), False, 'import macfp\n'), ((2667, 3067), 'macfp.plot_to_fig', 'macfp.plot_to_fig', (['f3', 'Pxx_den_3'], {'plot_type': '"""linear"""', 'x_min': '(0.5)', 'x_max': '(4)', 'y_min': '(0)', 'y_max': '(15)', 'x_label': '"""frequency [Hz]"""', 'y_label': '"""PSD [V**2/Hz]"""', 'data_label': '"""FDS $\\\\Delta x=0.5$ cm"""', 'line_style': '"""-."""', 'line_width': '(1)', 'line_color': '"""red"""', 'marker_style': '"""s"""', 'marker_size': '(4)', 'marker_edge_color': '"""red"""', 'marker_fill_color': '"""None"""', 'figure_handle': 'fh', 'show_legend': '(True)', 'legend_location': '"""center left"""'}), "(f3, Pxx_den_3, plot_type='linear', x_min=0.5, x_max=4,\n y_min=0, y_max=15, x_label='frequency [Hz]', y_label='PSD [V**2/Hz]',\n data_label='FDS $\\\\Delta x=0.5$ cm', line_style='-.', line_width=1,\n line_color='red', marker_style='s', marker_size=4, marker_edge_color=\n 'red', marker_fill_color='None', figure_handle=fh, show_legend=True,\n legend_location='center left')\n", (2684, 3067), False, 'import macfp\n'), ((3046, 3122), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./Baseline/Plots/NIST_Waterloo_Methanol_puffing_frequency.pdf"""'], {}), "('./Baseline/Plots/NIST_Waterloo_Methanol_puffing_frequency.pdf')\n", (3057, 3122), True, 'import matplotlib.pyplot as plt\n'), ((3146, 3576), 'macfp.plot_to_fig', 'macfp.plot_to_fig', (['f3', 'Pxx_den_3'], {'plot_type': '"""loglog"""', 'x_min': '(0.5)', 'x_max': '(1000)', 'y_min': '(1e-05)', 'y_max': '(100)', 'x_label': '"""frequency [Hz]"""', 'y_label': '"""PSD [V**2/Hz]"""', 'plot_title': '"""Waterloo Methanol 30 cm Power Spectrum"""', 'data_label': '"""FDS $\\\\Delta x=0.5$ cm"""', 'line_style': '"""-"""', 'line_width': '(1)', 'line_color': '"""black"""', 'show_legend': '(True)', 'legend_location': '"""lower left"""', 'legend_framealpha': '(1.0)', 'institute_label': '"""NIST baseline prescribed MLR"""'}), "(f3, Pxx_den_3, plot_type='loglog', x_min=0.5, x_max=1000,\n y_min=1e-05, y_max=100, x_label='frequency [Hz]', y_label=\n 'PSD [V**2/Hz]', plot_title='Waterloo Methanol 30 cm Power Spectrum',\n data_label='FDS $\\\\Delta x=0.5$ cm', line_style='-', line_width=1,\n line_color='black', show_legend=True, legend_location='lower left',\n legend_framealpha=1.0, institute_label='NIST baseline prescribed MLR')\n", (3163, 3576), False, 'import macfp\n'), ((3541, 3880), 'macfp.plot_to_fig', 'macfp.plot_to_fig', (['f3', '(f3 ** (-5.0 / 3.0))'], {'plot_type': '"""loglog"""', 'x_min': '(0.5)', 'x_max': '(1000)', 'y_min': '(1e-05)', 'y_max': '(100)', 'x_label': '"""frequency [Hz]"""', 'y_label': '"""PSD [V**2/Hz]"""', 'data_label': '"""f**-5/3"""', 'line_style': '"""--"""', 'line_width': '(2)', 'line_color': '"""black"""', 'show_legend': '(True)', 'legend_location': '"""lower left"""', 'legend_framealpha': '(1.0)', 'figure_handle': 'fh2'}), "(f3, f3 ** (-5.0 / 3.0), plot_type='loglog', x_min=0.5,\n x_max=1000, y_min=1e-05, y_max=100, x_label='frequency [Hz]', y_label=\n 'PSD [V**2/Hz]', data_label='f**-5/3', line_style='--', line_width=2,\n 
line_color='black', show_legend=True, legend_location='lower left',\n legend_framealpha=1.0, figure_handle=fh2)\n", (3558, 3880), False, 'import macfp\n'), ((3855, 3887), 'numpy.array', 'np.array', (['[0.5 * fs3, 0.5 * fs3]'], {}), '([0.5 * fs3, 0.5 * fs3])\n', (3863, 3887), True, 'import numpy as np\n'), ((3884, 4219), 'macfp.plot_to_fig', 'macfp.plot_to_fig', (['fnyquist', 'PSDmeas'], {'plot_type': '"""loglog"""', 'x_min': '(0.5)', 'x_max': '(1000)', 'y_min': '(1e-05)', 'y_max': '(100)', 'x_label': '"""frequency [Hz]"""', 'y_label': '"""PSD [V**2/Hz]"""', 'data_label': '"""f Nyquist"""', 'line_style': '"""--"""', 'line_width': '(1)', 'line_color': '"""red"""', 'show_legend': '(True)', 'legend_location': '"""lower left"""', 'legend_framealpha': '(1.0)', 'figure_handle': 'fh2'}), "(fnyquist, PSDmeas, plot_type='loglog', x_min=0.5, x_max=\n 1000, y_min=1e-05, y_max=100, x_label='frequency [Hz]', y_label=\n 'PSD [V**2/Hz]', data_label='f Nyquist', line_style='--', line_width=1,\n line_color='red', show_legend=True, legend_location='lower left',\n legend_framealpha=1.0, figure_handle=fh2)\n", (3901, 4219), False, 'import macfp\n'), ((4188, 4521), 'macfp.plot_to_fig', 'macfp.plot_to_fig', (['fmeas', 'PSDmeas'], {'plot_type': '"""loglog"""', 'x_min': '(0.5)', 'x_max': '(1000)', 'y_min': '(1e-05)', 'y_max': '(100)', 'x_label': '"""frequency [Hz]"""', 'y_label': '"""PSD [V**2/Hz]"""', 'data_label': '"""f puffing"""', 'line_style': '"""--"""', 'line_width': '(1)', 'line_color': '"""green"""', 'show_legend': '(True)', 'legend_location': '"""lower left"""', 'legend_framealpha': '(1.0)', 'figure_handle': 'fh2'}), "(fmeas, PSDmeas, plot_type='loglog', x_min=0.5, x_max=1000,\n y_min=1e-05, y_max=100, x_label='frequency [Hz]', y_label=\n 'PSD [V**2/Hz]', data_label='f puffing', line_style='--', line_width=1,\n line_color='green', show_legend=True, legend_location='lower left',\n legend_framealpha=1.0, figure_handle=fh2)\n", (4205, 4521), False, 'import macfp\n'), ((4506, 4579), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./Baseline/Plots/NIST_Waterloo_Methanol_Power_Spectrum.pdf"""'], {}), "('./Baseline/Plots/NIST_Waterloo_Methanol_Power_Spectrum.pdf')\n", (4517, 4579), True, 'import matplotlib.pyplot as plt\n'), ((1842, 1864), 'numpy.array', 'np.array', (['[2.58, 2.58]'], {}), '([2.58, 2.58])\n', (1850, 1864), True, 'import numpy as np\n'), ((1866, 1886), 'numpy.array', 'np.array', (['[2.7, 2.7]'], {}), '([2.7, 2.7])\n', (1874, 1886), True, 'import numpy as np\n')] |
import os
import numpy
#This file is to run the MH test and Fisher's exact test.
# Success threshold expressed as a quantile of the pooled squared values.
thres = 0.5 #quantile, threshold of success
# Input files: 13 comma-separated rows each (one per stratum).
# i0 = no-intervention arm, i1 = intervention arm -- presumably; confirm naming.
i0file = '/PATH/cldi0.txt'
i1file = '/PATH/cldi1.txt'
print('process start')
####################################################
#read these files,get the count of success and fail#
####################################################
with open(i0file,'r') as fdo:
    # Each of the 13 lines holds one stratum of comma-separated values.
    # Reading them in a comprehension replaces 26 copy-pasted
    # readline()/split() statements while keeping the same variable names.
    (i0d1, i0d2, i0d3, i0d4, i0d5, i0d6, i0d7, i0d8, i0d9, i0d10,
     i0d11, i0d12, i0d13) = [fdo.readline().split(',') for _ in range(13)]
with open(i1file,'r') as fdo:
    # Same layout as the i0 file: 13 comma-separated stratum rows.
    (i1d1, i1d2, i1d3, i1d4, i1d5, i1d6, i1d7, i1d8, i1d9, i1d10,
     i1d11, i1d12, i1d13) = [fdo.readline().split(',') for _ in range(13)]
#extract the cut point#
# Pool both arms, square every usable value, and take the `thres` quantile.
# The codes '9.0' and '0.0' are only tallied, never used for the quantile.
cldlistf = []
cldlist = i0d1+i0d2+i0d3+i0d4+i0d5+i0d6+i0d7+i0d8+i0d9+i0d10+i0d11+i0d12+i0d13+i1d1+i1d2+i1d3+i1d4+i1d5+i1d6+i1d7+i1d8+i1d9+i1d10+i1d11+i1d12+i1d13
count9 = 0
count0 = 0
for entry in cldlist:
    if entry == '9.0':
        count9 = count9 + 1
    elif entry == '0.0':
        count0 = count0 + 1
    else:
        cldlistf.append(float(entry)**2)
cutp = numpy.percentile(cldlistf,100*thres)
print('the',thres,'quantile is',cutp)
###############################
#calculat the success and fail#
###############################
def _count_success_fail(values, cut):
    # Count how many squared values exceed the cut point (success) vs not
    # (fail).  Entries coded '9.0' and '0.0' are skipped -- same filter as
    # the quantile computation above.  Replaces 26 copy-pasted loops.
    succ = 0
    fail = 0
    for v in values:
        if not v == '9.0' and not v == '0.0':
            if float(v)**2 > cut:
                succ = succ + 1
            else:
                fail = fail + 1
    return succ, fail

i0d1s, i0d1f = _count_success_fail(i0d1, cutp)
print('i0d1s and f are:',i0d1s,i0d1f)
i0d2s, i0d2f = _count_success_fail(i0d2, cutp)
print('i0d2s and f are:',i0d2s,i0d2f)
i0d3s, i0d3f = _count_success_fail(i0d3, cutp)
print('i0d3s and f are:',i0d3s,i0d3f)
i0d4s, i0d4f = _count_success_fail(i0d4, cutp)
print('i0d4s and f are:',i0d4s,i0d4f)
i0d5s, i0d5f = _count_success_fail(i0d5, cutp)
print('i0d5s and f are:',i0d5s,i0d5f)
i0d6s, i0d6f = _count_success_fail(i0d6, cutp)
print('i0d6s and f are:',i0d6s,i0d6f)
i0d7s, i0d7f = _count_success_fail(i0d7, cutp)
print('i0d7s and f are:',i0d7s,i0d7f)
i0d8s, i0d8f = _count_success_fail(i0d8, cutp)
print('i0d8s and f are:',i0d8s,i0d8f)
i0d9s, i0d9f = _count_success_fail(i0d9, cutp)
print('i0d9s and f are:',i0d9s,i0d9f)  # label fixed (was 'i09s')
i0d10s, i0d10f = _count_success_fail(i0d10, cutp)
print('i0d10s and f are:',i0d10s,i0d10f)
i0d11s, i0d11f = _count_success_fail(i0d11, cutp)
print('i0d11s and f are:',i0d11s,i0d11f)
i0d12s, i0d12f = _count_success_fail(i0d12, cutp)
print('i0d12s and f are:',i0d12s,i0d12f)
i0d13s, i0d13f = _count_success_fail(i0d13, cutp)
print('i0d13s and f are:',i0d13s,i0d13f)
i1d1s, i1d1f = _count_success_fail(i1d1, cutp)
print('i1d1s and f are:',i1d1s,i1d1f)
i1d2s, i1d2f = _count_success_fail(i1d2, cutp)
print('i1d2s and f are:',i1d2s,i1d2f)
i1d3s, i1d3f = _count_success_fail(i1d3, cutp)
print('i1d3s and f are:',i1d3s,i1d3f)
i1d4s, i1d4f = _count_success_fail(i1d4, cutp)
print('i1d4s and f are:',i1d4s,i1d4f)
i1d5s, i1d5f = _count_success_fail(i1d5, cutp)
print('i1d5s and f are:',i1d5s,i1d5f)
i1d6s, i1d6f = _count_success_fail(i1d6, cutp)
print('i1d6s and f are:',i1d6s,i1d6f)
i1d7s, i1d7f = _count_success_fail(i1d7, cutp)
print('i1d7s and f are:',i1d7s,i1d7f)
i1d8s, i1d8f = _count_success_fail(i1d8, cutp)
print('i1d8s and f are:',i1d8s,i1d8f)
i1d9s, i1d9f = _count_success_fail(i1d9, cutp)
print('i1d9s and f are:',i1d9s,i1d9f)  # label fixed (was 'i19s')
i1d10s, i1d10f = _count_success_fail(i1d10, cutp)
print('i1d10s and f are:',i1d10s,i1d10f)
i1d11s, i1d11f = _count_success_fail(i1d11, cutp)
print('i1d11s and f are:',i1d11s,i1d11f)
i1d12s, i1d12f = _count_success_fail(i1d12, cutp)
print('i1d12s and f are:',i1d12s,i1d12f)
i1d13s, i1d13f = _count_success_fail(i1d13, cutp)
print('i1d13s and f are:',i1d13s,i1d13f)
###########################
######Total group test#####
###########################
# Pool the 2x2 table over all 13 strata:
# rows = intervention / no-intervention, columns = success / failure.
o11 = sum([i1d1s,i1d2s,i1d3s,i1d4s,i1d5s,i1d6s,i1d7s,i1d8s,i1d9s,i1d10s,i1d11s,i1d12s,i1d13s])
o12 = sum([i1d1f,i1d2f,i1d3f,i1d4f,i1d5f,i1d6f,i1d7f,i1d8f,i1d9f,i1d10f,i1d11f,i1d12f,i1d13f])
o21 = sum([i0d1s,i0d2s,i0d3s,i0d4s,i0d5s,i0d6s,i0d7s,i0d8s,i0d9s,i0d10s,i0d11s,i0d12s,i0d13s])
o22 = sum([i0d1f,i0d2f,i0d3f,i0d4f,i0d5f,i0d6f,i0d7f,i0d8f,i0d9f,i0d10f,i0d11f,i0d12f,i0d13f])
cn = o11 + o12 + o21 + o22
row1 = o11 + o12  # intervention-arm total
col1 = o11 + o21  # success total
# Chi-square-style statistic on the pooled table.
t1 = (cn**0.5)*(o11*o22 - o12*o21)/((row1*col1*(o22+o21)*(o22+o12))**0.5)
print('stat t1 is',t1)
print('int total suc is',o11)
print('int total failure is',o12)
print('noint total suc is',o21)
print('noint total failure is',o22)
# Hypergeometric-variance version of the same comparison.
t3 = (o11 - row1*col1/(cn))/((row1*col1*(cn-row1)*(cn-col1))/((cn**2)*(cn-1)))**0.5
print('t3 is',t3)
#####################
##Test in Group######
#####################
# Per-stratum ingredients for the Mantel-Haenszel statistics:
# xi = intervention-arm successes, ni = stratum totals,
# ri = intervention-arm totals, ci = success totals.
xi = [i1d1s,i1d2s,i1d3s,i1d4s,i1d5s,i1d6s,i1d7s,i1d8s,i1d9s,i1d10s,i1d11s,i1d12s,i1d13s]
ni = [i1d1s+i1d1f+i0d1s+i0d1f,i1d2s+i1d2f+i0d2s+i0d2f,i1d3s+i1d3f+i0d3s+i0d3f,i1d4s+i1d4f+i0d4s+i0d4f,i1d5s+i1d5f+i0d5s+i0d5f,
      i1d6s+i1d6f+i0d6s+i0d6f,i1d7s+i1d7f+i0d7s+i0d7f,i1d8s+i1d8f+i0d8s+i0d8f,i1d9s+i1d9f+i0d9s+i0d9f,i1d10s+i1d10f+i0d10s+i0d10f,
      i1d11s+i1d11f+i0d11s+i0d11f,i1d12s+i1d12f+i0d12s+i0d12f,i1d13s+i1d13f+i0d13s+i0d13f]
ri = [i1d1s+i1d1f,i1d2s+i1d2f,i1d3s+i1d3f,i1d4s+i1d4f,i1d5s+i1d5f,
      i1d6s+i1d6f,i1d7s+i1d7f,i1d8s+i1d8f,i1d9s+i1d9f,i1d10s+i1d10f,
      i1d11s+i1d11f,i1d12s+i1d12f,i1d13s+i1d13f]
ci = [i1d1s+i0d1s,i1d2s+i0d2s,i1d3s+i0d3s,i1d4s+i0d4s,i1d5s+i0d5s,
      i1d6s+i0d6s,i1d7s+i0d7s,i1d8s+i0d8s,i1d9s+i0d9s,i1d10s+i0d10s,
      i1d11s+i0d11s,i1d12s+i0d12s,i1d13s+i0d13s]
sumxi = sum(xi)
# Expected successes under the null, then two variance estimates.
sum2 = sum(ri[k]*ci[k]/ni[k] for k in range(13))
sum3 = sum(ri[k]*ci[k]*(ni[k]-ri[k])*(ni[k]-ri[k])/((ni[k]**2)*(ni[k]-1)) for k in range(13))
t4 = (sumxi-sum2)/(sum3**0.5)
print('numer is',(sumxi-sum2))
print('sum3 is',sum3)
print('stat T4 is',t4)
sum4 = sum(ri[k]*ci[k]*(ni[k]-ri[k])*(ni[k]-ri[k])/(ni[k]**3) for k in range(13))
print('sum4 is',sum4)
t5 = (sumxi-sum2)/(sum4**0.5)
print('stat T5 is',t5)
print('the whole process over')
| [
"numpy.percentile"
] | [((2187, 2226), 'numpy.percentile', 'numpy.percentile', (['cldlistf', '(100 * thres)'], {}), '(cldlistf, 100 * thres)\n', (2203, 2226), False, 'import numpy\n')] |
import tensorflow.keras as keras
import numpy as np
import pythia
import pythia.learned
import tensorflow as tf
import unittest
class TestLearnedBonds(unittest.TestCase):
    """End-to-end check that an InertiaRotation-based model can be trained."""

    @classmethod
    def setUpClass(cls):
        # Enable XLA JIT, and grow GPU memory on demand rather than
        # reserving every device up front.
        tf.config.optimizer.set_jit(True)
        for device in tf.config.experimental.list_physical_devices('GPU'):
            tf.config.experimental.set_memory_growth(device, True)

    def setUp(self):
        # Fix both RNGs so every run sees identical data and initialization.
        np.random.seed(12)
        tf.random.set_seed(13)

    def test_find_max_distance(self):
        sample_count = 1024
        neighbor_count = 4
        train_points = np.random.uniform(-1, 1, (sample_count, neighbor_count, 3))
        validation_points = np.random.uniform(-1, 1, (sample_count, neighbor_count, 3))

        # Label = index of the neighbor farthest from the origin.
        train_classes = np.argmax(np.linalg.norm(train_points, axis=-1), axis=-1)
        validation_classes = np.argmax(np.linalg.norm(validation_points, axis=-1), axis=-1)
        validation_data = (validation_points,
                           keras.utils.to_categorical(validation_classes, neighbor_count))

        model = keras.models.Sequential()
        model.add(pythia.learned.bonds.InertiaRotation(8, input_shape=train_points.shape[1:]))
        model.add(keras.layers.Flatten())
        model.add(keras.layers.Dense(8, activation='relu'))
        model.add(keras.layers.Dense(neighbor_count, activation='softmax'))
        model.compile('sgd', 'categorical_crossentropy', metrics=['accuracy'])

        history = model.fit(
            train_points,
            keras.utils.to_categorical(train_classes, neighbor_count),
            validation_data=validation_data, epochs=200, verbose=0)

        # 50% validation accuracy is a low bar on 4 classes, but enough to
        # show the rotation layer is learning something.
        self.assertGreater(history.history['val_accuracy'][-1], .5)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| [
"unittest.main",
"tensorflow.config.optimizer.set_jit",
"tensorflow.random.set_seed",
"numpy.random.uniform",
"numpy.random.seed",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Dense",
"tensorflow.config.experimental.set_memory_growth",
"numpy.linalg.norm",
"tensorflow.keras.mo... | [((1869, 1884), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1882, 1884), False, 'import unittest\n'), ((223, 256), 'tensorflow.config.optimizer.set_jit', 'tf.config.optimizer.set_jit', (['(True)'], {}), '(True)\n', (250, 256), True, 'import tensorflow as tf\n'), ((272, 323), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (316, 323), True, 'import tensorflow as tf\n'), ((443, 461), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (457, 461), True, 'import numpy as np\n'), ((470, 492), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(13)'], {}), '(13)\n', (488, 492), True, 'import tensorflow as tf\n'), ((610, 668), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(num_training, num_neighbors, 3)'], {}), '(-1, 1, (num_training, num_neighbors, 3))\n', (627, 668), True, 'import numpy as np\n'), ((697, 755), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(num_training, num_neighbors, 3)'], {}), '(-1, 1, (num_training, num_neighbors, 3))\n', (714, 755), True, 'import numpy as np\n'), ((1085, 1110), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (1108, 1110), True, 'import tensorflow.keras as keras\n'), ((361, 412), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (401, 412), True, 'import tensorflow as tf\n'), ((791, 828), 'numpy.linalg.norm', 'np.linalg.norm', (['train_points'], {'axis': '(-1)'}), '(train_points, axis=-1)\n', (805, 828), True, 'import numpy as np\n'), ((878, 920), 'numpy.linalg.norm', 'np.linalg.norm', (['validation_points'], {'axis': '(-1)'}), '(validation_points, axis=-1)\n', (892, 920), True, 'import numpy as np\n'), ((1005, 1066), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['validation_classes', 'num_neighbors'], {}), 
'(validation_classes, num_neighbors)\n', (1031, 1066), True, 'import tensorflow.keras as keras\n'), ((1129, 1204), 'pythia.learned.bonds.InertiaRotation', 'pythia.learned.bonds.InertiaRotation', (['(8)'], {'input_shape': 'train_points.shape[1:]'}), '(8, input_shape=train_points.shape[1:])\n', (1165, 1204), False, 'import pythia\n'), ((1224, 1246), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (1244, 1246), True, 'import tensorflow.keras as keras\n'), ((1266, 1306), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (1284, 1306), True, 'import tensorflow.keras as keras\n'), ((1326, 1381), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_neighbors'], {'activation': '"""softmax"""'}), "(num_neighbors, activation='softmax')\n", (1344, 1381), True, 'import tensorflow.keras as keras\n'), ((1536, 1592), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['train_classes', 'num_neighbors'], {}), '(train_classes, num_neighbors)\n', (1562, 1592), True, 'import tensorflow.keras as keras\n')] |
# coding: utf-8
from PIL import Image, ImageDraw
import numpy as np
def Robert(img,threshold):
    """Robert's cross edge detector producing a binary edge map.

    A pixel is set to 0 (edge) when the gradient magnitude is >= threshold
    and 255 otherwise.  On the last row/column, where the 2x2 neighbourhood
    falls outside the image, the raw pixel value is used as the magnitude
    (same behaviour as before).  Also saves the result to "Robert.bmp".

    img is assumed to be a single-band (grayscale) PIL image -- confirm.
    """
    pixel = img.load()
    img_new = Image.new(img.mode,img.size)
    array = np.zeros((img.width,img.height))
    mask1 = np.array([[1,0],
                      [0,-1]])
    mask2 = np.array([[0,1],
                      [-1,0]])
    for i in range(img.width):
        for j in range(img.height):
            # Explicit bounds check instead of the old bare `except:`,
            # which silently swallowed *every* error, not just the
            # out-of-range access at the image border.
            if i + 1 < img.width and j + 1 < img.height:
                temp1 = (pixel[i,j]*mask1[0][0] + pixel[i+1,j+1]*mask1[1][1])**2
                temp2 = (pixel[i,j+1]*mask2[0][1] + pixel[i+1,j]*mask2[1][0])**2
                array[i][j] = (temp1+temp2)**0.5
            else:
                array[i,j] = pixel[i,j]
    for i in range(img.width):
        for j in range(img.height):
            if array[i,j] < threshold:
                img_new.putpixel((i,j),255)
            else:
                img_new.putpixel((i,j),0)
    img_new.save("Robert.bmp")
    return img_new
def Prewitt(img,threshold):
    """Prewitt edge detector: 0 marks an edge, 255 the background.

    The one-pixel border keeps a zero gradient (never an edge).  The
    result is also written to "Prewitt.bmp".
    """
    px = img.load()
    result = Image.new(img.mode, img.size)
    magnitude = np.zeros((img.width, img.height))
    # Vertical- and horizontal-derivative masks.
    mask1 = np.array([[-1,-1,-1],
                      [ 0, 0, 0],
                      [ 1, 1, 1]])
    mask2 = np.array([[-1, 0, 1],
                      [-1, 0, 1],
                      [-1, 0, 1]])
    offsets = (-1, 0, 1)
    for col in range(1, img.width-1):
        for row in range(1, img.height-1):
            g1 = sum(px[col+dx, row+dy]*mask1[dx+1][dy+1] for dx in offsets for dy in offsets)
            g2 = sum(px[col+dx, row+dy]*mask2[dx+1][dy+1] for dx in offsets for dy in offsets)
            magnitude[col][row] = (g1**2 + g2**2)**0.5
    for col in range(img.width):
        for row in range(img.height):
            result.putpixel((col, row), 255 if magnitude[col, row] < threshold else 0)
    result.save("Prewitt.bmp")
    return result
def Sobel(img,threshold):
    """Sobel edge detector: 0 marks an edge, 255 the background.

    The one-pixel border keeps a zero gradient (never an edge).  The
    result is also written to "Sobel.bmp".
    """
    px = img.load()
    result = Image.new(img.mode, img.size)
    magnitude = np.zeros((img.width, img.height))
    # Vertical- and horizontal-derivative masks (Prewitt with a
    # double-weighted center row/column).
    mask1 = np.array([[-1,-2,-1],
                      [ 0, 0, 0],
                      [ 1, 2, 1]])
    mask2 = np.array([[-1, 0, 1],
                      [-2, 0, 2],
                      [-1, 0, 1]])
    offsets = (-1, 0, 1)
    for col in range(1, img.width-1):
        for row in range(1, img.height-1):
            g1 = sum(px[col+dx, row+dy]*mask1[dx+1][dy+1] for dx in offsets for dy in offsets)
            g2 = sum(px[col+dx, row+dy]*mask2[dx+1][dy+1] for dx in offsets for dy in offsets)
            magnitude[col][row] = (g1**2 + g2**2)**0.5
    for col in range(img.width):
        for row in range(img.height):
            result.putpixel((col, row), 255 if magnitude[col, row] < threshold else 0)
    result.save("Sobel.bmp")
    return result
def Frei_Chen(img,threshold):
    """Frei-Chen edge detector: 0 marks an edge, 255 the background.

    Like Sobel but with sqrt(2) center weights, making the gradient
    isotropic.  The one-pixel border keeps a zero gradient.  The result
    is also written to "Frei_Chen.bmp".
    """
    px = img.load()
    result = Image.new(img.mode, img.size)
    magnitude = np.zeros((img.width, img.height))
    mask1 = np.array([[-1,-(2**0.5),-1],
                      [ 0, 0, 0],
                      [ 1, 2**0.5, 1]])
    mask2 = np.array([[-1, 0, 1],
                      [-(2**0.5), 0, 2**0.5],
                      [-1, 0, 1]])
    offsets = (-1, 0, 1)
    for col in range(1, img.width-1):
        for row in range(1, img.height-1):
            g1 = sum(px[col+dx, row+dy]*mask1[dx+1][dy+1] for dx in offsets for dy in offsets)
            g2 = sum(px[col+dx, row+dy]*mask2[dx+1][dy+1] for dx in offsets for dy in offsets)
            magnitude[col][row] = (g1**2 + g2**2)**0.5
    for col in range(img.width):
        for row in range(img.height):
            result.putpixel((col, row), 255 if magnitude[col, row] < threshold else 0)
    result.save("Frei_Chen.bmp")
    return result
def Kirsch(img,threshold):
    """Kirsch compass edge detector: 0 marks an edge, 255 the background.

    The response at each interior pixel is the maximum over the eight
    compass orientations; the one-pixel border keeps a zero response.
    The result is also written to "Kirsch.bmp".
    """
    px = img.load()
    result = Image.new(img.mode, img.size)
    magnitude = np.zeros((img.width, img.height))
    # The eight compass masks (rotations of one kernel).
    mask_list = [
        np.array([[-3,-3, 5],
                  [-3, 0, 5],
                  [-3,-3, 5]]),
        np.array([[-3, 5, 5],
                  [-3, 0, 5],
                  [-3,-3,-3]]),
        np.array([[ 5, 5, 5],
                  [-3, 0,-3],
                  [-3,-3,-3]]),
        np.array([[ 5, 5,-3],
                  [ 5, 0,-3],
                  [-3,-3,-3]]),
        np.array([[ 5,-3,-3],
                  [ 5, 0,-3],
                  [ 5,-3,-3]]),
        np.array([[-3,-3,-3],
                  [ 5, 0,-3],
                  [ 5, 5,-3]]),
        np.array([[-3,-3,-3],
                  [-3, 0,-3],
                  [ 5, 5, 5]]),
        np.array([[-3,-3,-3],
                  [-3, 0, 5],
                  [-3, 5, 5]]),
    ]
    offsets = (-1, 0, 1)
    for col in range(1, img.width-1):
        for row in range(1, img.height-1):
            responses = [
                sum(px[col+dx, row+dy]*m[dx+1][dy+1] for dx in offsets for dy in offsets)
                for m in mask_list
            ]
            magnitude[col][row] = max(responses)
    for col in range(img.width):
        for row in range(img.height):
            result.putpixel((col, row), 255 if magnitude[col, row] < threshold else 0)
    result.save("Kirsch.bmp")
    return result
def Robinson(img,threshold):
    """Robinson compass edge detector: 0 marks an edge, 255 the background.

    Same structure as Kirsch, but with Sobel-derived compass masks; the
    response is the maximum over the eight orientations and the one-pixel
    border keeps a zero response.  The result is also written to
    "Robinson.bmp".
    """
    px = img.load()
    result = Image.new(img.mode, img.size)
    magnitude = np.zeros((img.width, img.height))
    # Eight rotations of the Sobel kernel.
    mask_list = [
        np.array([[-1, 0, 1],
                  [-2, 0, 2],
                  [-1, 0, 1]]),
        np.array([[ 0, 1, 2],
                  [-1, 0, 1],
                  [-2,-1, 0]]),
        np.array([[ 1, 2, 1],
                  [ 0, 0, 0],
                  [-1,-2,-1]]),
        np.array([[ 2, 1, 0],
                  [ 1, 0,-1],
                  [ 0,-1,-2]]),
        np.array([[ 1, 0,-1],
                  [ 2, 0,-2],
                  [ 1, 0,-1]]),
        np.array([[ 0,-1,-2],
                  [ 1, 0,-1],
                  [ 2, 1, 0]]),
        np.array([[-1,-2,-1],
                  [ 0, 0, 0],
                  [ 1, 2, 1]]),
        np.array([[-2,-1, 0],
                  [-1, 0, 1],
                  [ 0, 1, 2]]),
    ]
    offsets = (-1, 0, 1)
    for col in range(1, img.width-1):
        for row in range(1, img.height-1):
            responses = [
                sum(px[col+dx, row+dy]*m[dx+1][dy+1] for dx in offsets for dy in offsets)
                for m in mask_list
            ]
            magnitude[col][row] = max(responses)
    for col in range(img.width):
        for row in range(img.height):
            result.putpixel((col, row), 255 if magnitude[col, row] < threshold else 0)
    result.save("Robinson.bmp")
    return result
def Nevatia_Babu(img,threshold):
    """Nevatia-Babu 5x5 oriented edge detector.

    Takes the maximum response over six orientation masks; pixels whose
    maximum response is >= threshold are set to 0 (edge), others to 255.
    The two-pixel border keeps a zero response.  The result is also
    written to "Nevatia_Babu.bmp".
    """
    pixel = img.load()
    img_new = Image.new(img.mode,img.size)
    array = np.zeros((img.width,img.height))
    mask1 = np.array([[ 100, 100, 100, 100, 100], # 0 degree
                      [ 100, 100, 100, 100, 100],
                      [   0,   0,   0,   0,   0],
                      [-100,-100,-100,-100,-100],
                      [-100,-100,-100,-100,-100]])
    mask2 = np.array([[ 100, 100, 100, 100, 100], # 30 degree
                      [ 100, 100, 100,  78, -32],
                      [ 100,  92,   0, -92,-100],
                      [  32, -78,-100,-100,-100],
                      [-100,-100,-100,-100,-100]])
    mask3 = np.array([[ 100, 100, 100,  32,-100], # 60 degree
                      [ 100, 100,  92, -78,-100],
                      [ 100, 100,   0,-100,-100],
                      [ 100,  78, -92,-100,-100],
                      [ 100, -32,-100,-100,-100]])
    mask4 = np.array([[-100,-100,   0, 100, 100], # -90 degree
                      [-100,-100,   0, 100, 100],
                      [-100,-100,   0, 100, 100],
                      [-100,-100,   0, 100, 100],
                      [-100,-100,   0, 100, 100]])
    mask5 = np.array([[-100,  32, 100, 100, 100], # -60 degree
                      [-100, -78,  92, 100, 100],
                      [-100,-100,   0, 100, 100],
                      [-100,-100, -92,  78, 100],
                      [-100,-100,-100, -32, 100]])
    mask6 = np.array([[ 100, 100, 100, 100, 100], # -30 degree
                      [ -32,  78, 100, 100, 100],
                      [-100, -92,   0,  92, 100],
                      [-100,-100,-100, -78,  32],
                      [-100,-100,-100,-100,-100]])
    mask_list = [mask1, mask2, mask3, mask4, mask5, mask6]
    # Interior pixels only: the 5x5 window needs a two-pixel margin.
    for i in range(2,img.width-2):
        for j in range(2,img.height-2):
            temp = np.zeros(6)
            for k in range(len(mask_list)):
                for x in range(-2,3):
                    for y in range(-2,3):
                        # NOTE: the mask is indexed with [-x+2,-y+2], i.e.
                        # flipped in both axes (true convolution), unlike the
                        # 3x3 detectors above which use correlation indexing.
                        temp[k] += pixel[i+x,j+y]*mask_list[k][-x+2,-y+2]
            array[i][j] = max(temp)
    for i in range(img.width):
        for j in range(img.height):
            if array[i,j] < threshold:
                img_new.putpixel((i,j),255)
            else:
                img_new.putpixel((i,j),0)
    img_new.save("Nevatia_Babu.bmp")
    return img_new
# Run every edge detector on the same grayscale test image; each call also
# writes its binary edge map to <Name>.bmp in the working directory.
# Thresholds are hand-tuned per operator -- presumably for lena.bmp.
lena = Image.open("lena.bmp")
robert = Robert(lena,12)
prewitt = Prewitt(lena,24)
sobel = Sobel(lena,38)
frei_chen = Frei_Chen(lena,30)
kirsch = Kirsch(lena,135)
robinson = Robinson(lena,60)
nevatia_babu=Nevatia_Babu(lena,12500)
| [
"numpy.array",
"PIL.Image.new",
"numpy.zeros",
"PIL.Image.open"
] | [((9826, 9848), 'PIL.Image.open', 'Image.open', (['"""lena.bmp"""'], {}), "('lena.bmp')\n", (9836, 9848), False, 'from PIL import Image, ImageDraw\n'), ((135, 164), 'PIL.Image.new', 'Image.new', (['img.mode', 'img.size'], {}), '(img.mode, img.size)\n', (144, 164), False, 'from PIL import Image, ImageDraw\n'), ((176, 209), 'numpy.zeros', 'np.zeros', (['(img.width, img.height)'], {}), '((img.width, img.height))\n', (184, 209), True, 'import numpy as np\n'), ((249, 276), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (257, 276), True, 'import numpy as np\n'), ((309, 336), 'numpy.array', 'np.array', (['[[0, 1], [-1, 0]]'], {}), '([[0, 1], [-1, 0]])\n', (317, 336), True, 'import numpy as np\n'), ((1157, 1186), 'PIL.Image.new', 'Image.new', (['img.mode', 'img.size'], {}), '(img.mode, img.size)\n', (1166, 1186), False, 'from PIL import Image, ImageDraw\n'), ((1198, 1231), 'numpy.zeros', 'np.zeros', (['(img.width, img.height)'], {}), '((img.width, img.height))\n', (1206, 1231), True, 'import numpy as np\n'), ((1243, 1289), 'numpy.array', 'np.array', (['[[-1, -1, -1], [0, 0, 0], [1, 1, 1]]'], {}), '([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])\n', (1251, 1289), True, 'import numpy as np\n'), ((1346, 1392), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n', (1354, 1392), True, 'import numpy as np\n'), ((2129, 2158), 'PIL.Image.new', 'Image.new', (['img.mode', 'img.size'], {}), '(img.mode, img.size)\n', (2138, 2158), False, 'from PIL import Image, ImageDraw\n'), ((2170, 2203), 'numpy.zeros', 'np.zeros', (['(img.width, img.height)'], {}), '((img.width, img.height))\n', (2178, 2203), True, 'import numpy as np\n'), ((2215, 2261), 'numpy.array', 'np.array', (['[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]'], {}), '([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\n', (2223, 2261), True, 'import numpy as np\n'), ((2318, 2364), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {}), 
'([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n', (2326, 2364), True, 'import numpy as np\n'), ((3103, 3132), 'PIL.Image.new', 'Image.new', (['img.mode', 'img.size'], {}), '(img.mode, img.size)\n', (3112, 3132), False, 'from PIL import Image, ImageDraw\n'), ((3144, 3177), 'numpy.zeros', 'np.zeros', (['(img.width, img.height)'], {}), '((img.width, img.height))\n', (3152, 3177), True, 'import numpy as np\n'), ((3189, 3249), 'numpy.array', 'np.array', (['[[-1, -2 ** 0.5, -1], [0, 0, 0], [1, 2 ** 0.5, 1]]'], {}), '([[-1, -2 ** 0.5, -1], [0, 0, 0], [1, 2 ** 0.5, 1]])\n', (3197, 3249), True, 'import numpy as np\n'), ((3304, 3364), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2 ** 0.5, 0, 2 ** 0.5], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-2 ** 0.5, 0, 2 ** 0.5], [-1, 0, 1]])\n', (3312, 3364), True, 'import numpy as np\n'), ((4102, 4131), 'PIL.Image.new', 'Image.new', (['img.mode', 'img.size'], {}), '(img.mode, img.size)\n', (4111, 4131), False, 'from PIL import Image, ImageDraw\n'), ((4143, 4176), 'numpy.zeros', 'np.zeros', (['(img.width, img.height)'], {}), '((img.width, img.height))\n', (4151, 4176), True, 'import numpy as np\n'), ((4188, 4236), 'numpy.array', 'np.array', (['[[-3, -3, 5], [-3, 0, 5], [-3, -3, 5]]'], {}), '([[-3, -3, 5], [-3, 0, 5], [-3, -3, 5]])\n', (4196, 4236), True, 'import numpy as np\n'), ((4292, 4340), 'numpy.array', 'np.array', (['[[-3, 5, 5], [-3, 0, 5], [-3, -3, -3]]'], {}), '([[-3, 5, 5], [-3, 0, 5], [-3, -3, -3]])\n', (4300, 4340), True, 'import numpy as np\n'), ((4396, 4444), 'numpy.array', 'np.array', (['[[5, 5, 5], [-3, 0, -3], [-3, -3, -3]]'], {}), '([[5, 5, 5], [-3, 0, -3], [-3, -3, -3]])\n', (4404, 4444), True, 'import numpy as np\n'), ((4500, 4548), 'numpy.array', 'np.array', (['[[5, 5, -3], [5, 0, -3], [-3, -3, -3]]'], {}), '([[5, 5, -3], [5, 0, -3], [-3, -3, -3]])\n', (4508, 4548), True, 'import numpy as np\n'), ((4604, 4652), 'numpy.array', 'np.array', (['[[5, -3, -3], [5, 0, -3], [5, -3, -3]]'], {}), '([[5, -3, -3], [5, 0, -3], [5, -3, 
-3]])\n', (4612, 4652), True, 'import numpy as np\n'), ((4709, 4757), 'numpy.array', 'np.array', (['[[-3, -3, -3], [5, 0, -3], [5, 5, -3]]'], {}), '([[-3, -3, -3], [5, 0, -3], [5, 5, -3]])\n', (4717, 4757), True, 'import numpy as np\n'), ((4813, 4861), 'numpy.array', 'np.array', (['[[-3, -3, -3], [-3, 0, -3], [5, 5, 5]]'], {}), '([[-3, -3, -3], [-3, 0, -3], [5, 5, 5]])\n', (4821, 4861), True, 'import numpy as np\n'), ((4917, 4965), 'numpy.array', 'np.array', (['[[-3, -3, -3], [-3, 0, 5], [-3, 5, 5]]'], {}), '([[-3, -3, -3], [-3, 0, 5], [-3, 5, 5]])\n', (4925, 4965), True, 'import numpy as np\n'), ((5759, 5788), 'PIL.Image.new', 'Image.new', (['img.mode', 'img.size'], {}), '(img.mode, img.size)\n', (5768, 5788), False, 'from PIL import Image, ImageDraw\n'), ((5800, 5833), 'numpy.zeros', 'np.zeros', (['(img.width, img.height)'], {}), '((img.width, img.height))\n', (5808, 5833), True, 'import numpy as np\n'), ((5845, 5891), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n', (5853, 5891), True, 'import numpy as np\n'), ((5949, 5995), 'numpy.array', 'np.array', (['[[0, 1, 2], [-1, 0, 1], [-2, -1, 0]]'], {}), '([[0, 1, 2], [-1, 0, 1], [-2, -1, 0]])\n', (5957, 5995), True, 'import numpy as np\n'), ((6053, 6099), 'numpy.array', 'np.array', (['[[1, 2, 1], [0, 0, 0], [-1, -2, -1]]'], {}), '([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])\n', (6061, 6099), True, 'import numpy as np\n'), ((6158, 6204), 'numpy.array', 'np.array', (['[[2, 1, 0], [1, 0, -1], [0, -1, -2]]'], {}), '([[2, 1, 0], [1, 0, -1], [0, -1, -2]])\n', (6166, 6204), True, 'import numpy as np\n'), ((6262, 6308), 'numpy.array', 'np.array', (['[[1, 0, -1], [2, 0, -2], [1, 0, -1]]'], {}), '([[1, 0, -1], [2, 0, -2], [1, 0, -1]])\n', (6270, 6308), True, 'import numpy as np\n'), ((6366, 6412), 'numpy.array', 'np.array', (['[[0, -1, -2], [1, 0, -1], [2, 1, 0]]'], {}), '([[0, -1, -2], [1, 0, -1], [2, 1, 0]])\n', (6374, 6412), True, 'import numpy as np\n'), 
((6471, 6517), 'numpy.array', 'np.array', (['[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]'], {}), '([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\n', (6479, 6517), True, 'import numpy as np\n'), ((6575, 6621), 'numpy.array', 'np.array', (['[[-2, -1, 0], [-1, 0, 1], [0, 1, 2]]'], {}), '([[-2, -1, 0], [-1, 0, 1], [0, 1, 2]])\n', (6583, 6621), True, 'import numpy as np\n'), ((7424, 7453), 'PIL.Image.new', 'Image.new', (['img.mode', 'img.size'], {}), '(img.mode, img.size)\n', (7433, 7453), False, 'from PIL import Image, ImageDraw\n'), ((7465, 7498), 'numpy.zeros', 'np.zeros', (['(img.width, img.height)'], {}), '((img.width, img.height))\n', (7473, 7498), True, 'import numpy as np\n'), ((7510, 7659), 'numpy.array', 'np.array', (['[[100, 100, 100, 100, 100], [100, 100, 100, 100, 100], [0, 0, 0, 0, 0], [-\n 100, -100, -100, -100, -100], [-100, -100, -100, -100, -100]]'], {}), '([[100, 100, 100, 100, 100], [100, 100, 100, 100, 100], [0, 0, 0, 0,\n 0], [-100, -100, -100, -100, -100], [-100, -100, -100, -100, -100]])\n', (7518, 7659), True, 'import numpy as np\n'), ((7774, 7927), 'numpy.array', 'np.array', (['[[100, 100, 100, 100, 100], [100, 100, 100, 78, -32], [100, 92, 0, -92, -\n 100], [32, -78, -100, -100, -100], [-100, -100, -100, -100, -100]]'], {}), '([[100, 100, 100, 100, 100], [100, 100, 100, 78, -32], [100, 92, 0,\n -92, -100], [32, -78, -100, -100, -100], [-100, -100, -100, -100, -100]])\n', (7782, 7927), True, 'import numpy as np\n'), ((8044, 8198), 'numpy.array', 'np.array', (['[[100, 100, 100, 32, -100], [100, 100, 92, -78, -100], [100, 100, 0, -100, \n -100], [100, 78, -92, -100, -100], [100, -32, -100, -100, -100]]'], {}), '([[100, 100, 100, 32, -100], [100, 100, 92, -78, -100], [100, 100, \n 0, -100, -100], [100, 78, -92, -100, -100], [100, -32, -100, -100, -100]])\n', (8052, 8198), True, 'import numpy as np\n'), ((8339, 8488), 'numpy.array', 'np.array', (['[[-100, -100, 0, 100, 100], [-100, -100, 0, 100, 100], [-100, -100, 0, 100,\n 100], [-100, -100, 0, 100, 100], [-100, 
-100, 0, 100, 100]]'], {}), '([[-100, -100, 0, 100, 100], [-100, -100, 0, 100, 100], [-100, -100,\n 0, 100, 100], [-100, -100, 0, 100, 100], [-100, -100, 0, 100, 100]])\n', (8347, 8488), True, 'import numpy as np\n'), ((8595, 8748), 'numpy.array', 'np.array', (['[[-100, 32, 100, 100, 100], [-100, -78, 92, 100, 100], [-100, -100, 0, 100,\n 100], [-100, -100, -92, 78, 100], [-100, -100, -100, -32, 100]]'], {}), '([[-100, 32, 100, 100, 100], [-100, -78, 92, 100, 100], [-100, -100,\n 0, 100, 100], [-100, -100, -92, 78, 100], [-100, -100, -100, -32, 100]])\n', (8603, 8748), True, 'import numpy as np\n'), ((8861, 9015), 'numpy.array', 'np.array', (['[[100, 100, 100, 100, 100], [-32, 78, 100, 100, 100], [-100, -92, 0, 92, \n 100], [-100, -100, -100, -78, 32], [-100, -100, -100, -100, -100]]'], {}), '([[100, 100, 100, 100, 100], [-32, 78, 100, 100, 100], [-100, -92, \n 0, 92, 100], [-100, -100, -100, -78, 32], [-100, -100, -100, -100, -100]])\n', (8869, 9015), True, 'import numpy as np\n'), ((5175, 5186), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (5183, 5186), True, 'import numpy as np\n'), ((6834, 6845), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (6842, 6845), True, 'import numpy as np\n'), ((9272, 9283), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (9280, 9283), True, 'import numpy as np\n')] |
import os, sys
import copy
import argparse
import random
import cv2
import pickle
import numpy as np
import torch
import pygame
from tqdm import tqdm
from functools import partial
import json
dqgnn_path=os.path.dirname(os.path.abspath(__file__))
root_path=os.path.dirname(os.path.dirname(dqgnn_path))
sys.path.append(root_path)
from arena import Arena, Wrapper
from examples.rl_dqgnn.nn_utils import EnvStateProcessor, get_nn_func, GraphObservationEnvWrapper
from examples.env_setting_kwargs import get_env_kwargs_dict
from torch_geometric.data import Batch
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars/arrays (and tuples) to builtins."""

    def default(self, obj):
        # Containers first, then numpy scalar families; anything else is
        # delegated to the base class (which raises TypeError).
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, tuple):
            return list(obj)
        return super().default(obj)
class GNNQEvaluator():
    """Roll out a trained graph-DQN policy in the Arena environment and
    report per-episode scores; optionally record videos and (state, action)
    trajectories for imitation-learning datasets."""
    def __init__(self, model_path, nn_name, env_setting, num_trajs=100,
                 store_video=False, store_traj=False, only_success_traj=False, fps=50, eps=0.0):
        """Load the Q-network checkpoint and prepare output directories.

        Args:
            model_path: path to the saved state_dict; its directory must also
                contain ``network_kwargs.pkl`` with the network constructor args.
            nn_name: key resolved by ``get_nn_func`` to a network class.
            env_setting: key resolved by ``get_env_kwargs_dict``.
            num_trajs: episodes to run per coin count.
            store_video: write one .mp4 per episode under ``videos/<model_dir>``.
            store_traj: collect (state, action) pairs into a JSON dataset.
            only_success_traj: keep only episodes that collect every coin.
            fps: stored but not otherwise used in this class — TODO confirm.
            eps: epsilon for epsilon-greedy action selection at eval time.
        """
        model_dir = os.path.dirname(model_path)
        video_dir = os.path.join(dqgnn_path, "videos")
        if not os.path.exists(video_dir):
            os.mkdir(video_dir)
        # One video subdirectory per model checkpoint directory.
        video_path = os.path.join(dqgnn_path, f"videos/{os.path.basename(model_dir)}")
        if not os.path.exists(video_path):
            os.mkdir(video_path)
        if store_traj:
            traj_path_suffix='_success' if only_success_traj else ''
            self.traj_path = os.path.join(model_dir, f"traj{traj_path_suffix}.json")
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Network constructor kwargs were pickled next to the checkpoint.
        with open(model_dir + '/network_kwargs.pkl', 'rb') as f:
            network_kwargs = pickle.load(f)
        nn_func = get_nn_func(nn_name)
        qnet = nn_func(**network_kwargs)
        qnet.eval()
        state_dict = torch.load(model_path)
        qnet.load_state_dict(state_dict)
        qnet = qnet.to(device)
        # os.environ.pop("SDL_VIDEODRIVER")
        #env_fn = lambda kwargs_dict: Wrapper(Arena(**kwargs_dict))
        env_fn = lambda env_kwargs: GraphObservationEnvWrapper(Arena, env_kwargs)
        env_kwargs_dict = get_env_kwargs_dict(env_setting)
        self.env_fn = env_fn
        self.env_kwargs_dict = env_kwargs_dict
        self.qnet = qnet
        self.device=device
        self.eps = eps
        self.video_path=video_path
        self.num_trajs = num_trajs
        self.store_video = store_video
        self.store_traj = store_traj
        self.only_success_traj=only_success_traj
        self.fps = fps
    def update_num_coins(self, num_coins):
        """Set the number of coins used when the next environment is built."""
        self.env_kwargs_dict['num_coins'] = num_coins
    def act_best(self, state):
        """Greedy action for a single graph observation.

        NOTE(review): not called inside this class — ``act_eps_best`` is the
        path used by ``evaluate``; kept presumably for external callers.
        """
        state = copy.deepcopy(state)
        # Single-graph batch index so the GNN pooling works on one sample.
        state.batch = torch.zeros(len(state.x)).long()
        state = state.to(self.device)
        with torch.no_grad():
            best_action = self.qnet(state).argmax()
        return best_action.cpu().item()
    def act_eps_best(self, state):
        """Epsilon-greedy action: random with probability ``self.eps``,
        otherwise the argmax of the Q-network over the 6 discrete actions."""
        if random.random() < self.eps:
            return random.choice(np.arange(6))
        #state = copy.deepcopy(state)
        #state.batch = torch.zeros(len(state.x)).long()
        #state = state.to(self.device)
        state=Batch.from_data_list([state]).to(self.device)
        with torch.no_grad():
            best_action = self.qnet(state).argmax()
        return best_action.cpu().item()
    def evaluate(self, num_coins_min=None, num_coins_max=None):
        """Run ``num_trajs`` episodes for every coin count in the inclusive
        range [num_coins_min, num_coins_max] and return summary statistics.

        Returns:
            dict with keys 'score' (mean) and 'std:' computed over the
            episodes of the LAST coin count only.
            NOTE(review): the key 'std:' contains a trailing colon — looks
            like a typo, but callers may already rely on it.
        """
        if num_coins_min is None:
            # Fall back to the (min, max) pair stored in the env kwargs.
            num_coins_min, num_coins_max = self.env_kwargs_dict['num_coins']
        trajs = []
        for num_coins in range(num_coins_min, num_coins_max+1):
            self.update_num_coins(num_coins)
            env = GraphObservationEnvWrapper(Arena, self.env_kwargs_dict)
            env.init()
            scores = []
            for traj_id in tqdm(range(self.num_trajs)):
                state = env.reset()
                state_raw = env._state_raw
                if self.store_video:
                    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
                    output_fname=os.path.join(self.video_path,f'{traj_id}.mp4')
                    output_movie = cv2.VideoWriter(output_fname,
                                                    fourcc, 6, (env.render().shape[0], env.render().shape[1]))
                    #print('\nsaving to: ', output_fname)
                current_traj = []
                # One episode, capped at max_step environment steps.
                for j in range(self.env_kwargs_dict['max_step']):
                    if self.store_video:
                        output_movie.write(env.render())
                    action = self.act_eps_best(state)
                    if self.store_traj:
                        # One-hot encode the chosen action for the dataset.
                        action_list = [0 for _ in env.actions]
                        action_list[action] = 1
                        current_traj.append({'state': state_raw, 'action': action_list})
                    next_state, reward, done, _ = env.step(action)
                    next_state_raw = env._state_raw
                    state_raw = next_state_raw
                    state = next_state
                    if done:
                        if self.store_video:
                            output_movie.write(env.render())
                        break
                if self.store_video:
                    output_movie.release()
                if self.store_traj:
                    # Keep the episode unless only-success filtering rejects it.
                    if not self.only_success_traj or int(env.score())==num_coins:
                        #print(f'traj saved, reward {env.score()}')
                        trajs.extend(current_traj)
                scores.append(env.score())
                print('reward:', env.score())
            print('num_coins:', self.env_kwargs_dict['num_coins'],
                  'score:', np.mean(scores), 'std:', np.std(scores))
        if self.store_traj:
            print(f'saving (s,a) dataset of size {len(trajs)}')
            with open(self.traj_path, 'w') as f:
                # Metadata header mirroring the environment configuration.
                info = {"algorithm": "DoubleDQN",
                        "rand_seed": 0,
                        "test_time": 200,
                        "width": self.env_kwargs_dict['width'],
                        "height": self.env_kwargs_dict['height'],
                        "object_size": self.env_kwargs_dict['object_size'],
                        "obstacle_size": self.env_kwargs_dict['obstacle_size'],
                        "num_coins_list": [num_coins_min,num_coins_max],
                        "num_enemies_list": [0],
                        "num_bombs": 0,
                        "explosion_max_step": self.env_kwargs_dict['explosion_max_step'],
                        "explosion_radius": self.env_kwargs_dict['explosion_radius'],
                        "num_projectiles": self.env_kwargs_dict['num_projectiles'],
                        "num_obstacles_list": [0],
                        "agent_speed": self.env_kwargs_dict['agent_speed'],
                        "enemy_speed": self.env_kwargs_dict['enemy_speed'],
                        "p_change_direction": self.env_kwargs_dict['p_change_direction'],
                        "projectile_speed": self.env_kwargs_dict['projectile_speed'],
                        "reward_decay": self.env_kwargs_dict['reward_decay']}
                info["data"] = trajs
                json.dump(info, f, cls=NpEncoder)
        # NOTE(review): 'scores' here only covers the last num_coins value.
        return {'score': np.mean(scores), 'std:': np.std(scores)}
if __name__ == "__main__":
    # Command-line interface for a single evaluation run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str)
    for flag_name in ('--store_video', '--store_traj', '--only_success_traj'):
        parser.add_argument(flag_name, action="store_true", default=False)
    parser.add_argument('--num_rewards', type=int, default=5)
    parser.add_argument('--num_trajs', type=int, default=100)
    parser.add_argument('--env_setting', type=str, default='legacy')
    parser.add_argument('--eps', type=float, default=0.0)
    parser.add_argument('--nn_name', type=str, default="PointConv")
    args = parser.parse_args()

    # Seed every RNG source for reproducible rollouts.
    seed = 2333
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    evaluator = GNNQEvaluator(
        model_path=args.model_path,
        nn_name=args.nn_name,
        env_setting=args.env_setting,
        num_trajs=args.num_trajs,
        store_video=args.store_video,
        store_traj=args.store_traj,
        only_success_traj=args.only_success_traj,
        eps=args.eps,
    )
    evaluator.update_num_coins(args.num_rewards)
    # Evaluate on exactly `num_rewards` coins (min == max).
    eval_result = evaluator.evaluate(args.num_rewards, args.num_rewards)
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"examples.env_setting_kwargs.get_env_kwargs_dict",
"examples.rl_dqgnn.nn_utils.get_nn_func",
"pickle.load",
"numpy.mean",
"numpy.arange",
"torch.no_grad",
"os.path.join",
"sys.path.append",
"os.path.abspath... | [((302, 328), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (317, 328), False, 'import os, sys\n'), ((220, 245), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (235, 245), False, 'import os, sys\n'), ((273, 300), 'os.path.dirname', 'os.path.dirname', (['dqgnn_path'], {}), '(dqgnn_path)\n', (288, 300), False, 'import os, sys\n'), ((7564, 7589), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7587, 7589), False, 'import argparse\n'), ((8231, 8248), 'random.seed', 'random.seed', (['(2333)'], {}), '(2333)\n', (8242, 8248), False, 'import random\n'), ((8253, 8273), 'numpy.random.seed', 'np.random.seed', (['(2333)'], {}), '(2333)\n', (8267, 8273), True, 'import numpy as np\n'), ((8278, 8301), 'torch.manual_seed', 'torch.manual_seed', (['(2333)'], {}), '(2333)\n', (8295, 8301), False, 'import torch\n'), ((1189, 1216), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (1204, 1216), False, 'import os, sys\n'), ((1237, 1271), 'os.path.join', 'os.path.join', (['dqgnn_path', '"""videos"""'], {}), "(dqgnn_path, 'videos')\n", (1249, 1271), False, 'import os, sys\n'), ((1894, 1914), 'examples.rl_dqgnn.nn_utils.get_nn_func', 'get_nn_func', (['nn_name'], {}), '(nn_name)\n', (1905, 1914), False, 'from examples.rl_dqgnn.nn_utils import EnvStateProcessor, get_nn_func, GraphObservationEnvWrapper\n'), ((1997, 2019), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2007, 2019), False, 'import torch\n'), ((2314, 2346), 'examples.env_setting_kwargs.get_env_kwargs_dict', 'get_env_kwargs_dict', (['env_setting'], {}), '(env_setting)\n', (2333, 2346), False, 'from examples.env_setting_kwargs import get_env_kwargs_dict\n'), ((2863, 2883), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (2876, 2883), False, 'import copy\n'), ((1287, 1312), 'os.path.exists', 'os.path.exists', (['video_dir'], {}), '(video_dir)\n', (1301, 1312), 
False, 'import os, sys\n'), ((1326, 1345), 'os.mkdir', 'os.mkdir', (['video_dir'], {}), '(video_dir)\n', (1334, 1345), False, 'import os, sys\n'), ((1448, 1474), 'os.path.exists', 'os.path.exists', (['video_path'], {}), '(video_path)\n', (1462, 1474), False, 'import os, sys\n'), ((1488, 1508), 'os.mkdir', 'os.mkdir', (['video_path'], {}), '(video_path)\n', (1496, 1508), False, 'import os, sys\n'), ((1630, 1685), 'os.path.join', 'os.path.join', (['model_dir', 'f"""traj{traj_path_suffix}.json"""'], {}), "(model_dir, f'traj{traj_path_suffix}.json')\n", (1642, 1685), False, 'import os, sys\n'), ((1860, 1874), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1871, 1874), False, 'import pickle\n'), ((2242, 2287), 'examples.rl_dqgnn.nn_utils.GraphObservationEnvWrapper', 'GraphObservationEnvWrapper', (['Arena', 'env_kwargs'], {}), '(Arena, env_kwargs)\n', (2268, 2287), False, 'from examples.rl_dqgnn.nn_utils import EnvStateProcessor, get_nn_func, GraphObservationEnvWrapper\n'), ((2990, 3005), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3003, 3005), False, 'import torch\n'), ((3146, 3161), 'random.random', 'random.random', ([], {}), '()\n', (3159, 3161), False, 'import random\n'), ((3427, 3442), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3440, 3442), False, 'import torch\n'), ((3859, 3914), 'examples.rl_dqgnn.nn_utils.GraphObservationEnvWrapper', 'GraphObservationEnvWrapper', (['Arena', 'self.env_kwargs_dict'], {}), '(Arena, self.env_kwargs_dict)\n', (3885, 3914), False, 'from examples.rl_dqgnn.nn_utils import EnvStateProcessor, get_nn_func, GraphObservationEnvWrapper\n'), ((7480, 7495), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (7487, 7495), True, 'import numpy as np\n'), ((7505, 7519), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (7511, 7519), True, 'import numpy as np\n'), ((1727, 1752), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1750, 1752), False, 'import torch\n'), ((3207, 3219), 
'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (3216, 3219), True, 'import numpy as np\n'), ((3368, 3397), 'torch_geometric.data.Batch.from_data_list', 'Batch.from_data_list', (['[state]'], {}), '([state])\n', (3388, 3397), False, 'from torch_geometric.data import Batch\n'), ((5886, 5901), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (5893, 5901), True, 'import numpy as np\n'), ((5911, 5925), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (5917, 5925), True, 'import numpy as np\n'), ((7419, 7452), 'json.dump', 'json.dump', (['info', 'f'], {'cls': 'NpEncoder'}), '(info, f, cls=NpEncoder)\n', (7428, 7452), False, 'import json\n'), ((1402, 1429), 'os.path.basename', 'os.path.basename', (['model_dir'], {}), '(model_dir)\n', (1418, 1429), False, 'import os, sys\n'), ((4163, 4205), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""m"""', '"""p"""', '"""4"""', '"""v"""'], {}), "('m', 'p', '4', 'v')\n", (4185, 4205), False, 'import cv2\n'), ((4239, 4286), 'os.path.join', 'os.path.join', (['self.video_path', 'f"""{traj_id}.mp4"""'], {}), "(self.video_path, f'{traj_id}.mp4')\n", (4251, 4286), False, 'import os, sys\n')] |
import math
import numpy as np
import torch
import gpytorch
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import MultiStepLR
from sklearn.metrics import roc_auc_score,accuracy_score
from svdkl import (NeuralNetLayer,
GaussianProcessLayer,
DKLModel)
"""
Trainer class train/eval model
"""
class SvDklTrainer:
    """Train and evaluate a stochastic-variational deep kernel learning
    (SV-DKL) binary classifier."""
    def __init__(self, hyper_params, aml_run):
        """Initialize the SV-DKL model, likelihood, and optimizer.

        Args:
            hyper_params (dict): model hyperparameters; must contain
                'input_dim', 'latent_dim', 'grid_bounds', 'grid_size',
                'num_mixtures', 'nn_lr', 'lh_lr', and 'epochs'.
            aml_run: AzureML run object for metric logging, or None.
        """
        self.device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.hyper_params = hyper_params
        print(self.hyper_params)
        # Bernoulli likelihood for binary classification.
        self.likelihood = gpytorch.likelihoods.BernoulliLikelihood().to(self.device)
        # Feature extractor mapping inputs into the GP's latent space.
        nnet_layer = NeuralNetLayer(data_dim=self.hyper_params['input_dim'],
                            output_dim=self.hyper_params['latent_dim']
                            ).to(self.device)
        self.model = DKLModel(nnet_layer,
                    num_dim=self.hyper_params['latent_dim'],
                    grid_bounds=self.hyper_params['grid_bounds'],
                    grid_size=self.hyper_params['grid_size'],
                    num_mixtures = self.hyper_params['num_mixtures']
                    ).to(self.device)
        # Stochastic variational optimizer: the GP hyperparameters get a
        # learning rate 100x smaller than the likelihood/variational params.
        self.optimizer=Adam([
            {'params': self.model.nnet_layer.parameters(),'lr':self.hyper_params['nn_lr'], 'betas':(0.9, 0.999)},
            {'params': self.model.gp_layer.hyperparameters(), 'lr': self.hyper_params['lh_lr'] * 0.01},
            {'params':self. model.gp_layer.variational_parameters()},
            {'params': self.likelihood.parameters()}], lr=self.hyper_params['lh_lr'])
        #,momentum=0.9, nesterov=True, weight_decay=0)
        self.aml_run = aml_run
    def fit(self, data_loader):
        """Train the SV-DKL model by maximizing the variational ELBO.

        Args:
            data_loader (pytorch dataloader): loader wrapping the training
                dataset (X, y).
        """
        # NOTE(review): scheduler.step() is never called, so the milestones
        # have no effect — confirm whether LR decay was intended.
        scheduler = MultiStepLR(self.optimizer,
                                gamma=0.1,
                                milestones=[0.5 * self.hyper_params['epochs'], 0.75 * self.hyper_params['epochs']])
        for epoch in range(1, self.hyper_params['epochs'] + 1):
            self.model.train()
            self.likelihood.train()
            # Variational ELBO loss, normalized by the dataset size.
            mll = gpytorch.mlls.VariationalELBO(self.likelihood,
                                                self.model.gp_layer,
                                                num_data=len(data_loader.dataset))
            train_loss = 0.
            for i, (data, target) in enumerate(data_loader):
                data, target = data.to(self.device), target.to(self.device)
                self.optimizer.zero_grad()
                output = self.model(data)
                # Negative ELBO: minimize to maximize the bound.
                loss = -mll(output, target)
                loss.backward()
                self.optimizer.step()
                # Log every other batch.
                if (i+ 1) % 2 == 0:
                    print('Train Epoch: %d [%03d/%03d], Loss: %.6f' % (epoch, i + 1, len(data_loader), loss.item()))
                    if self.aml_run is not None:
                        self.aml_run.log("loss",loss.item())
    def eval(self, dataloader):
        """Evaluate the SV-DKL model on a test dataset, printing/logging
        AUC and accuracy.

        Args:
            dataloader (pytorch dataloader): loader wrapping the test
                dataset (X, y).
        """
        # NOTE(review): model/likelihood are not switched to eval mode here —
        # confirm whether train-mode behavior at inference is intended.
        y_pred_lst = []
        y_truth_lst = []
        with torch.no_grad():
            for i, (X, y) in enumerate(dataloader):
                output = self.likelihood(self.model(X.to(self.device)))
                # Threshold the Bernoulli mean at 0.5 for hard labels.
                y_pred = output.mean.ge(0.5).float().cpu().numpy()
                y_pred_lst.append(y_pred)
                y_truth_lst.append(y.numpy())
        truth = np.concatenate(y_truth_lst)
        pred = np.concatenate(y_pred_lst)
        auc = roc_auc_score(truth,pred)
        accuracy = accuracy_score(truth,pred)
        print("AUC score: ",round(auc,2))
        print("Accuracy score: ",round(accuracy,2))
        if self.aml_run is not None:
            self.aml_run.log('auc',round(auc,2))
            self.aml_run.log('Accuracy',round(accuracy,2))
| [
"sklearn.metrics.accuracy_score",
"sklearn.metrics.roc_auc_score",
"svdkl.NeuralNetLayer",
"torch.cuda.is_available",
"svdkl.DKLModel",
"torch.no_grad",
"gpytorch.likelihoods.BernoulliLikelihood",
"numpy.concatenate",
"torch.optim.lr_scheduler.MultiStepLR"
] | [((2273, 2400), 'torch.optim.lr_scheduler.MultiStepLR', 'MultiStepLR', (['self.optimizer'], {'gamma': '(0.1)', 'milestones': "[0.5 * self.hyper_params['epochs'], 0.75 * self.hyper_params['epochs']]"}), "(self.optimizer, gamma=0.1, milestones=[0.5 * self.hyper_params[\n 'epochs'], 0.75 * self.hyper_params['epochs']])\n", (2284, 2400), False, 'from torch.optim.lr_scheduler import MultiStepLR\n'), ((3739, 3754), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3752, 3754), False, 'import torch\n'), ((4068, 4095), 'numpy.concatenate', 'np.concatenate', (['y_truth_lst'], {}), '(y_truth_lst)\n', (4082, 4095), True, 'import numpy as np\n'), ((4116, 4142), 'numpy.concatenate', 'np.concatenate', (['y_pred_lst'], {}), '(y_pred_lst)\n', (4130, 4142), True, 'import numpy as np\n'), ((4170, 4196), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['truth', 'pred'], {}), '(truth, pred)\n', (4183, 4196), False, 'from sklearn.metrics import roc_auc_score, accuracy_score\n'), ((4219, 4246), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'pred'], {}), '(truth, pred)\n', (4233, 4246), False, 'from sklearn.metrics import roc_auc_score, accuracy_score\n'), ((683, 708), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (706, 708), False, 'import torch\n'), ((850, 892), 'gpytorch.likelihoods.BernoulliLikelihood', 'gpytorch.likelihoods.BernoulliLikelihood', ([], {}), '()\n', (890, 892), False, 'import gpytorch\n'), ((931, 1035), 'svdkl.NeuralNetLayer', 'NeuralNetLayer', ([], {'data_dim': "self.hyper_params['input_dim']", 'output_dim': "self.hyper_params['latent_dim']"}), "(data_dim=self.hyper_params['input_dim'], output_dim=self.\n hyper_params['latent_dim'])\n", (945, 1035), False, 'from svdkl import NeuralNetLayer, GaussianProcessLayer, DKLModel\n'), ((1152, 1359), 'svdkl.DKLModel', 'DKLModel', (['nnet_layer'], {'num_dim': "self.hyper_params['latent_dim']", 'grid_bounds': "self.hyper_params['grid_bounds']", 'grid_size': 
"self.hyper_params['grid_size']", 'num_mixtures': "self.hyper_params['num_mixtures']"}), "(nnet_layer, num_dim=self.hyper_params['latent_dim'], grid_bounds=\n self.hyper_params['grid_bounds'], grid_size=self.hyper_params[\n 'grid_size'], num_mixtures=self.hyper_params['num_mixtures'])\n", (1160, 1359), False, 'from svdkl import NeuralNetLayer, GaussianProcessLayer, DKLModel\n')] |
import os
import argparse
import numpy as np
import pickle as pkl
import skimage.external.tifffile as tiffreader
from PIL import Image
class Downsample:
    """Load an image file and rescale it by a fixed factor.

    .jpg files are opened with PIL; any other extension is assumed to be
    a TIFF and read through skimage's tifffile reader.
    """

    def __init__(self, resize_by):
        # Multiplicative scale applied to both width and height.
        self.resize_by = resize_by

    def execute(self, file_name):
        """Read *file_name*, resize it, and return a uint8 numpy array."""
        if file_name.endswith(".jpg"):
            img = Image.open(file_name).convert('I')
        else:
            # TIFF path: load via tifffile, then wrap in a PIL image.
            img = Image.fromarray(tiffreader.imread(file_name)).convert("I")
        new_size = (np.array(img.size) * self.resize_by).astype(int)
        resized = img.resize(new_size, Image.ANTIALIAS)
        return np.array(resized, dtype=np.uint8)
def tif2pkl(tif_dir, pkl_dir, name, label, name_format, start_idx, end_idx, resize_by=0.5):
    """Read a numbered image series, downsample each frame, and pickle the
    stack together with per-frame labels.

    Args:
        tif_dir: directory containing the source .tif/.jpg images.
        pkl_dir: output directory for the pickle file (created if missing).
        name: dataset name; used for per-frame labels and the file name.
        label: stack label; appended to the output file name.
        name_format: printf-style pattern with one %d slot, e.g. 'img_%d.tif'.
        start_idx, end_idx: inclusive index range of frames to read.
        resize_by: scale factor applied to each frame (default 0.5).

    Returns:
        int: 0 on success.
    """
    image_stack = []
    label_list = []
    ds = Downsample(resize_by)
    for idx in range(start_idx, end_idx + 1):
        image = ds.execute(os.path.join(tif_dir, name_format % idx))
        image_stack.append(image)
        label_list.append("%s_%d" % (name, idx))
    image_stack = np.array(image_stack)
    # Create the output directory if needed (no-op when it already exists;
    # avoids the exists/mkdir race of the original).
    os.makedirs(pkl_dir, exist_ok=True)
    out_path = os.path.join(pkl_dir, "%s_%s.pkl" % (name, label))
    # Context manager guarantees the handle is closed even on error
    # (the original left the file object open).
    with open(out_path, 'wb') as f:
        pkl.dump([image_stack, label_list], f, protocol=4)
    return 0
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Transform the .tif images to a .pkl stack.')
    # (flag, default, type, help) — one row per CLI option.
    cli_options = [
        ('--tif-dir', './imgs/', str, 'The directory of your .tif/.jpg images.'),
        ('--pkl-dir', './data/hdt1_phase.pkl', str, 'The directory for .pkl file.'),
        ('--name', 'ds1', str, 'The name of this dataset.'),
        ('--label', 'phase', str, 'The label of the image stack.'),
        ('--name-format', 'images_%d.tif', str, 'The specific format for images.'),
        ('--start-idx', 0, int, 'The start index.'),
        ('--end-idx', 100, int, 'The end index.'),
        ('--resize-by', 1, float, 'Resize the image for sufficient training.'),
    ]
    for opt, default, opt_type, help_text in cli_options:
        parser.add_argument(opt, default=default, type=opt_type, help=help_text)
    args = parser.parse_args()
    tif2pkl(args.tif_dir, args.pkl_dir, args.name, args.label, args.name_format,
            args.start_idx, args.end_idx, resize_by=args.resize_by)
| [
"os.makedirs",
"skimage.external.tifffile.imread",
"argparse.ArgumentParser",
"os.path.exists",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"os.path.join"
] | [((1066, 1087), 'numpy.array', 'np.array', (['image_stack'], {}), '(image_stack)\n', (1074, 1087), True, 'import numpy as np\n'), ((1324, 1393), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Transform the .tif images to a .pkl stack."""'], {}), "('Transform the .tif images to a .pkl stack.')\n", (1347, 1393), False, 'import argparse\n'), ((653, 684), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (661, 684), True, 'import numpy as np\n'), ((1099, 1122), 'os.path.exists', 'os.path.exists', (['pkl_dir'], {}), '(pkl_dir)\n', (1113, 1122), False, 'import os\n'), ((1132, 1152), 'os.makedirs', 'os.makedirs', (['pkl_dir'], {}), '(pkl_dir)\n', (1143, 1152), False, 'import os\n'), ((388, 416), 'skimage.external.tifffile.imread', 'tiffreader.imread', (['file_name'], {}), '(file_name)\n', (405, 416), True, 'import skimage.external.tifffile as tiffreader\n'), ((492, 512), 'numpy.array', 'np.array', (['image.size'], {}), '(image.size)\n', (500, 512), True, 'import numpy as np\n'), ((923, 963), 'os.path.join', 'os.path.join', (['tif_dir', '(name_format % idx)'], {}), '(tif_dir, name_format % idx)\n', (935, 963), False, 'import os\n'), ((1198, 1248), 'os.path.join', 'os.path.join', (['pkl_dir', "('%s_%s.pkl' % (name, label))"], {}), "(pkl_dir, '%s_%s.pkl' % (name, label))\n", (1210, 1248), False, 'import os\n'), ((319, 340), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (329, 340), False, 'from PIL import Image\n'), ((437, 459), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (452, 459), False, 'from PIL import Image\n')] |
def scatterg(x, y, label, num_labels, x1_axis, x2_axis, ptitle):
    """Scatter-plot (x, y) points colored by integer class label.

    Args:
        x, y: point coordinates.
        label: per-point integer class labels (any array-like).
        num_labels: number of distinct label values.
        x1_axis, x2_axis: axis captions.
        ptitle: plot title.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm  # was aliased 'palette' and then shadowed
    label = np.asarray(label).astype(int)
    # One color per label value; the +1 offset shifts label indexing so the
    # smallest label maps to row 0 (presumably labels may include -1, e.g.
    # DBSCAN noise — TODO confirm with callers).
    palette = cm.rainbow(np.linspace(0, 1, num_labels + 1))
    colors = palette[label.astype(np.int64) + 1, :]
    plt.figure()
    plt.scatter(x, y, c=colors)
    plt.title(ptitle)
    plt.xlabel(x1_axis)
    plt.ylabel(x2_axis)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"numpy.asarray",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((168, 185), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (178, 185), True, 'import numpy as np\n'), ((362, 374), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (372, 374), True, 'import matplotlib.pyplot as plt\n'), ((379, 406), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'colors'}), '(x, y, c=colors)\n', (390, 406), True, 'import matplotlib.pyplot as plt\n'), ((409, 426), 'matplotlib.pyplot.title', 'plt.title', (['ptitle'], {}), '(ptitle)\n', (418, 426), True, 'import matplotlib.pyplot as plt\n'), ((431, 450), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x1_axis'], {}), '(x1_axis)\n', (441, 450), True, 'import matplotlib.pyplot as plt\n'), ((455, 474), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['x2_axis'], {}), '(x2_axis)\n', (465, 474), True, 'import matplotlib.pyplot as plt\n'), ((269, 302), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(num_labels + 1)'], {}), '(0, 1, num_labels + 1)\n', (280, 302), True, 'import numpy as np\n')] |
""" Normalization methods """
import numpy
from math import sqrt
from numpy import amin
from numpy import amax
class get_methods:
    """Resolve a method-group name into a list of normalization codes.

    Instantiating the class returns a plain list (not an instance):
    a list argument is passed through unchanged; a known group name is
    expanded, optionally prefixed with 'UN' (unnormalized).
    """

    # Named groups of normalization-method codes.
    _GROUPS = {
        'ALL': ['MC','ZS','PS','VSS','PT','MM','MX','DS','MD','TH','VTH','SG'],
        'MS':  ['MC','ZS','PS','VSS','PT','TH','VTH','SG'],
        'MM':  ['MM','MX'],
        'SC':  ['MC','ZS','PS','VSS','MM','MX','DS','MD'],
        'TS':  ['PT','TH','VTH','SG'],
    }

    def __new__(self, mtype = 'ALL', include_un = True):
        # Explicit code lists are returned untouched.
        if type(mtype) is list:
            return mtype
        if type(mtype) is not str:
            print('Error. Wrong input')
            return None
        mths = get_methods._GROUPS.get(mtype)
        if mths is None:
            print('Unknown type!!!')
            return None
        if include_un == True:  # keep the original loose comparison semantics
            return ['UN'] + mths
        return mths
class wrapper:
    """Per-feature normalization methods.

    Every method takes a 1-D training column and the matching test column
    and returns the pair (normalized_train, normalized_test).  All
    statistics (mean, std, min, max, median, ...) are computed on the
    TRAINING data only and then applied to both splits, so no test-set
    information leaks into the transform.

    Fixes vs. the original:
      * ``numpy.finfo(numpy.float)`` replaced with ``numpy.finfo(float)``
        (``numpy.float`` was removed in NumPy 1.24 and raised
        AttributeError at runtime).
      * zscore/paretoscaling used ``numpy.std(test)`` and minmax/maxnorm
        used ``amax(test)`` — inconsistent with the sibling methods (vss,
        hampelsimple, signorm) and a test-set leak; now train-based.
    """

    def meancenter(self, train, test):
        """Subtract the training mean from both splits."""
        mn = numpy.mean(train)
        n_train = train - mn
        n_test = test - mn
        return n_train, n_test

    def zscore(self, train, test):
        """Standardize to zero mean / unit variance using train statistics."""
        mn = numpy.mean(train)
        st = numpy.std(train)  # BUG FIX: was numpy.std(test)
        if st == 0:
            st = numpy.finfo(float).eps
        n_train = (train - mn) / st
        n_test = (test - mn) / st
        return n_train, n_test

    def paretoscaling(self, train, test):
        """Center by the train mean and divide by sqrt(train std)."""
        mn = numpy.mean(train)
        st = numpy.std(train)  # BUG FIX: was numpy.std(test)
        if st == 0:
            st = numpy.finfo(float).eps
        n_train = (train - mn) / sqrt(st)
        n_test = (test - mn) / sqrt(st)
        return n_train, n_test

    def vss(self, train, test):
        """Variable-stability scaling: z-score weighted by the coefficient
        of variation mean/std."""
        mn = numpy.mean(train)
        st = numpy.std(train)
        if st == 0:
            st = numpy.finfo(float).eps
        cv = mn / st
        n_train = (train - mn) / st * cv
        n_test = (test - mn) / st * cv
        return n_train, n_test

    def power(self, train, test):
        """Power transform: shift to non-negative, take sqrt, then center.

        NOTE(review): each split is shifted by its *own* minimum here;
        confirm whether test should instead use the train minimum.
        """
        train = train - amin(train, axis=0)
        test = test - amin(test, axis=0)
        train = numpy.sqrt(train)
        test = numpy.sqrt(test)
        n_train, n_test = self.meancenter(train, test)
        return n_train, n_test

    def minmax(self, train, test, ntype):
        """Min-max scaling.  ntype=1 -> scale to [0, 1]; ntype=-1 -> the
        inverse form rescaled back to the original range."""
        mn = amin(train, axis=0)
        mx = amax(train, axis=0)  # BUG FIX: was amax(test)
        span = numpy.finfo(float).eps + mx - mn
        x1 = (train - mn) / span
        x2 = (test - mn) / span
        if ntype == 1:
            n_train = x1
            n_test = x2
        elif ntype == -1:
            n_train = x1 * (mx - mn) - mn
            n_test = x2 * (mx - mn) - mn
        return n_train, n_test

    def maxnorm(self, train, test):
        """Divide both splits by the training maximum."""
        mx = amax(train, axis=0)  # BUG FIX: was amax(test)
        denom = numpy.finfo(float).eps + mx
        n_train = train / denom
        n_test = test / denom
        return n_train, n_test

    def decscale(self, train, test):
        """Decimal scaling: shift to non-negative, divide by the power of
        ten covering the training maximum.

        NOTE(review): the test split is shifted by its own minimum;
        confirm whether that is intended.
        """
        train = train - amin(train, axis=0)
        test = test - amin(test, axis=0)
        mx = amax(train, axis=0)
        f_range = numpy.ceil(numpy.log10(max(mx, numpy.finfo(float).eps)))
        n_train = train / numpy.power(10, f_range)
        n_test = test / numpy.power(10, f_range)
        return n_train, n_test

    def mmad(self, train, test):
        """Robust scaling by the train median and median absolute deviation."""
        med = numpy.median(train)
        mad_value = numpy.median(abs(train - med))
        if mad_value == 0:
            mad_value = numpy.finfo(float).eps
        n_train = (train - med) / mad_value
        n_test = (test - med) / mad_value
        return n_train, n_test

    def hampel(self, train, test):
        """Tanh estimator using a Hampel influence function with cut points
        at the 70th/85th/95th percentile of |x - median|."""
        med = numpy.median(train)
        y1 = train - med
        abs_y1 = abs(y1)
        a = numpy.quantile(abs_y1, 0.70)
        b = numpy.quantile(abs_y1, 0.85)
        c = numpy.quantile(abs_y1, 0.95)
        x1 = numpy.zeros(train.shape)
        for j in range(0, train.shape[0]):
            if abs_y1[j] >= 0 and abs_y1[j] <= a:
                x1[j] = y1[j]
            elif abs_y1[j] > a and abs_y1[j] <= b:
                x1[j] = a * numpy.sign(y1[j])
            elif abs_y1[j] > b and abs_y1[j] <= c:
                tmp = a * numpy.sign(y1[j])
                x1[j] = tmp * ((c - abs_y1[j]) / (c - b))
            elif abs_y1[j] > c:
                x1[j] = 0
        mn = numpy.mean(x1)
        st = numpy.std(x1)
        eps = numpy.finfo(float).eps
        n_train = 0.5 * (numpy.tanh(0.01 * ((train - mn) / (st + eps))) + 1)
        n_test = 0.5 * (numpy.tanh(0.01 * ((test - mn) / (st + eps))) + 1)
        return n_train, n_test

    def hampelsimple(self, train, test):
        """Simplified tanh estimator using plain train mean/std."""
        mn = numpy.mean(train)
        st = numpy.std(train)
        if st == 0:
            st = numpy.finfo(float).eps
        n_train = 0.5 * (numpy.tanh(0.01 * ((train - mn) / st)) + 1)
        n_test = 0.5 * (numpy.tanh(0.01 * ((test - mn) / st)) + 1)
        return n_train, n_test

    def signorm(self, train, test):
        """Sigmoid normalization of the z-scored data into (-1, 1)."""
        mn = numpy.mean(train)
        st = numpy.std(train)
        if st == 0:
            st = numpy.finfo(float).eps
        x1 = (train - mn) / st
        x2 = (test - mn) / st
        n_train = (1 - numpy.exp(-x1)) / (1 + numpy.exp(-x1))
        n_test = (1 - numpy.exp(-x2)) / (1 + numpy.exp(-x2))
        return n_train, n_test

    def ndata(self, train, test, solution, d_dict):
        """Normalize every feature column with the method selected for it.

        Args:
            train, test: 2-D arrays (samples x features).
            solution: per-feature integer method codes.
            d_dict: maps integer code -> method name ('UN', 'MC', 'ZS', ...).

        Returns:
            (n_train, n_test), or None when a code/name is not recognized
            (the original silently left unknown-name columns as zeros).
        """
        n_train = numpy.zeros(train.shape)
        n_test = numpy.zeros(test.shape)
        # Dispatch table: method name -> callable(train_col, test_col).
        dispatch = {
            'UN': lambda tr, te: (tr, te),
            'MC': self.meancenter,
            'ZS': self.zscore,
            'PS': self.paretoscaling,
            'VSS': self.vss,
            'PT': self.power,
            'MM': lambda tr, te: self.minmax(tr, te, 1),
            'MX': self.maxnorm,
            'DS': self.decscale,
            'MD': self.mmad,
            'TH': self.hampel,
            'VTH': self.hampelsimple,
            'SG': self.signorm,
        }
        for i in range(0, train.shape[1]):
            method = d_dict.get(int(solution[i]))
            fn = dispatch.get(method)
            if fn is None:
                print('Method not specified')
                return
            n_train[:, i], n_test[:, i] = fn(train[:, i], test[:, i])
        return n_train, n_test
"numpy.quantile",
"numpy.amin",
"math.sqrt",
"numpy.tanh",
"numpy.std",
"numpy.median",
"numpy.power",
"numpy.zeros",
"numpy.amax",
"numpy.finfo",
"numpy.mean",
"numpy.exp",
"numpy.sign",
"numpy.sqrt"
] | [((1071, 1088), 'numpy.mean', 'numpy.mean', (['train'], {}), '(train)\n', (1081, 1088), False, 'import numpy\n'), ((1226, 1243), 'numpy.mean', 'numpy.mean', (['train'], {}), '(train)\n', (1236, 1243), False, 'import numpy\n'), ((1257, 1272), 'numpy.std', 'numpy.std', (['test'], {}), '(test)\n', (1266, 1272), False, 'import numpy\n'), ((1498, 1515), 'numpy.mean', 'numpy.mean', (['train'], {}), '(train)\n', (1508, 1515), False, 'import numpy\n'), ((1529, 1544), 'numpy.std', 'numpy.std', (['test'], {}), '(test)\n', (1538, 1544), False, 'import numpy\n'), ((1768, 1785), 'numpy.mean', 'numpy.mean', (['train'], {}), '(train)\n', (1778, 1785), False, 'import numpy\n'), ((1799, 1815), 'numpy.std', 'numpy.std', (['train'], {}), '(train)\n', (1808, 1815), False, 'import numpy\n'), ((2185, 2202), 'numpy.sqrt', 'numpy.sqrt', (['train'], {}), '(train)\n', (2195, 2202), False, 'import numpy\n'), ((2218, 2234), 'numpy.sqrt', 'numpy.sqrt', (['test'], {}), '(test)\n', (2228, 2234), False, 'import numpy\n'), ((2372, 2391), 'numpy.amin', 'amin', (['train'], {'axis': '(0)'}), '(train, axis=0)\n', (2376, 2391), False, 'from numpy import amin\n'), ((2404, 2422), 'numpy.amax', 'amax', (['test'], {'axis': '(0)'}), '(test, axis=0)\n', (2408, 2422), False, 'from numpy import amax\n'), ((2840, 2858), 'numpy.amax', 'amax', (['test'], {'axis': '(0)'}), '(test, axis=0)\n', (2844, 2858), False, 'from numpy import amax\n'), ((3141, 3160), 'numpy.amax', 'amax', (['train'], {'axis': '(0)'}), '(train, axis=0)\n', (3145, 3160), False, 'from numpy import amax\n'), ((3419, 3438), 'numpy.median', 'numpy.median', (['train'], {}), '(train)\n', (3431, 3438), False, 'import numpy\n'), ((3489, 3505), 'numpy.median', 'numpy.median', (['x1'], {}), '(x1)\n', (3501, 3505), False, 'import numpy\n'), ((3758, 3777), 'numpy.median', 'numpy.median', (['train'], {}), '(train)\n', (3770, 3777), False, 'import numpy\n'), ((3840, 3867), 'numpy.quantile', 'numpy.quantile', (['abs_y1', '(0.7)'], {}), '(abs_y1, 0.7)\n', 
(3854, 3867), False, 'import numpy\n'), ((3881, 3909), 'numpy.quantile', 'numpy.quantile', (['abs_y1', '(0.85)'], {}), '(abs_y1, 0.85)\n', (3895, 3909), False, 'import numpy\n'), ((3922, 3950), 'numpy.quantile', 'numpy.quantile', (['abs_y1', '(0.95)'], {}), '(abs_y1, 0.95)\n', (3936, 3950), False, 'import numpy\n'), ((3964, 3988), 'numpy.zeros', 'numpy.zeros', (['train.shape'], {}), '(train.shape)\n', (3975, 3988), False, 'import numpy\n'), ((4437, 4451), 'numpy.mean', 'numpy.mean', (['x1'], {}), '(x1)\n', (4447, 4451), False, 'import numpy\n'), ((4465, 4478), 'numpy.std', 'numpy.std', (['x1'], {}), '(x1)\n', (4474, 4478), False, 'import numpy\n'), ((4771, 4788), 'numpy.mean', 'numpy.mean', (['train'], {}), '(train)\n', (4781, 4788), False, 'import numpy\n'), ((4802, 4818), 'numpy.std', 'numpy.std', (['train'], {}), '(train)\n', (4811, 4818), False, 'import numpy\n'), ((5105, 5122), 'numpy.mean', 'numpy.mean', (['train'], {}), '(train)\n', (5115, 5122), False, 'import numpy\n'), ((5136, 5152), 'numpy.std', 'numpy.std', (['train'], {}), '(train)\n', (5145, 5152), False, 'import numpy\n'), ((5505, 5529), 'numpy.zeros', 'numpy.zeros', (['train.shape'], {}), '(train.shape)\n', (5516, 5529), False, 'import numpy\n'), ((5547, 5570), 'numpy.zeros', 'numpy.zeros', (['test.shape'], {}), '(test.shape)\n', (5558, 5570), False, 'import numpy\n'), ((1645, 1653), 'math.sqrt', 'sqrt', (['st'], {}), '(st)\n', (1649, 1653), False, 'from math import sqrt\n'), ((1685, 1693), 'math.sqrt', 'sqrt', (['st'], {}), '(st)\n', (1689, 1693), False, 'from math import sqrt\n'), ((2108, 2127), 'numpy.amin', 'amin', (['train'], {'axis': '(0)'}), '(train, axis=0)\n', (2112, 2127), False, 'from numpy import amin\n'), ((2150, 2168), 'numpy.amin', 'amin', (['test'], {'axis': '(0)'}), '(test, axis=0)\n', (2154, 2168), False, 'from numpy import amin\n'), ((3067, 3086), 'numpy.amin', 'amin', (['train'], {'axis': '(0)'}), '(train, axis=0)\n', (3071, 3086), False, 'from numpy import amin\n'), ((3109, 
3127), 'numpy.amin', 'amin', (['test'], {'axis': '(0)'}), '(test, axis=0)\n', (3113, 3127), False, 'from numpy import amin\n'), ((3264, 3288), 'numpy.power', 'numpy.power', (['(10)', 'f_range'], {}), '(10, f_range)\n', (3275, 3288), False, 'import numpy\n'), ((3310, 3334), 'numpy.power', 'numpy.power', (['(10)', 'f_range'], {}), '(10, f_range)\n', (3321, 3334), False, 'import numpy\n'), ((1311, 1335), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (1322, 1335), False, 'import numpy\n'), ((1583, 1607), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (1594, 1607), False, 'import numpy\n'), ((1854, 1878), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (1865, 1878), False, 'import numpy\n'), ((3558, 3582), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (3569, 3582), False, 'import numpy\n'), ((4857, 4881), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (4868, 4881), False, 'import numpy\n'), ((4911, 4949), 'numpy.tanh', 'numpy.tanh', (['(0.01 * ((train - mn) / st))'], {}), '(0.01 * ((train - mn) / st))\n', (4921, 4949), False, 'import numpy\n'), ((4979, 5016), 'numpy.tanh', 'numpy.tanh', (['(0.01 * ((test - mn) / st))'], {}), '(0.01 * ((test - mn) / st))\n', (4989, 5016), False, 'import numpy\n'), ((5191, 5215), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (5202, 5215), False, 'import numpy\n'), ((5304, 5318), 'numpy.exp', 'numpy.exp', (['(-x1)'], {}), '(-x1)\n', (5313, 5318), False, 'import numpy\n'), ((5325, 5339), 'numpy.exp', 'numpy.exp', (['(-x1)'], {}), '(-x1)\n', (5334, 5339), False, 'import numpy\n'), ((5363, 5377), 'numpy.exp', 'numpy.exp', (['(-x2)'], {}), '(-x2)\n', (5372, 5377), False, 'import numpy\n'), ((5384, 5398), 'numpy.exp', 'numpy.exp', (['(-x2)'], {}), '(-x2)\n', (5393, 5398), False, 'import numpy\n'), ((2883, 2907), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (2894, 
2907), False, 'import numpy\n'), ((2941, 2965), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (2952, 2965), False, 'import numpy\n'), ((2451, 2475), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (2462, 2475), False, 'import numpy\n'), ((2519, 2543), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (2530, 2543), False, 'import numpy\n'), ((3208, 3232), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (3219, 3232), False, 'import numpy\n'), ((4193, 4210), 'numpy.sign', 'numpy.sign', (['y1[j]'], {}), '(y1[j])\n', (4203, 4210), False, 'import numpy\n'), ((4289, 4306), 'numpy.sign', 'numpy.sign', (['y1[j]'], {}), '(y1[j])\n', (4299, 4306), False, 'import numpy\n'), ((4544, 4568), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (4555, 4568), False, 'import numpy\n'), ((4644, 4668), 'numpy.finfo', 'numpy.finfo', (['numpy.float'], {}), '(numpy.float)\n', (4655, 4668), False, 'import numpy\n')] |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
# Backend configurations shared by every ``inject_backend_tests`` decorator
# below: CPU (with and without iDeep), a plain CUDA entry, and ChainerX on
# native and two CUDA devices.
_backend_params = (
    # CPU tests
    testing.product({
        'use_cuda': [False],
        'use_ideep': ['never', 'always'],
    })
    # GPU tests
    + [{'use_cuda': True}]
    # ChainerX tests
    + [
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
@testing.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product_dict(
    [{'dtype': numpy.float16},
     {'dtype': numpy.float32},
     {'dtype': numpy.float64},
     ],
    [{'axes': [1, 2], 'offsets': 0},
     {'axes': [1, 2], 'offsets': [0, 1, 1]},
     {'axes': 1, 'offsets': 1},
     {'axes': 1, 'offsets': [0, 1, 1]},
     {'axes': [], 'offsets': 0, 'new_axes': 0},
     {'axes': [], 'offsets': 0, 'new_axes': 2},
     {'axes': [], 'offsets': 0, 'new_axes': 3},
     {'slices': (1, -1, 0)},
     {'slices': (1, -1)},
     {'slices': (1, Ellipsis, -1)},
     {'slices': (1, None, Ellipsis, None, -1)},
     ]
))
class TestGetItem(testing.FunctionTestCase):
    """Tests ``functions.get_item`` with basic (slice/int/None) indexing.

    Each parameterization either supplies ``slices`` directly or describes
    them via ``axes``/``offsets`` (and optionally ``new_axes``), which
    ``setUp`` converts into a concrete tuple of slices.
    """

    def setUp(self):
        # Extent taken along each sliced axis when building slices from
        # axes/offsets.  NOTE(review): the generated input is (4, 3, 2), so
        # this only bounds the slice lengths, not the input shape itself.
        shape = (4, 2, 1)
        if not hasattr(self, 'slices'):
            axes = self.axes
            offsets = self.offsets

            # Convert axes, offsets and shape to slices
            if isinstance(offsets, int):
                # A scalar offset applies to every axis.
                offsets = tuple([offsets] * len(shape))
            if isinstance(axes, int):
                axes = tuple([axes])
            slices = [slice(None)] * len(shape)
            for axis in axes:
                slices[axis] = slice(
                    offsets[axis], offsets[axis] + shape[axis])

            if hasattr(self, 'new_axes'):
                # Insert a None (newaxis) at the requested position.
                slices.insert(self.new_axes, None)
            self.axes = axes
            self.offsets = offsets
            self.slices = tuple(slices)

        # Loosened tolerances cover the float16 parameterization.
        self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
        self.check_double_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})

    def generate_inputs(self):
        x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(self.dtype)
        return x,

    def forward(self, inputs, device):
        x, = inputs
        y = functions.get_item(x, self.slices)
        return y,

    def forward_expected(self, inputs):
        # Reference result: plain NumPy indexing with the same slices.
        x, = inputs
        y = x[self.slices]
        return numpy.asarray(y),
@testing.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product_dict(
    [{'dtype': numpy.float16},
     {'dtype': numpy.float32},
     {'dtype': numpy.float64},
     ],
    [{'slices': []},
     {'slices': ([],)},
     {'slices': ([[]],)},
     # ``numpy.bool`` was merely an alias of the builtin ``bool``; the alias
     # was deprecated in NumPy 1.20 and removed in 1.24, so use ``bool``.
     {'slices': numpy.array([], dtype=bool)},
     {'slices': (1, [1])},
     {'slices': ([1], slice(1, 2))},
     {'slices': [1, 0]},
     {'slices': ([1, 0],)},
     {'slices': numpy.array([[1, 0], [2, 3]])},
     {'slices': ([1, 0], [1, 1])},
     {'slices': ([1, 0], slice(None), [[1, 1], [1, 1]])},
     {'slices': ([1, 0], slice(1, 2), [0, 0])},
     {'slices': ([[1, 1], [1, 0]], slice(1, 2), 1)},
     {'slices': numpy.array([True] * 18 + [False] * 6).reshape(4, 3, 2)},
     {'slices': numpy.array([True, False, False, True])},
     {'slices': (slice(None), numpy.array([True, False, True]))},
     {'slices': numpy.array([False, False, False, False])},
     {'slices': (3, 2, Ellipsis, 1)},
     {'slices': (numpy.array(False)), 'input_shape': ()},
     {'slices': (numpy.array(True)), 'input_shape': ()},
     ]
))
class TestGetItemAdvanced(testing.FunctionTestCase):
    """Tests ``functions.get_item`` with advanced (array/boolean) indexing."""

    # Default input shape; parameterizations with scalar slices override it.
    input_shape = (4, 3, 2)

    def setUp(self):
        # Loosened tolerances cover the float16 parameterization.
        self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
        self.check_double_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})

    def generate_inputs(self):
        x = numpy.random.uniform(-1, 1, self.input_shape).astype(self.dtype)
        return x,

    def _convert_slices(self, slices, device):
        # Converts advanced indexing slices (of numpy.ndarray) to respective
        # backend arrays, recursing through lists and tuples.
        if isinstance(slices, list):
            return [self._convert_slices(a, device) for a in slices]
        if isinstance(slices, tuple):
            return tuple([self._convert_slices(a, device) for a in slices])
        if isinstance(slices, numpy.ndarray):
            return device.send(slices)
        return slices

    def forward(self, inputs, device):
        x, = inputs
        slices = self._convert_slices(self.slices, device)
        y = functions.get_item(x, slices)
        return y,

    def forward_expected(self, inputs):
        # Reference result: plain NumPy advanced indexing.
        x, = inputs
        y = x[self.slices]
        return numpy.asarray(y),
@testing.parameterize(
    {'slices': ([1, 0], [1, 1]), 'sliced_shape': (2, 2)},
    {'slices': ([1, 0], slice(None), [[1, 1], [1, 1]]),
     'sliced_shape': (2, 2, 3)},
    {'slices': ([1, 0], [1, 1], [0, 0]), 'sliced_shape': (2,)},
    {'slices': (slice(None), numpy.array([True, False, True])),
     'sliced_shape': (4, 2, 2)},
)
class TestCupyIndicesGetItem(unittest.TestCase):
    """GPU-only tests: advanced indices given as cupy arrays on device."""

    def setUp(self):
        self.x_data = numpy.random.uniform(
            -1, 1, (4, 3, 2)).astype(numpy.float32)
        self.gy_data = numpy.random.uniform(
            -1, 1, self.sliced_shape).astype(numpy.float32)

    def check_forward(self, x_data):
        # Move every array/list index onto the GPU before indexing.
        slices = []
        for i, s in enumerate(self.slices):
            if isinstance(s, numpy.ndarray):
                s = chainer.backends.cuda.cupy.array(s)
            if isinstance(s, list):
                s = chainer.backends.cuda.cupy.array(s, dtype=numpy.int32)
            slices.append(s)
        slices = tuple(slices)
        x = chainer.Variable(x_data)
        y = functions.get_item(x, slices)
        self.assertEqual(y.data.dtype, numpy.float32)
        # Compare against NumPy indexing with the original (host) slices.
        numpy.testing.assert_equal(cuda.to_cpu(x_data)[self.slices],
                                   cuda.to_cpu(y.data))

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x_data))

    def check_backward(self, x_data, y_grad):
        # Same device transfer as in check_forward.
        slices = []
        for i, s in enumerate(self.slices):
            if isinstance(s, numpy.ndarray):
                s = chainer.backends.cuda.cupy.array(s)
            if isinstance(s, list):
                s = chainer.backends.cuda.cupy.array(s, dtype=numpy.int32)
            slices.append(s)
        slices = tuple(slices)

        def f(x):
            return functions.get_item(x, slices)

        gradient_check.check_backward(
            f, (x_data,), y_grad, dtype='d')

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x_data),
                            cuda.to_gpu(self.gy_data))
class TestInvalidGetItem(unittest.TestCase):
    """Indexing with more than one Ellipsis must raise in debug mode."""

    def setUp(self):
        # Remember the previous debug flag so tearDown can restore it.
        self._saved_debug = chainer.is_debug()
        chainer.set_debug(True)
        self.x_data = numpy.random.uniform(-1, 1, (4, 3, 2))

    def tearDown(self):
        chainer.set_debug(self._saved_debug)

    def test_multiple_ellipsis(self):
        bad_index = (Ellipsis, Ellipsis)
        with self.assertRaises(ValueError):
            functions.get_item(self.x_data, bad_index)


testing.run_module(__name__, __file__)
| [
"chainer.testing.product",
"chainer.Variable",
"chainer.gradient_check.check_backward",
"numpy.random.uniform",
"chainer.backends.cuda.to_cpu",
"chainer.backends.cuda.to_gpu",
"chainer.is_debug",
"chainer.set_debug",
"numpy.asarray",
"chainer.backends.cuda.cupy.array",
"numpy.array",
"chainer.... | [((611, 662), 'chainer.testing.inject_backend_tests', 'testing.inject_backend_tests', (['None', '_backend_params'], {}), '(None, _backend_params)\n', (639, 662), False, 'from chainer import testing\n'), ((2586, 2637), 'chainer.testing.inject_backend_tests', 'testing.inject_backend_tests', (['None', '_backend_params'], {}), '(None, _backend_params)\n', (2614, 2637), False, 'from chainer import testing\n'), ((7314, 7352), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (7332, 7352), False, 'from chainer import testing\n'), ((248, 320), 'chainer.testing.product', 'testing.product', (["{'use_cuda': [False], 'use_ideep': ['never', 'always']}"], {}), "({'use_cuda': [False], 'use_ideep': ['never', 'always']})\n", (263, 320), False, 'from chainer import testing\n'), ((2409, 2443), 'chainer.functions.get_item', 'functions.get_item', (['x', 'self.slices'], {}), '(x, self.slices)\n', (2427, 2443), False, 'from chainer import functions\n'), ((686, 1196), 'chainer.testing.product_dict', 'testing.product_dict', (["[{'dtype': numpy.float16}, {'dtype': numpy.float32}, {'dtype': numpy.float64}]", "[{'axes': [1, 2], 'offsets': 0}, {'axes': [1, 2], 'offsets': [0, 1, 1]}, {\n 'axes': 1, 'offsets': 1}, {'axes': 1, 'offsets': [0, 1, 1]}, {'axes': [\n ], 'offsets': 0, 'new_axes': 0}, {'axes': [], 'offsets': 0, 'new_axes':\n 2}, {'axes': [], 'offsets': 0, 'new_axes': 3}, {'slices': (1, -1, 0)},\n {'slices': (1, -1)}, {'slices': (1, Ellipsis, -1)}, {'slices': (1, None,\n Ellipsis, None, -1)}]"], {}), "([{'dtype': numpy.float16}, {'dtype': numpy.float32}, {\n 'dtype': numpy.float64}], [{'axes': [1, 2], 'offsets': 0}, {'axes': [1,\n 2], 'offsets': [0, 1, 1]}, {'axes': 1, 'offsets': 1}, {'axes': 1,\n 'offsets': [0, 1, 1]}, {'axes': [], 'offsets': 0, 'new_axes': 0}, {\n 'axes': [], 'offsets': 0, 'new_axes': 2}, {'axes': [], 'offsets': 0,\n 'new_axes': 3}, {'slices': (1, -1, 0)}, {'slices': (1, -1)}, {'slices':\n (1, 
Ellipsis, -1)}, {'slices': (1, None, Ellipsis, None, -1)}])\n", (706, 1196), False, 'from chainer import testing\n'), ((4680, 4709), 'chainer.functions.get_item', 'functions.get_item', (['x', 'slices'], {}), '(x, slices)\n', (4698, 4709), False, 'from chainer import functions\n'), ((5842, 5866), 'chainer.Variable', 'chainer.Variable', (['x_data'], {}), '(x_data)\n', (5858, 5866), False, 'import chainer\n'), ((5879, 5908), 'chainer.functions.get_item', 'functions.get_item', (['x', 'slices'], {}), '(x, slices)\n', (5897, 5908), False, 'from chainer import functions\n'), ((6648, 6710), 'chainer.gradient_check.check_backward', 'gradient_check.check_backward', (['f', '(x_data,)', 'y_grad'], {'dtype': '"""d"""'}), "(f, (x_data,), y_grad, dtype='d')\n", (6677, 6710), False, 'from chainer import gradient_check\n'), ((6979, 6997), 'chainer.is_debug', 'chainer.is_debug', ([], {}), '()\n', (6995, 6997), False, 'import chainer\n'), ((7006, 7029), 'chainer.set_debug', 'chainer.set_debug', (['(True)'], {}), '(True)\n', (7023, 7029), False, 'import chainer\n'), ((7053, 7091), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(4, 3, 2)'], {}), '(-1, 1, (4, 3, 2))\n', (7073, 7091), False, 'import numpy\n'), ((7125, 7162), 'chainer.set_debug', 'chainer.set_debug', (['self.default_debug'], {}), '(self.default_debug)\n', (7142, 7162), False, 'import chainer\n'), ((2565, 2581), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (2578, 2581), False, 'import numpy\n'), ((4831, 4847), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (4844, 4847), False, 'import numpy\n'), ((6067, 6086), 'chainer.backends.cuda.to_cpu', 'cuda.to_cpu', (['y.data'], {}), '(y.data)\n', (6078, 6086), False, 'from chainer.backends import cuda\n'), ((6162, 6186), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x_data'], {}), '(self.x_data)\n', (6173, 6186), False, 'from chainer.backends import cuda\n'), ((6609, 6638), 'chainer.functions.get_item', 'functions.get_item', (['x', 
'slices'], {}), '(x, slices)\n', (6627, 6638), False, 'from chainer import functions\n'), ((6800, 6824), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x_data'], {}), '(self.x_data)\n', (6811, 6824), False, 'from chainer.backends import cuda\n'), ((6854, 6879), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.gy_data'], {}), '(self.gy_data)\n', (6865, 6879), False, 'from chainer.backends import cuda\n'), ((5114, 5146), 'numpy.array', 'numpy.array', (['[True, False, True]'], {}), '([True, False, True])\n', (5125, 5146), False, 'import numpy\n'), ((7258, 7311), 'chainer.functions.get_item', 'functions.get_item', (['self.x_data', '(Ellipsis, Ellipsis)'], {}), '(self.x_data, (Ellipsis, Ellipsis))\n', (7276, 7311), False, 'from chainer import functions\n'), ((2261, 2299), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(4, 3, 2)'], {}), '(-1, 1, (4, 3, 2))\n', (2281, 2299), False, 'import numpy\n'), ((3988, 4033), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.input_shape'], {}), '(-1, 1, self.input_shape)\n', (4008, 4033), False, 'import numpy\n'), ((5277, 5315), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(4, 3, 2)'], {}), '(-1, 1, (4, 3, 2))\n', (5297, 5315), False, 'import numpy\n'), ((5374, 5420), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.sliced_shape'], {}), '(-1, 1, self.sliced_shape)\n', (5394, 5420), False, 'import numpy\n'), ((5623, 5658), 'chainer.backends.cuda.cupy.array', 'chainer.backends.cuda.cupy.array', (['s'], {}), '(s)\n', (5655, 5658), False, 'import chainer\n'), ((5715, 5769), 'chainer.backends.cuda.cupy.array', 'chainer.backends.cuda.cupy.array', (['s'], {'dtype': 'numpy.int32'}), '(s, dtype=numpy.int32)\n', (5747, 5769), False, 'import chainer\n'), ((5998, 6017), 'chainer.backends.cuda.to_cpu', 'cuda.to_cpu', (['x_data'], {}), '(x_data)\n', (6009, 6017), False, 'from chainer.backends import cuda\n'), ((6364, 6399), 
'chainer.backends.cuda.cupy.array', 'chainer.backends.cuda.cupy.array', (['s'], {}), '(s)\n', (6396, 6399), False, 'import chainer\n'), ((6456, 6510), 'chainer.backends.cuda.cupy.array', 'chainer.backends.cuda.cupy.array', (['s'], {'dtype': 'numpy.int32'}), '(s, dtype=numpy.int32)\n', (6488, 6510), False, 'import chainer\n'), ((2871, 2904), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'numpy.bool'}), '([], dtype=numpy.bool)\n', (2882, 2904), False, 'import numpy\n'), ((3040, 3069), 'numpy.array', 'numpy.array', (['[[1, 0], [2, 3]]'], {}), '([[1, 0], [2, 3]])\n', (3051, 3069), False, 'import numpy\n'), ((3356, 3395), 'numpy.array', 'numpy.array', (['[True, False, False, True]'], {}), '([True, False, False, True])\n', (3367, 3395), False, 'import numpy\n'), ((3480, 3521), 'numpy.array', 'numpy.array', (['[False, False, False, False]'], {}), '([False, False, False, False])\n', (3491, 3521), False, 'import numpy\n'), ((3579, 3597), 'numpy.array', 'numpy.array', (['(False)'], {}), '(False)\n', (3590, 3597), False, 'import numpy\n'), ((3637, 3654), 'numpy.array', 'numpy.array', (['(True)'], {}), '(True)\n', (3648, 3654), False, 'import numpy\n'), ((3428, 3460), 'numpy.array', 'numpy.array', (['[True, False, True]'], {}), '([True, False, True])\n', (3439, 3460), False, 'import numpy\n'), ((3282, 3320), 'numpy.array', 'numpy.array', (['([True] * 18 + [False] * 6)'], {}), '([True] * 18 + [False] * 6)\n', (3293, 3320), False, 'import numpy\n')] |
from __future__ import print_function
import sys
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from Game import Game
from othello.OthelloLogic import Board
import numpy as np
from time import perf_counter
class OthelloGame(Game):
    """Othello/Reversi rules on an ``n`` x ``n`` board.

    Boards are numpy arrays holding +1 (player one / "O"), -1 (player two /
    "X") and 0 (empty).  Actions are flat indices ``row * n + col``; the
    extra action ``n * n`` means "pass".
    """

    square_content = {
        -1: "X",
        +0: "-",
        +1: "O"
    }

    @staticmethod
    def getSquarePiece(piece):
        # Map a square value (-1/0/+1) to its display character.
        return OthelloGame.square_content[piece]

    def __init__(self, n):
        self.n = n  # board side length

    def getInitBoard(self):
        # return initial board (numpy board)
        b = Board(self.n)
        return np.array(b.pieces)

    def getBoardSize(self):
        # (a,b) tuple
        return (self.n, self.n)

    def getActionSize(self):
        # return number of actions (one per square, plus "pass")
        return self.n*self.n + 1

    def getNextState(self, board, player, action):
        # if player takes action on board, return next (board,player)
        # action must be a legal move
        if action == self.n*self.n:
            # "pass": board unchanged, turn flips
            return (board, -player)
        b = Board(self.n)
        b.pieces = np.copy(board)
        move = (int(action/self.n), action%self.n)
        b.execute_move(move, player)
        return (b.pieces, -player)

    def getLegalMoves(self, board, player):
        # return a fixed size binary vector over all actions
        legal = [0]*self.getActionSize()
        b = Board(self.n)
        b.pieces = np.copy(board)
        legalMoves = b.get_legal_moves(player)
        if len(legalMoves)==0:
            # No legal move: only "pass" (the last action) is allowed.
            legal[-1]=1
            return np.array(legal)
        for x, y in legalMoves:
            legal[self.n*x+y]=1
        return np.array(legal)

    def getGameEnded(self, board, player):
        # return 0 if not ended, 1 if player 1 won, -1 if player 1 lost
        # player = 1
        b = Board(self.n)
        b.pieces = np.copy(board)
        if b.has_legal_moves(player):
            return 0
        if b.has_legal_moves(-player):
            return 0
        dif = b.countDiff(player)
        if dif > 0:
            return 1
        if dif == 0:
            # Draws score slightly negative to discourage playing for them.
            return -0.05
        return -1

    def getCanonicalForm(self, board, player):
        # return state if player==1, else return -state if player==-1
        return player*board

    def getSymmetries(self, board, pi):
        # mirror, rotational: generate all 8 dihedral symmetries of the
        # position together with the correspondingly transformed policy
        assert(len(pi) == self.n**2+1)  # 1 for pass
        pi_board = np.reshape(pi[:-1], (self.n, self.n))
        l = []

        for i in range(1, 5):
            for j in [True, False]:
                newB = np.rot90(board, i)
                newPi = np.rot90(pi_board, i)
                if j:
                    newB = np.fliplr(newB)
                    newPi = np.fliplr(newPi)
                # pass-probability pi[-1] is invariant under symmetry
                l += [(newB, list(newPi.ravel()) + [pi[-1]])]
        return l

    def stringRepresentation(self, board):
        """Hashable key for the position (raw board bytes)."""
        # tobytes() replaces ndarray.tostring(), which was deprecated in
        # NumPy 1.19 and removed in 2.0; the returned bytes are identical.
        return board.tobytes()

    def stringRepresentationReadable(self, board):
        board_s = "".join(self.square_content[square] for row in board for square in row)
        return board_s

    def getScore(self, board, player):
        b = Board(self.n)
        b.pieces = np.copy(board)
        return b.countDiff(player)

    def action_to_move(self,action):
        """Convert a flat action index to a (row, col) move; n*n maps to pass."""
        if action is None:
            return None
        if int(action/self.n)>=self.n:
            # out-of-board index: treat as the "pass" sentinel
            return (self.n,0)
        move = (int(action/self.n), action%self.n)
        return move

    def move_to_action(self,move):
        """Convert a (row, col) move to its flat action index."""
        action = self.n*move[0]+move[1]
        return action
    ######################################################################################################
    #TK the following functions are the value functions and other functions used by the value functions.
    #For most of them, the bachelor paper explained how they are calculated.
    def getCombinedValue(self,board,player):
        """Average of the four heuristic components, each in [-1, 1]."""
        player_moves = self.getLegalMoves(board,player)
        opponent_moves = self.getLegalMoves(board,-player)
        vdisc = self.getDiscValue(board,player)
        vmob = self.getMobilityValue(board,player,player_moves,opponent_moves)
        vcorner = self.getCornerValue(board,player,player_moves,opponent_moves)
        vstability = self.getStabilityValue(board,player)
        #print("coin: "+str(vdisc)+" mobility: "+str(vmob)+ " corner: "+ str(vcorner) + " stability: "+ str(vstability))
        return 0.25*(vdisc+vmob+vcorner+vstability)

    def get_corner_score(self,board,player,legalMoves):
        # 1 point per owned corner, 0.5 extra per corner currently playable
        score = 0
        corner_squares = ((0,0),(0,self.n-1),(self.n-1,0),(self.n-1,self.n-1))
        for x,y in corner_squares:
            if board[x][y]==player:
                score += 1
            if legalMoves[self.move_to_action((x,y))] == 1:
                score += 0.5
        return score

    def getCornerValue(self,board,player,player_moves,opponent_moves):
        """Relative corner control, in [-1, 1] (0 when no corner activity)."""
        player_corner_score = self.get_corner_score(board,player,player_moves)
        opponent_corner_score = self.get_corner_score(board,player*-1,opponent_moves)
        if(player_corner_score+opponent_corner_score)!=0:
            return (player_corner_score-opponent_corner_score)/(player_corner_score+opponent_corner_score)
        return 0

    def getDiscValue(self,board,player):
        """Relative disc count, in [-1, 1]."""
        player_score = sum(board[board==player])
        opponent_score = -sum(board[board==-player])
        return (player_score-opponent_score)/(player_score+opponent_score)

    def getMobilityValue(self,board,player,player_moves,opponent_moves):
        """Relative mobility: actual moves plus 0.5 * potential moves.

        Potential mobility for a side is the number of distinct empty
        squares adjacent to the *opponent's* discs.
        """
        pamv = len(player_moves[player_moves==1])
        oamv = len(opponent_moves[opponent_moves==1])
        empty_neighbours = []
        if len(np.argwhere(board==-player))>0:
            for pos in np.argwhere(board==-player):
                empty_neighbours += self.get_empty_neighbours(board,pos)
            unique_empty_neighbours = set(empty_neighbours)
            ppmv = len(unique_empty_neighbours)
        else:
            ppmv = 0
        empty_neighbours = []
        if len(np.argwhere(board==player))>0:
            for pos in np.argwhere(board==player):
                empty_neighbours += self.get_empty_neighbours(board,pos)
            unique_empty_neighbours = set(empty_neighbours)
            opmv = len(unique_empty_neighbours)
        else:
            opmv = 0
        if ((pamv +0.5*ppmv) + (oamv +0.5*opmv))!=0:
            value = ((pamv +0.5*ppmv) - (oamv +0.5*opmv)) / ((pamv +0.5*ppmv) + (oamv +0.5*opmv))
        else:
            value = 0
        return value

    def get_empty_neighbours(self,board,position):
        """Return the empty squares among the (up to 8) neighbours of position."""
        i = position[0]
        j = position[1]
        empty_neighbours = []
        for x in range(max(0,i-1),min(i+2,self.n)):
            for y in range(max(0,j-1),min(j+2,self.n)):
                if (x != i or y !=j):
                    if board[x][y]==0:
                        empty_neighbours.append((x,y))
        return empty_neighbours

    def getEdgeStabilityMatrix(self,board,player):
        ## TK returns a matrix with 1 on every position of a stable edge disc
        n = self.n
        corners = ((0,0),(n-1,0),(n-1,n-1),(0,n-1))
        upper_edge = []
        lower_edge = []
        left_edge = []
        right_edge = []
        for i in range(1,n-1):
            upper_edge.append((i,0))
            right_edge.append((n-1,i))
            lower_edge.append((i,n-1))
            left_edge.append((0,i))
        edges = [upper_edge,right_edge,lower_edge,left_edge]
        # step direction along each edge, matching the order in `edges`
        alignment = [(1,0),(0,1),(1,0),(0,1)]
        stability_matrix = np.zeros((n,n))
        # Occupied corners are always stable.
        for corner in corners:
            if board[corner[0]][corner[1]]!=0:
                stability_matrix[corner[0]][corner[1]] = 1
        i = 0
        for edge in edges:
            temp_list = edge+ [corners[i%4]] +[corners[(i+1)%4]]
            full_row = True
            #TK if every square on an edge is filled, all coins on these squares are stable
            for square in (temp_list):
                if board[square[0]][square[1]]==0:
                    full_row = False
                    break
            if full_row:
                for square in (temp_list):
                    stability_matrix[square[0]][square[1]]=1
            #TK Discs next to stable discs of the same color are stable aswell
            #To check this, the squares of the edge are checked one after another in one direction.
            #Each square is compared to the previous one and if the previous one has the same color
            #and is stable, this disc is also stable.
            #The same thing is than done for the over direction. Once no more discs are changed from
            #unstable to stable, the loop ends.
            changed = True
            while changed == True:
                changed = False
                for field in edge:
                    prev_field = (field[0]-alignment[i][0],field[1]-alignment[i][1])
                    if stability_matrix[field[0]][field[1]]==0:
                        if board[prev_field[0]][prev_field[1]]!=0 and board[prev_field[0]][prev_field[1]] == board[field[0]][field[1]]:
                            if stability_matrix[prev_field[0]][prev_field[1]]==1:
                                stability_matrix[field[0]][field[1]] = 1
                                changed = True
                for field in edge:
                    prev_field = (field[0]+alignment[i][0],field[1]+alignment[i][1])
                    if stability_matrix[field[0]][field[1]]==0:
                        if board[prev_field[0]][prev_field[1]]!=0 and board[prev_field[0]][prev_field[1]] == board[field[0]][field[1]]:
                            if stability_matrix[prev_field[0]][prev_field[1]]==1:
                                stability_matrix[field[0]][field[1]] = 1
                                changed = True
            i +=1
        return stability_matrix

    def getStabilityValue(self,board,player):
        """Relative count of stable edge discs, in [-1, 1]."""
        edge_stability_matrix = self.getEdgeStabilityMatrix(board,player)
        stable_coins = board*edge_stability_matrix
        player_stable_coins = sum(stable_coins[stable_coins==player])
        opponent_stable_coins = -sum(stable_coins[stable_coins==-player])
        if(player_stable_coins+opponent_stable_coins)!=0:
            return (player_stable_coins-opponent_stable_coins)/(player_stable_coins+opponent_stable_coins)
        return 0

    def getMatrixValue(self,board,maxplayer):
        """Positional value from a fixed square-weight matrix (6x6 and 8x8 only)."""
        if self.n == 8:
            weights = np.array(([ 4,-3, 2, 2, 2, 2,-3, 4],
                                [-3,-4,-1,-1,-1,-1,-4,-3],
                                [ 2,-1, 1, 0, 0, 1,-1, 2],
                                [ 2,-1, 0, 1, 1, 0,-1, 2],
                                [ 2,-1, 0, 1, 1, 0,-1, 2],
                                [ 2,-1, 1, 0, 0, 1,-1, 2],
                                [-3,-4,-1,-1,-1,-1,-4,-3],
                                [ 4,-3, 2, 2, 2, 2,-3, 4]))
            if maxplayer==1:
                return np.sum(weights*board)/112
            return -np.sum(weights*board)/112
            # 112 is the maximum difference possible with 56 for player 1 and -56 for player 2
        if self.n == 6:
            weights = np.array(([ 5,-3, 3, 3,-3, 5],
                                [-3,-4,-1,-1,-4,-3],
                                [ 3,-1, 1, 1,-1, 3],
                                [ 3,-1, 1, 1,-1, 3],
                                [-3,-4,-1,-1,-4,-3],
                                [ 5,-3, 3, 3,-3, 5]))
            if maxplayer==1:
                return np.sum(weights*board)/96
            return -np.sum(weights*board)/96
            # 96 is the maximum difference possible with 48 for player 1 and -48 for player 2
| [
"numpy.sum",
"numpy.copy",
"os.path.dirname",
"numpy.zeros",
"sys.path.insert",
"numpy.fliplr",
"numpy.rot90",
"numpy.array",
"numpy.reshape",
"inspect.currentframe",
"numpy.argwhere",
"othello.OthelloLogic.Board"
] | [((170, 197), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (185, 197), False, 'import os, sys, inspect\n'), ((198, 227), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (213, 227), False, 'import os, sys, inspect\n'), ((678, 691), 'othello.OthelloLogic.Board', 'Board', (['self.n'], {}), '(self.n)\n', (683, 691), False, 'from othello.OthelloLogic import Board\n'), ((707, 725), 'numpy.array', 'np.array', (['b.pieces'], {}), '(b.pieces)\n', (715, 725), True, 'import numpy as np\n'), ((1151, 1164), 'othello.OthelloLogic.Board', 'Board', (['self.n'], {}), '(self.n)\n', (1156, 1164), False, 'from othello.OthelloLogic import Board\n'), ((1184, 1198), 'numpy.copy', 'np.copy', (['board'], {}), '(board)\n', (1191, 1198), True, 'import numpy as np\n'), ((1464, 1477), 'othello.OthelloLogic.Board', 'Board', (['self.n'], {}), '(self.n)\n', (1469, 1477), False, 'from othello.OthelloLogic import Board\n'), ((1497, 1511), 'numpy.copy', 'np.copy', (['board'], {}), '(board)\n', (1504, 1511), True, 'import numpy as np\n'), ((1729, 1744), 'numpy.array', 'np.array', (['legal'], {}), '(legal)\n', (1737, 1744), True, 'import numpy as np\n'), ((1894, 1907), 'othello.OthelloLogic.Board', 'Board', (['self.n'], {}), '(self.n)\n', (1899, 1907), False, 'from othello.OthelloLogic import Board\n'), ((1927, 1941), 'numpy.copy', 'np.copy', (['board'], {}), '(board)\n', (1934, 1941), True, 'import numpy as np\n'), ((2489, 2526), 'numpy.reshape', 'np.reshape', (['pi[:-1]', '(self.n, self.n)'], {}), '(pi[:-1], (self.n, self.n))\n', (2499, 2526), True, 'import numpy as np\n'), ((3180, 3193), 'othello.OthelloLogic.Board', 'Board', (['self.n'], {}), '(self.n)\n', (3185, 3193), False, 'from othello.OthelloLogic import Board\n'), ((3213, 3227), 'numpy.copy', 'np.copy', (['board'], {}), '(board)\n', (3220, 3227), True, 'import numpy as np\n'), ((7606, 7622), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (7614, 7622), 
True, 'import numpy as np\n'), ((132, 154), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (152, 154), False, 'import os, sys, inspect\n'), ((1634, 1649), 'numpy.array', 'np.array', (['legal'], {}), '(legal)\n', (1642, 1649), True, 'import numpy as np\n'), ((5759, 5788), 'numpy.argwhere', 'np.argwhere', (['(board == -player)'], {}), '(board == -player)\n', (5770, 5788), True, 'import numpy as np\n'), ((6107, 6135), 'numpy.argwhere', 'np.argwhere', (['(board == player)'], {}), '(board == player)\n', (6118, 6135), True, 'import numpy as np\n'), ((10540, 10799), 'numpy.array', 'np.array', (['([4, -3, 2, 2, 2, 2, -3, 4], [-3, -4, -1, -1, -1, -1, -4, -3], [2, -1, 1, 0,\n 0, 1, -1, 2], [2, -1, 0, 1, 1, 0, -1, 2], [2, -1, 0, 1, 1, 0, -1, 2], [\n 2, -1, 1, 0, 0, 1, -1, 2], [-3, -4, -1, -1, -1, -1, -4, -3], [4, -3, 2,\n 2, 2, 2, -3, 4])'], {}), '(([4, -3, 2, 2, 2, 2, -3, 4], [-3, -4, -1, -1, -1, -1, -4, -3], [2,\n -1, 1, 0, 0, 1, -1, 2], [2, -1, 0, 1, 1, 0, -1, 2], [2, -1, 0, 1, 1, 0,\n -1, 2], [2, -1, 1, 0, 0, 1, -1, 2], [-3, -4, -1, -1, -1, -1, -4, -3], [\n 4, -3, 2, 2, 2, 2, -3, 4]))\n', (10548, 10799), True, 'import numpy as np\n'), ((11258, 11412), 'numpy.array', 'np.array', (['([5, -3, 3, 3, -3, 5], [-3, -4, -1, -1, -4, -3], [3, -1, 1, 1, -1, 3], [3, \n -1, 1, 1, -1, 3], [-3, -4, -1, -1, -4, -3], [5, -3, 3, 3, -3, 5])'], {}), '(([5, -3, 3, 3, -3, 5], [-3, -4, -1, -1, -4, -3], [3, -1, 1, 1, -1,\n 3], [3, -1, 1, 1, -1, 3], [-3, -4, -1, -1, -4, -3], [5, -3, 3, 3, -3, 5]))\n', (11266, 11412), True, 'import numpy as np\n'), ((2632, 2650), 'numpy.rot90', 'np.rot90', (['board', 'i'], {}), '(board, i)\n', (2640, 2650), True, 'import numpy as np\n'), ((2675, 2696), 'numpy.rot90', 'np.rot90', (['pi_board', 'i'], {}), '(pi_board, i)\n', (2683, 2696), True, 'import numpy as np\n'), ((5704, 5733), 'numpy.argwhere', 'np.argwhere', (['(board == -player)'], {}), '(board == -player)\n', (5715, 5733), True, 'import numpy as np\n'), ((6053, 6081), 'numpy.argwhere', 
'np.argwhere', (['(board == player)'], {}), '(board == player)\n', (6064, 6081), True, 'import numpy as np\n'), ((2746, 2761), 'numpy.fliplr', 'np.fliplr', (['newB'], {}), '(newB)\n', (2755, 2761), True, 'import numpy as np\n'), ((2790, 2806), 'numpy.fliplr', 'np.fliplr', (['newPi'], {}), '(newPi)\n', (2799, 2806), True, 'import numpy as np\n'), ((11045, 11068), 'numpy.sum', 'np.sum', (['(weights * board)'], {}), '(weights * board)\n', (11051, 11068), True, 'import numpy as np\n'), ((11091, 11114), 'numpy.sum', 'np.sum', (['(weights * board)'], {}), '(weights * board)\n', (11097, 11114), True, 'import numpy as np\n'), ((11609, 11632), 'numpy.sum', 'np.sum', (['(weights * board)'], {}), '(weights * board)\n', (11615, 11632), True, 'import numpy as np\n'), ((11654, 11677), 'numpy.sum', 'np.sum', (['(weights * board)'], {}), '(weights * board)\n', (11660, 11677), True, 'import numpy as np\n')] |
# Code from Chapter 17 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2014
import numpy as np
class hopfield:
    """Hopfield network storing bipolar (+1/-1) patterns via the Hebb rule.

    ``synchronous`` selects whole-vector updates; otherwise neurons are
    updated one at a time, in random order unless ``random`` is False.
    """

    def __init__(self, inputs, synchronous=False, random=True):
        # One neuron per input dimension; weights start at zero.
        self.nneurons = np.shape(inputs)[1]
        self.weights = np.zeros((self.nneurons, self.nneurons))
        self.activations = np.zeros((self.nneurons, 1))
        self.synchronous = synchronous
        self.random = random

    def set_neurons(self, input):
        """Clamp the network state to the given activation vector."""
        self.activations = input

    def update_neurons(self):
        """Run one update sweep and return the new activations.

        Ties (net input exactly 0) resolve to -1 in both modes.
        """
        if self.synchronous:
            act = np.sum(self.weights * self.activations, axis=1)
            self.activations = np.where(act > 0, 1, -1)
        else:
            order = np.arange(self.nneurons)
            if self.random:
                np.random.shuffle(order)
            for i in order:
                # Asynchronous: each neuron sees the partially updated state.
                if np.sum(self.weights[i, :] * self.activations) > 0:
                    self.activations[i] = 1
                else:
                    self.activations[i] = -1
        return self.activations

    def set_weights(self, inputs):
        """Accumulate Hebbian weights for the given patterns.

        Vectorized form of the O(n^2 * k) triple loop
        ``w[i, j] += sum_k inputs[k, i] * inputs[k, j]`` for i != j,
        followed by ``w /= ninputs`` — exact for integer +-1 patterns.
        """
        inputs = np.asarray(inputs)
        ninputs = inputs.shape[0]
        hebb = inputs.T @ inputs        # hebb[i, j] = sum_k x_ki * x_kj
        np.fill_diagonal(hebb, 0)       # no self-connections (i != j)
        self.weights = (self.weights + hebb) / ninputs

    def compute_energy(self):
        """Return the Hopfield energy -1/2 * sum_ij w_ij a_i a_j."""
        a = self.activations
        return -0.5 * np.sum(self.weights * np.outer(a, a))

    def print_net(self):
        print(self.weights)
        print(self.compute_energy())

    def print_out(self):
        print(self.activations)
def learn_letters():
    """Demo: store 10 binary alpha-digit images and reconstruct a novel one.

    Loads ``binaryalphadigs.mat`` from the working directory, trains a
    Hopfield net on the first example of the first 10 classes, then shows
    the reconstruction of class 12's first image over 5 update sweeps.
    """
    import scipy.io as sio
    import pylab as pl

    pl.ion()

    nperclass = 39
    classes = np.arange(20)
    # classes = [0, 11, 17] # A, C, S
    # classes = [10, 13, 28] # A, C, S
    nclasses = len(classes)

    # Read in the data and prepare it
    data = sio.loadmat('binaryalphadigs.mat')
    inputs = np.ones((nclasses, nperclass, 20 * 16))
    labels = np.zeros((nclasses, nperclass, nclasses))
    for k in range(nclasses):
        for m in range(nperclass):
            inputs[k, m, :] = (data['dat'][classes[k], m].ravel()).astype('float')
            labels[k, m, k] = 1.
    # Map {0, 1} pixels to the bipolar {-1, +1} coding the network expects.
    inputs = np.where(inputs == 0, -1, 1)

    # One exemplar (the first) per class.
    v = inputs[:, 0, :].reshape(nclasses, 20 * 16)
    l = labels[:, 0, :].reshape(nclasses, nclasses)

    # Train a Hopfield network
    import hopfield
    h = hopfield.hopfield(v[:10, :])
    h.set_weights(v[:10, :])

    # This is the training set
    pl.figure(),
    # pl.title('Training Data')
    pl.suptitle('Training Data', fontsize=14)
    for i in range(10):
        pl.subplot(2, 5, i), pl.imshow(v[i, :].reshape(20, 16), cmap=pl.cm.gray), pl.axis('off')

    # which = 2
    # mask = np.ones(20*16)
    # x = np.random.randint(320,size=20)
    # mask[x] = -1
    # h.set_neurons(v[which,:]*mask)
    # print h.compute_energy()
    # nrec = 3
    # new = np.zeros((320,nrec))
    # for i in range(1,nrec):
    # new[:,i] = h.update_neurons()
    # print h.compute_energy()
    # pl.figure(), pl.imshow(new[:,i].reshape(20,16),cmap=pl.cm.gray,interpolation='nearest'), pl.title('Noisy Image. Reconstruction Step %s'%i), pl.axis('off')
    # pl.figure(), pl.imshow((v[which,:]).reshape(20,16),cmap=pl.cm.gray,interpolation='nearest'), pl.title('Original Image.'), pl.axis('off')
    # pl.figure(), pl.imshow((v[which,:]*mask).reshape(20,16),cmap=pl.cm.gray,interpolation='nearest'), pl.title('Noisy Image.'), pl.axis('off')

    # Class 12's image was NOT part of the training set (only v[:10] was).
    which = 12
    h.set_neurons(v[which, :])
    print(h.compute_energy())
    pl.figure(), pl.imshow((v[which, :]).reshape(20, 16), cmap=pl.cm.gray, interpolation='nearest'), pl.title(
        'Novel Image.'), pl.axis('off')

    nrec = 5
    new2 = np.zeros((320, nrec))
    for i in range(nrec):
        new2[:, i] = h.update_neurons()
        print(h.compute_energy())
        pl.figure(), pl.imshow(new2[:, i].reshape(20, 16), cmap=pl.cm.gray, interpolation='nearest'), pl.title(
            'Novel Image. Reconstruction Step %s' % i), pl.axis('off')
def test_hopfield():
    """Store two bipolar patterns, recall each, then probe a noisy input."""
    import hopfield

    patterns = np.array([[1, 1, 1, 1, -1, -1, -1, -1],
                         [1, -1, 1, -1, 1, -1, 1, -1]])
    print(patterns)

    net = hopfield.hopfield(patterns, synchronous=True)
    print("Setting weights")
    net.set_weights(patterns)
    net.print_net()

    # Recall both stored patterns, separated by a divider after the first.
    for idx in range(2):
        if idx == 1:
            print("------")
        print("Input %d" % idx)
        print(patterns[idx, :])
        net.set_neurons(patterns[idx, :])
        net.update_neurons()
        net.print_out()

    # A probe one flip away from the first stored pattern.
    probe = np.array([1, 1, 1, 1, 1, -1, -1, -1])
    net.set_neurons(probe)
    print(net.compute_energy())
    net.print_out()
    for _ in range(3):
        net.update_neurons()
        print(net.compute_energy())
    net.print_out()
def test_hopfield2():
    """Smoke-test an asynchronous Hopfield network on 5x5 patterns.

    First updates the network with its default (untrained) weights, then
    trains the weights on three patterns and probes with a noisy cue.
    """
    import hopfield
    patterns = np.array(
        [[-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1],
         [1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1],
         [1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1]])
    net = hopfield.hopfield(patterns)
    # net = hopfield.hopfield(patterns,synchronous=True,random=False)
    net.print_net()
    # Update before training: shows behaviour with default weights.
    print("Updating neurons")
    net.update_neurons()
    net.print_out()
    # Train the weights, then update again.
    print("Updating weights")
    net.set_weights(patterns)
    net.update_neurons()
    net.print_net()
    net.print_out()
    print("------")
    # Probe with a corrupted version of the first pattern.
    probe = np.array([-1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1])
    net.set_neurons(probe)
    net.print_out()
    net.update_neurons()
    net.print_out()
def show_reverse():
    """Visualise Hopfield recall, including 'reversed' (spurious) attractors.

    Stores two 4x4 patterns, then probes the network with two noisy
    states, printing the energy and plotting the network state after
    each relaxation step.
    """
    import hopfield
    inputs = np.array([[1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1],
                       [1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, ]])
    h = hopfield.hopfield(inputs)
    h.set_weights(inputs)
    import pylab as pl
    pl.ion()
    # First probe: noisy state, plotted before and after each update.
    a = np.array([1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1])
    pl.figure(), pl.imshow(a.reshape(4, 4), cmap=pl.cm.gray, interpolation='nearest'), pl.axis('off')
    print(a.reshape(4, 4))
    h.set_neurons(a)
    print(h.compute_energy())
    y = h.update_neurons()
    pl.figure(), pl.imshow(y.reshape(4, 4), cmap=pl.cm.gray, interpolation='nearest'), pl.axis('off')
    print(h.compute_energy())
    h.update_neurons()
    print(h.compute_energy())
    # Return value was bound to an unused local ('out'); binding dropped.
    h.update_neurons()
    print(h.compute_energy())
    # pl.figure(), pl.imshow(out.reshape(4,4),cmap=pl.cm.gray,interpolation='nearest'),pl.axis('off')
    # Second probe.
    a = np.array([-1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1])
    pl.figure(), pl.imshow(a.reshape(4, 4), cmap=pl.cm.gray, interpolation='nearest'), pl.axis('off')
    print(a.reshape(4, 4))
    h.set_neurons(a)
    print(h.compute_energy())
    x = h.activations.copy()
    print(x.reshape(4, 4))
    pl.figure(), pl.imshow(x.reshape(4, 4), cmap=pl.cm.gray, interpolation='nearest'), pl.axis('off')
    print(h.compute_energy())
    # Renamed from 'set': shadowing the builtin made later code fragile.
    settled = h.update_neurons()
    print(settled.reshape(4, 4))
    print(h.compute_energy())
    pl.figure(), pl.imshow(settled.reshape(4, 4), cmap=pl.cm.gray, interpolation='nearest'), pl.axis('off')
    # next = h.update_neurons()
    # print h.compute_energy()
    # pl.figure(), pl.imshow(next.reshape(4,4),cmap=pl.cm.gray,interpolation='nearest'),pl.axis('off')
    # new = h.update_neurons()
    # print h.compute_energy()
    # pl.figure(), pl.imshow(new.reshape(4,4),cmap=pl.cm.gray,interpolation='nearest'),pl.axis('off')
    pl.show()
# test_hopfield()
# learn_letters()
| [
"pylab.ion",
"pylab.show",
"pylab.title",
"hopfield.hopfield",
"scipy.io.loadmat",
"pylab.axis",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.shape",
"pylab.subplot",
"numpy.where",
"numpy.arange",
"pylab.suptitle",
"numpy.array",
"pylab.figure",
"numpy.random.shuffle"
] | [((2196, 2204), 'pylab.ion', 'pl.ion', ([], {}), '()\n', (2202, 2204), True, 'import pylab as pl\n'), ((2239, 2252), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (2248, 2252), True, 'import numpy as np\n'), ((2408, 2442), 'scipy.io.loadmat', 'sio.loadmat', (['"""binaryalphadigs.mat"""'], {}), "('binaryalphadigs.mat')\n", (2419, 2442), True, 'import scipy.io as sio\n'), ((2456, 2495), 'numpy.ones', 'np.ones', (['(nclasses, nperclass, 20 * 16)'], {}), '((nclasses, nperclass, 20 * 16))\n', (2463, 2495), True, 'import numpy as np\n'), ((2509, 2550), 'numpy.zeros', 'np.zeros', (['(nclasses, nperclass, nclasses)'], {}), '((nclasses, nperclass, nclasses))\n', (2517, 2550), True, 'import numpy as np\n'), ((2746, 2774), 'numpy.where', 'np.where', (['(inputs == 0)', '(-1)', '(1)'], {}), '(inputs == 0, -1, 1)\n', (2754, 2774), True, 'import numpy as np\n'), ((2940, 2968), 'hopfield.hopfield', 'hopfield.hopfield', (['v[:10, :]'], {}), '(v[:10, :])\n', (2957, 2968), False, 'import hopfield\n'), ((3083, 3124), 'pylab.suptitle', 'pl.suptitle', (['"""Training Data"""'], {'fontsize': '(14)'}), "('Training Data', fontsize=14)\n", (3094, 3124), True, 'import pylab as pl\n'), ((4269, 4290), 'numpy.zeros', 'np.zeros', (['(320, nrec)'], {}), '((320, nrec))\n', (4277, 4290), True, 'import numpy as np\n'), ((4630, 4700), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, -1, -1, -1, -1], [1, -1, 1, -1, 1, -1, 1, -1]]'], {}), '([[1, 1, 1, 1, -1, -1, -1, -1], [1, -1, 1, -1, 1, -1, 1, -1]])\n', (4638, 4700), True, 'import numpy as np\n'), ((4787, 4830), 'hopfield.hopfield', 'hopfield.hopfield', (['inputs'], {'synchronous': '(True)'}), '(inputs, synchronous=True)\n', (4804, 4830), False, 'import hopfield\n'), ((5226, 5263), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, -1, -1, -1]'], {}), '([1, 1, 1, 1, 1, -1, -1, -1])\n', (5234, 5263), True, 'import numpy as np\n'), ((5622, 5911), 'numpy.array', 'np.array', (['[[-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 
-\n 1, -1, -1, 1, -1, -1], [1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1,\n 1, -1, -1, -1, -1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1,\n 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1]]'], {}), '([[-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1, 1, -1, -1], [1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1,\n 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, -1, -1, -1,\n -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1]])\n', (5630, 5911), True, 'import numpy as np\n'), ((5955, 5980), 'hopfield.hopfield', 'hopfield.hopfield', (['inputs'], {}), '(inputs)\n', (5972, 5980), False, 'import hopfield\n'), ((6288, 6398), 'numpy.array', 'np.array', (['[-1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1]'], {}), '([-1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, \n 1, -1, -1, -1, -1, 1, -1, -1])\n', (6296, 6398), True, 'import numpy as np\n'), ((6536, 6666), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1], [1, -1, 1, -1, 1,\n -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1]]'], {}), '([[1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1], [1, -1,\n 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1]])\n', (6544, 6666), True, 'import numpy as np\n'), ((6696, 6721), 'hopfield.hopfield', 'hopfield.hopfield', (['inputs'], {}), '(inputs)\n', (6713, 6721), False, 'import hopfield\n'), ((6775, 6783), 'pylab.ion', 'pl.ion', ([], {}), '()\n', (6781, 6783), True, 'import pylab as pl\n'), ((6793, 6859), 'numpy.array', 'np.array', (['[1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1]'], {}), '([1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1])\n', (6801, 6859), True, 'import numpy as np\n'), ((7423, 7491), 'numpy.array', 'np.array', (['[-1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1]'], {}), '([-1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1])\n', (7431, 7491), True, 'import numpy as np\n'), ((8388, 
8397), 'pylab.show', 'pl.show', ([], {}), '()\n', (8395, 8397), True, 'import pylab as pl\n'), ((518, 558), 'numpy.zeros', 'np.zeros', (['(self.nneurons, self.nneurons)'], {}), '((self.nneurons, self.nneurons))\n', (526, 558), True, 'import numpy as np\n'), ((586, 614), 'numpy.zeros', 'np.zeros', (['(self.nneurons, 1)'], {}), '((self.nneurons, 1))\n', (594, 614), True, 'import numpy as np\n'), ((3034, 3045), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (3043, 3045), True, 'import pylab as pl\n'), ((4097, 4108), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (4106, 4108), True, 'import pylab as pl\n'), ((4194, 4218), 'pylab.title', 'pl.title', (['"""Novel Image."""'], {}), "('Novel Image.')\n", (4202, 4218), True, 'import pylab as pl\n'), ((4229, 4243), 'pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (4236, 4243), True, 'import pylab as pl\n'), ((6864, 6875), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (6873, 6875), True, 'import pylab as pl\n'), ((6947, 6961), 'pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (6954, 6961), True, 'import pylab as pl\n'), ((7071, 7082), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (7080, 7082), True, 'import pylab as pl\n'), ((7154, 7168), 'pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (7161, 7168), True, 'import pylab as pl\n'), ((7496, 7507), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (7505, 7507), True, 'import pylab as pl\n'), ((7579, 7593), 'pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (7586, 7593), True, 'import pylab as pl\n'), ((7733, 7744), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (7742, 7744), True, 'import pylab as pl\n'), ((7816, 7830), 'pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (7823, 7830), True, 'import pylab as pl\n'), ((7953, 7964), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (7962, 7964), True, 'import pylab as pl\n'), ((8038, 8052), 'pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (8045, 8052), True, 'import pylab as pl\n'), 
((475, 491), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (483, 491), True, 'import numpy as np\n'), ((879, 926), 'numpy.sum', 'np.sum', (['(self.weights * self.activations)'], {'axis': '(1)'}), '(self.weights * self.activations, axis=1)\n', (885, 926), True, 'import numpy as np\n'), ((958, 982), 'numpy.where', 'np.where', (['(act > 0)', '(1)', '(-1)'], {}), '(act > 0, 1, -1)\n', (966, 982), True, 'import numpy as np\n'), ((1050, 1074), 'numpy.arange', 'np.arange', (['self.nneurons'], {}), '(self.nneurons)\n', (1059, 1074), True, 'import numpy as np\n'), ((1440, 1456), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (1448, 1456), True, 'import numpy as np\n'), ((3157, 3176), 'pylab.subplot', 'pl.subplot', (['(2)', '(5)', 'i'], {}), '(2, 5, i)\n', (3167, 3176), True, 'import pylab as pl\n'), ((3231, 3245), 'pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (3238, 3245), True, 'import pylab as pl\n'), ((4399, 4410), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (4408, 4410), True, 'import pylab as pl\n'), ((4493, 4544), 'pylab.title', 'pl.title', (["('Novel Image. Reconstruction Step %s' % i)"], {}), "('Novel Image. Reconstruction Step %s' % i)\n", (4501, 4544), True, 'import pylab as pl\n'), ((4559, 4573), 'pylab.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (4566, 4573), True, 'import pylab as pl\n'), ((1119, 1143), 'numpy.random.shuffle', 'np.random.shuffle', (['order'], {}), '(order)\n', (1136, 1143), True, 'import numpy as np\n'), ((1191, 1236), 'numpy.sum', 'np.sum', (['(self.weights[i, :] * self.activations)'], {}), '(self.weights[i, :] * self.activations)\n', (1197, 1236), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun May 19 10:29:08 2019
@author: Darin
"""
import numpy as np
class Interpolation:
    """Base class for material interpolation schemes.

    Stores the density filter and the quantities needed to penalise
    regions whose solid features exceed the maximum feature length.
    """

    def __init__(self, P, R, vdmin, p, q, minStiff=1e-10):
        """Store the filter matrices and penalisation parameters.

        Parameters
        ----------
        P : sparse matrix
            Density filter matrix.
        R : sparse matrix
            Matrix summing element densities around each element.
        vdmin : scalar
            Minimum volume of voids around each element; values below 1
            disable the maximum-feature-size restriction.
        p : scalar
            Penalty exponent for intermediate densities.
        q : scalar
            Penalty exponent for overly thick features.
        minStiff : scalar, optional
            Minimum Young's modulus.
        """
        self.P = P
        self.R = R
        self.vdmin = vdmin
        self.p = p
        self.q = q
        self.eps = minStiff

    def BaseInterpolate(self, x):
        """Filter the design variables and penalise oversized features.

        Parameters
        ----------
        x : array_like
            Design variables.

        Returns
        -------
        y : array_like
            Void volume around each element divided by the minimum void
            volume (capped at 1), raised through the ``q`` penalty; all
            ones when the restriction is disabled.
        z : array_like
            Filtered densities scaled by the feature-size penalty ``y``.
        rho : array_like
            Filtered (physical) element densities.
        rhoq : array_like
            Derivative factor of the feature-size penalty w.r.t. ``rho``.
        """
        # Cap at 1 to guard against round-off pushing densities above 1.
        rho = np.minimum(self.P * x, 1)
        if self.vdmin < 1:
            # Maximum-feature-size restriction disabled: no penalty.
            y = np.ones_like(rho)
            rhoq = np.zeros_like(rho)
            z = rho.copy()
        else:
            # Void volume surrounding each element (trailing 1 pads the
            # summation vector for R's extra column).
            voids = self.R * np.append((1 - rho) ** self.q, [1], axis=0)
            y = np.minimum(voids / self.vdmin, 1)
            rhoq = self.q * (1 - rho) ** (self.q - 1) / self.vdmin
            z = rho * y
        return y, z, rho, rhoq
class SIMP(Interpolation):
    """Standard SIMP (power-law) material interpolation."""

    def __init__(self, P, R, vdmin, p, q, minStiff=1e-10):
        """Store the interpolation parameters.

        Parameters
        ----------
        P : sparse matrix
            Density filter matrix.
        R : sparse matrix
            Matrix summing element densities around each element.
        vdmin : scalar
            Minimum volume of voids around each element.
        p : scalar
            Penalty exponent for intermediate densities.
        q : scalar
            Penalty exponent for overly thick features.
        minStiff : scalar, optional
            Minimum Young's modulus.
        """
        super().__init__(P, R, vdmin, p, q, minStiff)

    def Interpolate(self, x):
        """Return interpolated material values and their sensitivities.

        Parameters
        ----------
        x : array_like
            Design variables.

        Returns
        -------
        dict
            Material values ('E', 'Es', 'V') and their sensitivities
            ('dEdy', 'dEsdy', 'dVdy'), plus the raw fields produced by
            ``BaseInterpolate`` ('y', 'rho', 'rhoq').
        """
        y, z, rho, rhoq = self.BaseInterpolate(x)
        zp = z ** self.p                      # penalised density
        dzp = self.p * z ** (self.p - 1)      # its derivative w.r.t. z
        return {
            'y': y,
            'rho': rho,
            'rhoq': rhoq,
            # (z > 0) keeps truly void elements at exactly zero stiffness.
            'E': (z > 0) * self.eps + (1 - self.eps) * zp,
            'Es': zp,
            'V': rho,
            'dEdy': (1 - self.eps) * dzp,
            'dEsdy': dzp,
            'dVdy': np.ones(rho.shape),
        }
class SIMP_CUT(Interpolation):
    """SIMP interpolation with a density cutoff on the stress stiffness.

    Identical to standard SIMP except that the stress-stiffness modulus
    'Es' (and its sensitivity) is zeroed wherever the penalised density
    is at or below the cutoff.
    """

    def __init__(self, P, R, vdmin, p, q, cut, minStiff=1e-10):
        """Store the interpolation parameters.

        Parameters
        ----------
        P : sparse matrix
            Density filter matrix.
        R : sparse matrix
            Matrix summing element densities around each element.
        vdmin : scalar
            Minimum volume of voids around each element.
        p : scalar
            Penalty exponent for intermediate densities.
        q : scalar
            Penalty exponent for overly thick features.
        cut : scalar
            Minimum density for the stress stiffness to be nonzero.
        minStiff : scalar, optional
            Minimum Young's modulus.
        """
        super().__init__(P, R, vdmin, p, q, minStiff)
        self.cut = cut

    def Interpolate(self, x):
        """Return interpolated material values and their sensitivities.

        Parameters
        ----------
        x : array_like
            Design variables.

        Returns
        -------
        dict
            Material values ('E', 'Es', 'V') and their sensitivities
            ('dEdy', 'dEsdy', 'dVdy'), plus the raw fields produced by
            ``BaseInterpolate`` ('y', 'rho', 'rhoq').
        """
        y, z, rho, rhoq = self.BaseInterpolate(x)
        zp = z ** self.p                      # penalised density
        dzp = self.p * z ** (self.p - 1)      # its derivative w.r.t. z
        # Elements dense enough to contribute stress stiffness.
        active = z > self.cut
        return {
            'y': y,
            'rho': rho,
            'rhoq': rhoq,
            'E': self.eps + (1 - self.eps) * zp,
            'Es': active * zp,
            'V': rho,
            'dEdy': (1 - self.eps) * dzp,
            'dEsdy': active * dzp,
            'dVdy': np.ones(rho.shape),
        }
#class SIMP_LOGISTIC:
# """ SIMP interpolation with a logistic function instead of a power law
# """
# def __init__(self, penal, rate, trans, minStiff=1e-10):
# """ Creates the material interpolation object and stores the relevant
# parameters
#
# Parameters
# ----------
# penal : scalar
# Penalty value
# rate : scalar
# Controls the rate of transition of the logistic function
# trans : scalar
# Inflection point of logistic function
# minStiff : scalar
# Minimum Young's Modulus
#
# """
#
# self.p = penal
# self.rate = rate
# self.trans = trans
# self.eps = minStiff
#
# def Interpolate(self, y):
# """ Calculate the interpolated material values
#
# Parameters
# ----------
# y : array_like
# Filtered material densities
#
# Returns
# -------
# matVals : dict
# Interpolated material values and their sensitivities
#
# """
#
# matVals = {}
# matVals['E'] = self.eps + (1-self.eps) * y**self.p
# denom = 1 + np.exp(self.rate * (self.trans-y))
# matVals['Es'] = (y**self.p) / denom
# matVals['V'] = y
# matVals['dEdy'] = (1-self.eps) * self.p * y**(self.p-1)
# matVals['dEsdy'] = (self.p * y**(self.p-1) + (matVals['Es'] *
# self.rate) * (1-1./denom)) / denom
# matVals['dVdy'] = np.ones(y.shape)
#
# return matVals
#
#class SIMP_SMOOTH:
# """ SIMP interpolation with a smoothstep function instead of a power law
# """
# def __init__(self, penal, rate, trans, minStiff=1e-10):
# """ Creates the material interpolation object and stores the relevant
# parameters
#
# Parameters
# ----------
# penal : scalar
# Penalty value
# minimum : scalar
# Point where the function hits zero
# maximum : scalar
# Point where the function hits one
# minStiff : scalar
# Minimum Young's Modulus
#
# """
#
# self.p = penal
# self.minimum = minimum
# self.maximum = maximum
# self.eps = minStiff
#
# def Interpolate(self, y):
# """ Calculate the interpolated material values
#
# Parameters
# ----------
# y : array_like
# Filtered material densities
#
# Returns
# -------
# matVals : dict
# Interpolated material values and their sensitivities
#
# """
#
# matVals = {}
# matVals['E'] = self.eps + (1-self.eps) * y**self.p
# matVals['Es'] = y**self.p
# matVals['V'] = y
# matVals['dEdy'] = (1-self.eps) * self.p * y**(self.p-1)
# matVals['dEsdy'] = self.p * y**(self.p-1)
# shift = (y-self.minimum) / (self.maximum-self.minimum)
# adjust = ((6*shift**5 - 15*shift**4 + 10*shift**3) *
# (shift >= 0 & shift <= 1) + (shift > 1))
# dadjust = ((30*shift**4 - 60*shift**3 + 30*shift**2) *
# (shift >= 0 & shift <= 1)/(self.maximum-self.minimum))
# matVals['dEsdy'] *= adjust + matVals['Es'] * dadjust
# matVals['Es'] *= adjust
# matVals['dVdy'] = np.ones(y.shape)
#
# return matVals
# | [
"numpy.zeros_like",
"numpy.minimum",
"numpy.ones_like",
"numpy.ones",
"numpy.append"
] | [((1593, 1618), 'numpy.minimum', 'np.minimum', (['(self.P * x)', '(1)'], {}), '(self.P * x, 1)\n', (1603, 1618), True, 'import numpy as np\n'), ((3598, 3616), 'numpy.ones', 'np.ones', (['rho.shape'], {}), '(rho.shape)\n', (3605, 3616), True, 'import numpy as np\n'), ((5409, 5427), 'numpy.ones', 'np.ones', (['rho.shape'], {}), '(rho.shape)\n', (5416, 5427), True, 'import numpy as np\n'), ((1707, 1724), 'numpy.ones_like', 'np.ones_like', (['rho'], {}), '(rho)\n', (1719, 1724), True, 'import numpy as np\n'), ((1744, 1762), 'numpy.zeros_like', 'np.zeros_like', (['rho'], {}), '(rho)\n', (1757, 1762), True, 'import numpy as np\n'), ((1885, 1914), 'numpy.minimum', 'np.minimum', (['(y / self.vdmin)', '(1)'], {}), '(y / self.vdmin, 1)\n', (1895, 1914), True, 'import numpy as np\n'), ((1829, 1872), 'numpy.append', 'np.append', (['((1 - rho) ** self.q)', '[1]'], {'axis': '(0)'}), '((1 - rho) ** self.q, [1], axis=0)\n', (1838, 1872), True, 'import numpy as np\n')] |
#testing script for the basic elements to training a convolutional variational
#autoencoder for the time series masks.
#the architecture is based on that for the MNIST by debuggercafe:
#https://debuggercafe.com/convolutional-variational-autoencoder-in-pytorch-on-mnist-dataset/
##############################################################################
#train.py
def clear_all():
    """Delete user-defined variables from this module's global namespace.

    Names starting with an underscore, functions, and modules are kept;
    everything else is removed (mimics clearing the Spyder workspace).
    """
    for name in list(globals()):
        if name.startswith('_'):
            continue
        value_repr = str(globals()[name])
        # Functions render as '<function ...>' and modules as '<module ...>'.
        if 'func' in value_repr or 'module' in value_repr:
            continue
        del globals()[name]
clear_all()
import torch
import torch.optim as optim
import torch.nn as nn
import model
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
import matplotlib
import numpy as np
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from engine import train, validate
import matplotlib.pyplot as plt
from utility import save_reconstructed_images, image_to_vid, save_loss_plot
matplotlib.style.use('ggplot')
plt.rcParams.update({'font.size': 25})
def normalize(x):
    """Scale *x* to unit Euclidean (L2) norm.

    Parameters
    ----------
    x : array_like
        Input array (any shape).

    Returns
    -------
    ndarray or array_like
        ``x`` divided by its L2 norm. If the norm is zero (all-zero
        input), ``x`` is returned unchanged instead of producing
        NaN/inf from a division by zero.
    """
    norm = np.linalg.norm(x)
    if norm == 0:
        # Dividing by a zero norm would fill the array with NaNs.
        return x
    return x / norm
def npy_loader(path):
    """Load a ``.npy`` file, L2-normalise it, and return a float tensor.

    Parameters
    ----------
    path : str or path-like
        Path to the ``.npy`` file on disk.

    Returns
    -------
    torch.Tensor
        32-bit float tensor holding the normalised array.
    """
    return torch.from_numpy(normalize(np.load(path))).float()
# Select the compute device: CUDA when available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Build the convolutional VAE and move it onto the device
# (note: this rebinds the imported 'model' module name).
model = model.ConvVAE().to(device)
# Learning parameters.
lr = 0.001
epochs = 100
batch_size = 50
optimizer = optim.Adam(model.parameters(), lr=lr)
criterion = nn.BCELoss(reduction='mean')
# Reconstructed-image grids collected across epochs (for an optional .gif).
grid_images = []
presetN = 1
transform = transforms.Compose([
    transforms.Resize((24, 24)),
    #transforms.ToTensor(),
])
# Training and validation sets of .npy masks; each file is loaded and
# L2-normalised by npy_loader.
trainset = datasets.DatasetFolder(
    root='',  ##enter root here
    loader=npy_loader,
    extensions='.npy',
    transform=transform
)
trainloader = DataLoader(
    trainset, batch_size=batch_size, shuffle=True
)
testset = datasets.DatasetFolder(
    root='',  ##enter root here
    loader=npy_loader,
    extensions='.npy',
    transform=transform
)
testloader = DataLoader(
    testset, batch_size=batch_size, shuffle=False
)
# # training set and train data loader
# trainset = torchvision.datasets.MNIST(
#     root='../input', train=True, download=True, transform=transform
# )
# trainloader = DataLoader(
#     trainset, batch_size=batch_size, shuffle=True
# )
# # validation set and validation data loader
# testset = torchvision.datasets.MNIST(
#     root='../input', train=False, download=True, transform=transform
# )
# testloader = DataLoader(
#     testset, batch_size=batch_size, shuffle=False
# )
# Main training loop: one training pass and one validation pass per epoch.
train_loss = []
valid_loss = []
for epoch in range(epochs):
    print(f"Epoch {epoch+1} of {epochs}")
    train_epoch_loss = train(
        model, trainloader, trainset, device, optimizer, criterion
    )
    valid_epoch_loss, recon_images = validate(
        model, testloader, testset, device, criterion
    )
    train_loss.append(train_epoch_loss)
    valid_loss.append(valid_epoch_loss)
    ## save the reconstructed images from the validation loop
    #save_reconstructed_images(recon_images, epoch+1,presetN)
    ## convert the reconstructed images to PyTorch image grid format
    #image_grid = make_grid(recon_images.detach().cpu())
    #grid_images.append(image_grid)
    print(f"Train Loss: {train_epoch_loss:.4f}")
    print(f"Val Loss: {valid_epoch_loss:.4f}")
# save the reconstructions as a .gif file
#image_to_vid(grid_images)
# save the loss plots to disk
#save_loss_plot(train_loss, valid_loss,presetN)
# Plot the training / validation loss curves.
# (Fixed label typo: 'validataion loss' -> 'validation loss'.)
plt.figure(figsize=(20, 14))
plt.plot(train_loss, color='orange', label='train loss')
plt.plot(valid_loss, color='red', label='validation loss')
plt.xlabel('Epochs', fontsize=20)
plt.ylabel('Loss', fontsize=20)
plt.legend(fontsize=20)
plt.show()
print('TRAINING COMPLETE')
| [
"numpy.load",
"matplotlib.style.use",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"engine.validate",
"torchvision.datasets.DatasetFolder",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"model.parameters",
"engine.train",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.show",
... | [((1200, 1230), 'matplotlib.style.use', 'matplotlib.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1220, 1230), False, 'import matplotlib\n'), ((1232, 1270), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 25}"], {}), "({'font.size': 25})\n", (1251, 1270), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1739), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (1721, 1739), True, 'import torch.nn as nn\n'), ((1958, 2052), 'torchvision.datasets.DatasetFolder', 'datasets.DatasetFolder', ([], {'root': '""""""', 'loader': 'npy_loader', 'extensions': '""".npy"""', 'transform': 'transform'}), "(root='', loader=npy_loader, extensions='.npy',\n transform=transform)\n", (1980, 2052), True, 'import torchvision.datasets as datasets\n'), ((2107, 2164), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True)\n', (2117, 2164), False, 'from torch.utils.data import DataLoader\n'), ((2186, 2280), 'torchvision.datasets.DatasetFolder', 'datasets.DatasetFolder', ([], {'root': '""""""', 'loader': 'npy_loader', 'extensions': '""".npy"""', 'transform': 'transform'}), "(root='', loader=npy_loader, extensions='.npy',\n transform=transform)\n", (2208, 2280), True, 'import torchvision.datasets as datasets\n'), ((2334, 2391), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(testset, batch_size=batch_size, shuffle=False)\n', (2344, 2391), False, 'from torch.utils.data import DataLoader\n'), ((3851, 3879), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 14)'}), '(figsize=(20, 14))\n', (3861, 3879), True, 'import matplotlib.pyplot as plt\n'), ((3881, 3937), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss'], {'color': '"""orange"""', 'label': '"""train loss"""'}), "(train_loss, color='orange', label='train loss')\n", (3889, 
3937), True, 'import matplotlib.pyplot as plt\n'), ((3939, 3998), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss'], {'color': '"""red"""', 'label': '"""validataion loss"""'}), "(valid_loss, color='red', label='validataion loss')\n", (3947, 3998), True, 'import matplotlib.pyplot as plt\n'), ((4000, 4033), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {'fontsize': '(20)'}), "('Epochs', fontsize=20)\n", (4010, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4068), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {'fontsize': '(20)'}), "('Loss', fontsize=20)\n", (4047, 4068), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4095), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (4082, 4095), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4109), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4107, 4109), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1321), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (1318, 1321), True, 'import numpy as np\n'), ((1671, 1689), 'model.parameters', 'model.parameters', ([], {}), '()\n', (1687, 1689), False, 'import model\n'), ((3030, 3095), 'engine.train', 'train', (['model', 'trainloader', 'trainset', 'device', 'optimizer', 'criterion'], {}), '(model, trainloader, trainset, device, optimizer, criterion)\n', (3035, 3095), False, 'from engine import train, validate\n'), ((3150, 3205), 'engine.validate', 'validate', (['model', 'testloader', 'testset', 'device', 'criterion'], {}), '(model, testloader, testset, device, criterion)\n', (3158, 3205), False, 'from engine import train, validate\n'), ((1475, 1500), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1498, 1500), False, 'import torch\n'), ((1546, 1561), 'model.ConvVAE', 'model.ConvVAE', ([], {}), '()\n', (1559, 1561), False, 'import model\n'), ((1882, 1909), 'torchvision.transforms.Resize', 'transforms.Resize', (['(24, 24)'], {}), '((24, 
24))\n', (1899, 1909), True, 'import torchvision.transforms as transforms\n'), ((1416, 1429), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1423, 1429), True, 'import numpy as np\n')] |
__author__ = 'igor'
import pickle
import matplotlib.pyplot as plt
import numpy as np
from loadData import *
# Load the two pickled training histories.
# net1 / net2 hold the per-epoch results recorded during training.
# (The redundant f.close() calls after each 'with' block were removed:
# the context manager already closes the file.)
with open("data/net1.pickle", 'rb') as f1:
    net1 = pickle.load(f1)
with open("data/net2.pickle", 'rb') as f2:
    net2 = pickle.load(f2)
# Extract per-epoch losses; net2 is truncated to its first 400 epochs.
train_loss1 = np.array([i["train_loss"] for i in net1.train_history_])
valid_loss1 = np.array([i["valid_loss"] for i in net1.train_history_])
train_loss2 = np.array([i["train_loss"] for i in net2.train_history_])[:400]
valid_loss2 = np.array([i["valid_loss"] for i in net2.train_history_])[:400]
# Overlay the four curves on a log-scaled loss axis.
plt.plot(train_loss1, linewidth=3, label="train1")
plt.plot(valid_loss1, linewidth=3, label="valid1")
plt.plot(train_loss2, linewidth=3, label="train2", linestyle="--")
plt.plot(valid_loss2, linewidth=3, label="valid2", linestyle="--")
plt.grid()
plt.legend()
plt.xlabel("epoch")
plt.ylabel("loss")
#plt.ylim(1e-3, 1e-2)
plt.yscale("log")
plt.show()
| [
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"pickle.load",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((303, 359), 'numpy.array', 'np.array', (["[i['train_loss'] for i in net1.train_history_]"], {}), "([i['train_loss'] for i in net1.train_history_])\n", (311, 359), True, 'import numpy as np\n'), ((374, 430), 'numpy.array', 'np.array', (["[i['valid_loss'] for i in net1.train_history_]"], {}), "([i['valid_loss'] for i in net1.train_history_])\n", (382, 430), True, 'import numpy as np\n'), ((585, 635), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss1'], {'linewidth': '(3)', 'label': '"""train1"""'}), "(train_loss1, linewidth=3, label='train1')\n", (593, 635), True, 'import matplotlib.pyplot as plt\n'), ((636, 686), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss1'], {'linewidth': '(3)', 'label': '"""valid1"""'}), "(valid_loss1, linewidth=3, label='valid1')\n", (644, 686), True, 'import matplotlib.pyplot as plt\n'), ((687, 753), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss2'], {'linewidth': '(3)', 'label': '"""train2"""', 'linestyle': '"""--"""'}), "(train_loss2, linewidth=3, label='train2', linestyle='--')\n", (695, 753), True, 'import matplotlib.pyplot as plt\n'), ((754, 820), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss2'], {'linewidth': '(3)', 'label': '"""valid2"""', 'linestyle': '"""--"""'}), "(valid_loss2, linewidth=3, label='valid2', linestyle='--')\n", (762, 820), True, 'import matplotlib.pyplot as plt\n'), ((821, 831), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (829, 831), True, 'import matplotlib.pyplot as plt\n'), ((832, 844), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (842, 844), True, 'import matplotlib.pyplot as plt\n'), ((845, 864), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (855, 864), True, 'import matplotlib.pyplot as plt\n'), ((865, 883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (875, 883), True, 'import matplotlib.pyplot as plt\n'), ((906, 923), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", 
(916, 923), True, 'import matplotlib.pyplot as plt\n'), ((924, 934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (932, 934), True, 'import matplotlib.pyplot as plt\n'), ((163, 178), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (174, 178), False, 'import pickle\n'), ((245, 260), 'pickle.load', 'pickle.load', (['f2'], {}), '(f2)\n', (256, 260), False, 'import pickle\n'), ((445, 501), 'numpy.array', 'np.array', (["[i['train_loss'] for i in net2.train_history_]"], {}), "([i['train_loss'] for i in net2.train_history_])\n", (453, 501), True, 'import numpy as np\n'), ((522, 578), 'numpy.array', 'np.array', (["[i['valid_loss'] for i in net2.train_history_]"], {}), "([i['valid_loss'] for i in net2.train_history_])\n", (530, 578), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# PyQT4 imports
from PyQt4 import QtGui, QtCore, QtOpenGL
from PyQt4.QtOpenGL import QGLWidget
# PyOpenGL imports
import OpenGL.GL as gl
import OpenGL.arrays.vbo as glvbo
class GLPlotWidget(QGLWidget):
    """OpenGL widget that renders a 2D point cloud from a vertex buffer."""

    # Default window size.
    width, height = 600, 600

    def set_data(self, data):
        """Store an Nx2 Numpy array of 2D points to be plotted."""
        self.data = data
        self.count = data.shape[0]

    def initializeGL(self):
        """Initialise GL state and upload the points to a GPU buffer."""
        # Black background.
        gl.glClearColor(0, 0, 0, 0)
        # Vertex Buffer Object holding the point coordinates.
        self.vbo = glvbo.VBO(self.data)

    def paintGL(self):
        """Draw the stored points in yellow."""
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)
        gl.glColor(1, 1, 0)
        self.vbo.bind()
        # The VBO holds an array of 2-component single-precision vertices.
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glVertexPointer(2, gl.GL_FLOAT, 0, self.vbo)
        # Draw 'count' points from the buffer.
        gl.glDrawArrays(gl.GL_POINTS, 0, self.count)

    def resizeGL(self, width, height):
        """Reset the viewport and projection after a window resize."""
        self.width, self.height = width, height
        # Render into the full window.
        gl.glViewport(0, 0, width, height)
        # 2D orthographic projection with window corners at (+/-1, +/-1).
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        gl.glOrtho(-1, 1, -1, 1, -1, 1)

    def keyPressEvent(self, event):
        """Quit the application when 'Q' is pressed."""
        if event.key() == QtCore.Qt.Key_Q:
            QtGui.qApp.quit()
if __name__ == '__main__':
    # import numpy for generating random data points
    import sys
    import numpy as np
    import numpy.random as rdn
    # define a QT window with an OpenGL widget inside it
    class TestWindow(QtGui.QMainWindow):
        """Top-level Qt window hosting a GLPlotWidget filled with random points."""
        def __init__(self):
            super(TestWindow, self).__init__()
            # generate random data points
            self.data = np.array(.2*rdn.randn(1000,2),dtype=np.float32)
            # initialize the GL widget
            self.widget = GLPlotWidget()
            self.widget.set_data(self.data)
            # put the window at the screen position (100, 100)
            self.setGeometry(100, 100, self.widget.width, self.widget.height)
            self.setCentralWidget(self.widget)
            # route key presses (e.g. 'Q' to quit) to the GL widget
            self.widget.setFocusPolicy(QtCore.Qt.StrongFocus)
            self.show()
    # create the QT App and window
    app = QtGui.QApplication(sys.argv)
    window = TestWindow()
    window.show()
    app.exec_()
| [
"OpenGL.GL.glViewport",
"OpenGL.GL.glMatrixMode",
"OpenGL.arrays.vbo.VBO",
"numpy.random.randn",
"OpenGL.GL.glOrtho",
"PyQt4.QtGui.QApplication",
"OpenGL.GL.glEnableClientState",
"OpenGL.GL.glColor",
"OpenGL.GL.glClearColor",
"OpenGL.GL.glVertexPointer",
"OpenGL.GL.glDrawArrays",
"OpenGL.GL.gl... | [((2817, 2845), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2835, 2845), False, 'from PyQt4 import QtGui, QtCore, QtOpenGL\n'), ((570, 597), 'OpenGL.GL.glClearColor', 'gl.glClearColor', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (585, 597), True, 'import OpenGL.GL as gl\n'), ((678, 698), 'OpenGL.arrays.vbo.VBO', 'glvbo.VBO', (['self.data'], {}), '(self.data)\n', (687, 698), True, 'import OpenGL.arrays.vbo as glvbo\n'), ((798, 832), 'OpenGL.GL.glClear', 'gl.glClear', (['gl.GL_COLOR_BUFFER_BIT'], {}), '(gl.GL_COLOR_BUFFER_BIT)\n', (808, 832), True, 'import OpenGL.GL as gl\n'), ((907, 926), 'OpenGL.GL.glColor', 'gl.glColor', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (917, 926), True, 'import OpenGL.GL as gl\n'), ((1045, 1087), 'OpenGL.GL.glEnableClientState', 'gl.glEnableClientState', (['gl.GL_VERTEX_ARRAY'], {}), '(gl.GL_VERTEX_ARRAY)\n', (1067, 1087), True, 'import OpenGL.GL as gl\n'), ((1160, 1207), 'OpenGL.GL.glVertexPointer', 'gl.glVertexPointer', (['(2)', 'gl.GL_FLOAT', '(0)', 'self.vbo'], {}), '(2, gl.GL_FLOAT, 0, self.vbo)\n', (1178, 1207), True, 'import OpenGL.GL as gl\n'), ((1259, 1303), 'OpenGL.GL.glDrawArrays', 'gl.glDrawArrays', (['gl.GL_POINTS', '(0)', 'self.count'], {}), '(gl.GL_POINTS, 0, self.count)\n', (1274, 1303), True, 'import OpenGL.GL as gl\n'), ((1552, 1586), 'OpenGL.GL.glViewport', 'gl.glViewport', (['(0)', '(0)', 'width', 'height'], {}), '(0, 0, width, height)\n', (1565, 1586), True, 'import OpenGL.GL as gl\n'), ((1643, 1676), 'OpenGL.GL.glMatrixMode', 'gl.glMatrixMode', (['gl.GL_PROJECTION'], {}), '(gl.GL_PROJECTION)\n', (1658, 1676), True, 'import OpenGL.GL as gl\n'), ((1685, 1704), 'OpenGL.GL.glLoadIdentity', 'gl.glLoadIdentity', ([], {}), '()\n', (1702, 1704), True, 'import OpenGL.GL as gl\n'), ((1775, 1806), 'OpenGL.GL.glOrtho', 'gl.glOrtho', (['(-1)', '(1)', '(-1)', '(1)', '(-1)', '(1)'], {}), '(-1, 1, -1, 1, -1, 1)\n', (1785, 1806), True, 'import OpenGL.GL as 
gl\n'), ((1917, 1934), 'PyQt4.QtGui.qApp.quit', 'QtGui.qApp.quit', ([], {}), '()\n', (1932, 1934), False, 'from PyQt4 import QtGui, QtCore, QtOpenGL\n'), ((2337, 2355), 'numpy.random.randn', 'rdn.randn', (['(1000)', '(2)'], {}), '(1000, 2)\n', (2346, 2355), True, 'import numpy.random as rdn\n')] |
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
class GaussianNaiveBayes(BaseEstimator):
    """
    Gaussian Naive-Bayes classifier.

    Assumes the features are conditionally independent given the class
    label, each feature being normally distributed within every class.
    """
    def __init__(self):
        """
        Instantiate a Gaussian Naive Bayes classifier
        Attributes
        ----------
        self.classes_ : np.ndarray of shape (n_classes,)
            The different labels classes. To be set in `GaussianNaiveBayes.fit`
        self.mu_ : np.ndarray of shape (n_classes,n_features)
            The estimated features means for each class. To be set in `GaussianNaiveBayes.fit`
        self.vars_ : np.ndarray of shape (n_classes, n_features)
            The estimated features variances for each class. To be set in `GaussianNaiveBayes.fit`
        self.pi_: np.ndarray of shape (n_classes)
            The estimated class probabilities. To be set in `GaussianNaiveBayes.fit`
        """
        super().__init__()
        self.classes_, self.mu_, self.vars_, self.pi_ = None, None, None, None
    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        """
        fits a gaussian naive bayes model
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to fit an estimator for
        y : ndarray of shape (n_samples, )
            Responses of input data to fit to
        """
        m = len(y)
        # fit classes
        self.classes_ = np.unique(y)
        n_k = [np.sum(y == k) for k in self.classes_]  # |{i:y_i=k}| for each label k
        K = len(self.classes_)
        # fit means first: the variance estimate below needs them
        self.fit_mu(X, K, n_k, y)
        # fit variances
        self.fit_var_matrix(X, n_k, y)
        # fit class priors: pi_k = |{i:y_i=k}| / m
        self.pi_ = np.array(n_k, dtype=float) / m
        self.fitted_ = True
    def fit_var_matrix(self, X, n_k, y):
        """
        Estimate the unbiased (ddof=1) per-feature variance of each class
        and store it as an (n_classes, n_features) array in `self.vars_`.
        """
        var_rows = []
        for index, k in enumerate(self.classes_):
            X_relevant_rows = X[y == k]
            # BUGFIX: index mu_ by the class *position* (index), not by the raw
            # label value k -- labels need not be 0..K-1, so indexing by label
            # previously selected the wrong mean (or raised IndexError).
            sq_dev = np.square(X_relevant_rows - self.mu_[index])
            var_rows.append(np.sum(sq_dev, axis=0) / (n_k[index] - 1))
        self.vars_ = np.array(var_rows)
    def fit_mu(self, X, k, n_k, y):
        """
        Estimate the per-feature mean (MLE) of each class and store it as an
        (n_classes, n_features) array in `self.mu_`.

        `k` (the number of classes) is kept for interface compatibility but
        is not needed by the computation.
        """
        temp_mu = []
        for i, label in enumerate(self.classes_):
            # select and sum the rows i in X when y[i] = current label
            sum_relevant_x = np.sum(X[y == label], axis=0)
            # MLE mean for the current label
            temp_mu.append(sum_relevant_x / n_k[i])
        self.mu_ = np.array(temp_mu)
    def _predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predict responses for given samples using fitted estimator
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to predict responses for
        Returns
        -------
        responses : ndarray of shape (n_samples, )
            Predicted responses of given samples
        """
        # pick, per sample, the class with the highest (log-)likelihood
        index = np.argmax(self.likelihood(X), axis=1)
        return self.classes_[index]
    def likelihood(self, X: np.ndarray) -> np.ndarray:
        """
        Calculate the (log-)likelihood of a given data over the estimated model
        Parameters
        ----------
        X : np.ndarray of shape (n_samples, n_features)
            Input data to calculate its likelihood over the different classes.
        Returns
        -------
        likelihoods : np.ndarray of shape (n_samples, n_classes)
            The log-likelihood for each sample under each of the classes
        Raises
        ------
        ValueError
            If the estimator has not been fitted yet.
        """
        if not self.fitted_:
            raise ValueError("Estimator must first be fitted before calling `likelihood` function")
        likelihoods = []
        for k in range(len(self.classes_)):
            log_prior = np.log(self.pi_[k])
            # sum over features of the Gaussian negative log-density terms:
            # log sqrt(2*pi*sigma) + (x - mu)^2 / (2*sigma)
            log_density = np.sum(
                np.log(np.sqrt(2 * np.pi * self.vars_[k]))
                + np.square(X - self.mu_[k]) / (2 * self.vars_[k]),
                axis=1)
            likelihoods.append(log_prior - log_density)
        return np.array(likelihoods).T
    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
        """
        Evaluate performance under misclassification loss function
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test samples
        y : ndarray of shape (n_samples, )
            True labels of test samples
        Returns
        -------
        loss : float
            Performance under missclassification loss function
        """
        from ...metrics import misclassification_error
        return misclassification_error(y, self._predict(X))
| [
"numpy.sum",
"numpy.log",
"numpy.square",
"numpy.zeros",
"numpy.array",
"numpy.unique",
"numpy.sqrt"
] | [((1444, 1456), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1453, 1456), True, 'import numpy as np\n'), ((1721, 1732), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (1729, 1732), True, 'import numpy as np\n'), ((2267, 2281), 'numpy.array', 'np.array', (['vars'], {}), '(vars)\n', (2275, 2281), True, 'import numpy as np\n'), ((2713, 2730), 'numpy.array', 'np.array', (['temp_mu'], {}), '(temp_mu)\n', (2721, 2730), True, 'import numpy as np\n'), ((1472, 1486), 'numpy.sum', 'np.sum', (['(y == k)'], {}), '(y == k)\n', (1478, 1486), True, 'import numpy as np\n'), ((2534, 2565), 'numpy.sum', 'np.sum', (['X_relevant_rows'], {'axis': '(0)'}), '(X_relevant_rows, axis=0)\n', (2540, 2565), True, 'import numpy as np\n'), ((4015, 4034), 'numpy.log', 'np.log', (['self.pi_[k]'], {}), '(self.pi_[k])\n', (4021, 4034), True, 'import numpy as np\n'), ((4453, 4474), 'numpy.array', 'np.array', (['likelihoods'], {}), '(likelihoods)\n', (4461, 4474), True, 'import numpy as np\n'), ((4358, 4379), 'numpy.array', 'np.array', (['part_2_list'], {}), '(part_2_list)\n', (4366, 4379), True, 'import numpy as np\n'), ((2115, 2158), 'numpy.square', 'np.square', (['(X_relevant_rows[i] - self.mu_[k])'], {}), '(X_relevant_rows[i] - self.mu_[k])\n', (2124, 2158), True, 'import numpy as np\n'), ((2191, 2216), 'numpy.array', 'np.array', (['kth_row_content'], {}), '(kth_row_content)\n', (2199, 2216), True, 'import numpy as np\n'), ((4244, 4274), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma_k_j)'], {}), '(2 * np.pi * sigma_k_j)\n', (4251, 4274), True, 'import numpy as np\n'), ((4281, 4308), 'numpy.square', 'np.square', (['(X[:, j] - mu_k_j)'], {}), '(X[:, j] - mu_k_j)\n', (4290, 4308), True, 'import numpy as np\n')] |
import asyncio
import time
from asyncio import coroutine
import numpy as np
from openvpp_agents.core.observer.monitoring import Monitoring
import psutil
class PerformanceMonitoring(Monitoring):
    """Monitoring subclass that additionally samples CPU/memory usage of
    this process tree for the duration of a run / negotiation.

    Sampling runs in a background executor thread; the collected samples
    are persisted next to the regular monitoring data on flush().
    """
    def __init__(self, dbfile):
        super().__init__(dbfile)
        # Flag polled by the sampler thread; True means "stop sampling".
        self._stop_monitor_run = True
        # Future of the currently running sampler (None while idle).
        self._task_monitor_run = None
        self._loop = asyncio.get_event_loop()
    def setup(self, date):
        """Setup performance monitoring for a new run / negotiation.
        This also triggers the setup of the parent monitoring class.
        :param date: The begin date of the target_schedule for the
            negotiation.
        """
        super().setup(date)
        # Monitoring setup: start the sampler in a default-executor thread
        self._stop_monitor_run = False
        self._task_monitor_run = self._loop.run_in_executor(
            None, self._monitor_run)
    @coroutine
    def flush(self, target_schedule, weights, solution):
        """Stop the sampler, store its data, then flush the parent class."""
        # performance monitoring: signal the sampler thread to stop and
        # wait for it to hand back the collected samples
        self._stop_monitor_run = True
        perf_data = yield from self._task_monitor_run
        # NOTE(review): self._topgroup is presumably created by the parent
        # Monitoring class during setup() -- confirm against its implementation.
        self._store_perf_data(self._topgroup, perf_data)
        # self._db.flush()
        # general monitoring flushes database
        yield from super().flush(target_schedule, weights, solution)
    def _monitor_run(self):
        """Sample CPU and memory usage of this process and all descendants
        every ~10 ms until `_stop_monitor_run` is set.

        Runs in an executor thread; returns a list of
        (monotonic time, summed cpu percent, summed memory bytes) tuples.
        """
        def add_children(proc):
            # recursively collect the whole process subtree
            for c in proc.children():
                procs.append(c)
                add_children(c)
        procs = [psutil.Process()]
        add_children(procs[0])
        mem_bytes = psutil.virtual_memory().total
        # memory_percent() yields percent of total RAM; this factor turns
        # the summed percentages back into bytes
        mem_percent = mem_bytes / 100
        data = []
        try:
            while not self._stop_monitor_run:
                cpu = sum(p.cpu_percent() for p in procs)
                mem = sum(p.memory_percent() for p in procs) * mem_percent
                t = time.monotonic()
                data.append((t, cpu, mem))
                time.sleep(0.01)
        except psutil.AccessDenied:
            # May happen when the monitored procs terminated while we slept
            # NOTE(review): vanished children may raise psutil.NoSuchProcess,
            # which is currently not caught here -- verify.
            pass
        return data
    def _store_perf_data(self, group, perf_data):
        """Write the (t, cpu, mem) samples as a structured dataset named
        'perf_data' into the given group (presumably an HDF5 group)."""
        dtype = np.dtype([
            ('t', 'float64'),
            ('cpu_percent', 'float32'),
            ('mem_bytes', 'uint64'),
        ])
        perf_data = np.array(perf_data, dtype=dtype)
        group.create_dataset('perf_data', data=perf_data)
| [
"psutil.virtual_memory",
"psutil.Process",
"asyncio.get_event_loop",
"numpy.dtype",
"time.sleep",
"time.monotonic",
"numpy.array"
] | [((362, 386), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (384, 386), False, 'import asyncio\n'), ((2106, 2191), 'numpy.dtype', 'np.dtype', (["[('t', 'float64'), ('cpu_percent', 'float32'), ('mem_bytes', 'uint64')]"], {}), "([('t', 'float64'), ('cpu_percent', 'float32'), ('mem_bytes',\n 'uint64')])\n", (2114, 2191), True, 'import numpy as np\n'), ((2255, 2287), 'numpy.array', 'np.array', (['perf_data'], {'dtype': 'dtype'}), '(perf_data, dtype=dtype)\n', (2263, 2287), True, 'import numpy as np\n'), ((1428, 1444), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (1442, 1444), False, 'import psutil\n'), ((1498, 1521), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (1519, 1521), False, 'import psutil\n'), ((1796, 1812), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1810, 1812), False, 'import time\n'), ((1872, 1888), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1882, 1888), False, 'import time\n')] |
# class for RANS ContinuityEquationWithFavrianDilatation #
import numpy as np
import sys
from scipy import integrate
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class ContinuityEquationWithFavrianDilatation(Calculus, SetAxisLimit, Tools, Errors, object):
    """RANS continuity equation expressed with the Favrian dilatation.

    Loads Reynolds-averaged mean fields from `filename`, assembles each term
    of the mean continuity equation (plus its residual) and keeps them as
    attributes for the plot_* methods below.
    Theoretical background: https://arxiv.org/abs/1401.5176
    """
    def __init__(self, filename, ig, fext, intc, nsdim, data_prefix):
        # ig    : geometry flag (1 = Cartesian, 2 = spherical)
        # fext  : output figure extension ('png' or 'eps')
        # intc  : central time index used to pick the averaging window
        # nsdim : number of spatial dimensions of the simulation
        super(ContinuityEquationWithFavrianDilatation, self).__init__(ig)
        # load data to structured array
        eht = self.customLoad(filename)
        # load grid
        xzn0 = self.getRAdata(eht, 'xzn0')
        yzn0 = self.getRAdata(eht, 'yzn0')
        zzn0 = self.getRAdata(eht, 'zzn0')
        nx = self.getRAdata(eht, 'nx')
        # pick equation-specific Reynolds-averaged mean fields according to:
        # https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
        dd = self.getRAdata(eht, 'dd')[intc]
        ux = self.getRAdata(eht, 'ux')[intc]
        ddux = self.getRAdata(eht, 'ddux')[intc]
        mm = self.getRAdata(eht, 'mm')[intc]
        # store time series for time derivatives
        t_timec = self.getRAdata(eht, 'timec')
        t_dd = self.getRAdata(eht, 'dd')
        # construct equation-specific mean fields
        fht_ux = ddux / dd  # Favre (density-weighted) mean velocity
        #############################################
        # CONTINUITY EQUATION WITH FAVRIAN DILATATION
        #############################################
        # LHS -dq/dt
        self.minus_dt_dd = -self.dt(t_dd, xzn0, t_timec, intc)
        # LHS -fht_ux Grad dd
        self.minus_fht_ux_grad_dd = -fht_ux * self.Grad(dd, xzn0)
        # RHS -dd Div fht_ux
        self.minus_dd_div_fht_ux = -dd * self.Div(fht_ux, xzn0)
        # -res (whatever the three terms above do not balance)
        self.minus_resContEquation = -(self.minus_dt_dd + self.minus_fht_ux_grad_dd + self.minus_dd_div_fht_ux)
        #################################################
        # END CONTINUITY EQUATION WITH FAVRIAN DILATATION
        #################################################
        # ad hoc variables
        vol = (4. / 3.) * np.pi * xzn0 ** 3  # enclosed spherical volume
        mm_ver2 = dd * vol  # alternative mass estimate: mean density times volume
        # -Div fdd for boundary identification
        fdd = ddux - dd * ux  # density-velocity correlation (turbulent mass flux)
        self.minus_div_fdd = -self.Div(fdd, xzn0)
        # assign global data to be shared across whole class
        self.data_prefix = data_prefix
        self.xzn0 = xzn0
        self.yzn0 = yzn0
        self.zzn0 = zzn0
        self.dd = dd
        self.nx = nx
        self.ig = ig
        self.fext = fext
        self.vol = vol
        self.mm = mm
        self.mm_ver2 = mm_ver2
        self.nsdim = nsdim
def plot_rho(self, laxis, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
"""Plot rho stratification in the model"""
# check supported geometries
if self.ig != 1 and self.ig != 2:
print("ERROR(ContinuityEquationWithFavrianDilatation.py):" + self.errorGeometry(self.ig))
sys.exit()
# load x GRID
grd1 = self.xzn0
# load DATA to plot
plt1 = self.dd
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [plt1]
self.set_plt_axis(laxis, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('density')
plt.plot(grd1, plt1, color='brown', label=r'$\overline{\rho}$')
# convective boundary markers
#plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
#plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r'x (cm)'
plt.xlabel(setxlabel)
elif self.ig == 2:
setxlabel = r'r (cm)'
plt.xlabel(setxlabel)
setylabel = r"$\overline{\rho}$ (g cm$^{-3}$)"
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# check supported file output extension
if self.fext != "png" and self.fext != "eps":
print("ERROR(ContinuityEquationWithFavrianDilatation.py):" + self.errorOutputFileExtension(self.fext))
sys.exit()
# save PLOT
if self.fext == "png":
plt.savefig('RESULTS/' + self.data_prefix + 'mean_rho.png')
if self.fext == "eps":
plt.savefig('RESULTS/' + self.data_prefix + 'mean_rho.eps')
    def plot_continuity_equation(self, laxis, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
        """Plot continuity equation in the model.

        Draws every term of the mean continuity equation plus its residual
        against the radial grid; bconv/tconv mark the convective boundaries.
        """
        # check supported geometries
        if self.ig != 1 and self.ig != 2:
            print("ERROR(ContinuityEquationWithFavrianDilatation.py):" + self.errorGeometry(self.ig))
            sys.exit()
        # load x GRID
        grd1 = self.xzn0
        lhs0 = self.minus_dt_dd
        lhs1 = self.minus_fht_ux_grad_dd
        rhs0 = self.minus_dd_div_fht_ux
        res = self.minus_resContEquation
        # create FIGURE
        plt.figure(figsize=(7, 6))
        # format AXIS, make sure it is exponential
        plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
        # set plot boundaries
        to_plot = [lhs0, lhs1, rhs0, res]
        self.set_plt_axis(laxis, xbl, xbr, ybu, ybd, to_plot)
        # plot DATA
        plt.title(r"continuity equation with Favrian dilatation " + str(self.nsdim) + "D")
        # plt.title(r"Equation 14")
        if self.ig == 1:
            plt.plot(grd1, lhs0, color='g', linewidth= 4, label=r'$-\partial_t \overline{\rho}$')
            plt.plot(grd1, lhs1, color='r', label=r'$- \widetilde{u}_x \partial_x \overline{\rho}$')
            # plt.plot(grd1, rhs0, color='b', label=r'$-\overline{\rho} \nabla_x (\widetilde{u}_x)$')
            plt.plot(grd1, rhs0, color='b', label=r'$-\overline{\rho} \widetilde{d}$')
            plt.plot(grd1, res, color='k', linestyle='--', label='+res')
        elif self.ig == 2:
            plt.plot(grd1, lhs0, color='g', label=r'$-\partial_t (\overline{\rho})$')
            plt.plot(grd1, lhs1, color='r', label=r'$- \widetilde{u}_r \partial_r (\overline{\rho})$')
            plt.plot(grd1, rhs0, color='b', label=r'$-\overline{\rho} \nabla_r (\widetilde{u}_r)$')
            plt.plot(grd1, res, color='k', linestyle='--', label='+res')
        # shade boundaries (kept disabled on purpose; experimental boundary shading)
        #ind1 = self.nx/2 + np.where((self.minus_div_fdd[(self.nx/2):self.nx] > 6.))[0]
        #rinc = grd1[ind1[0]]
        #routc = grd1[ind1[-1]]
        #plt.fill([rinc, routc, routc, rinc], [ybd, ybd, ybu, ybu], 'y', edgecolor='w')
        #ind2 = np.where((self.minus_div_fdd[0:(self.nx/2)] > 0.0))[0]
        #rinc = grd1[ind2[0]]
        #routc = grd1[ind2[-1]]
        #print(rinc,routc,ind2[0],ind2[-1],ind2,(self.nx/2),self.nx)
        #print(self.nx)
        #plt.fill([rinc, routc, routc, rinc], [ybd, ybd, ybu, ybu], 'y', edgecolor='w')
        # convective boundary markers
        plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
        plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
        # define and show x/y LABELS
        if self.ig == 1:
            setxlabel = r'x (cm)'
            plt.xlabel(setxlabel)
        elif self.ig == 2:
            setxlabel = r'r (cm)'
            plt.xlabel(setxlabel)
        setylabel = r"g cm$^{-3}$ s$^{-1}$"
        plt.ylabel(setylabel)
        # show LEGEND
        plt.legend(loc=ilg, prop={'size': 14})
        # display PLOT
        plt.show(block=False)
        # check supported file output extension
        if self.fext != "png" and self.fext != "eps":
            print("ERROR(ContinuityEquationWithFavrianDilatation.py):" + self.errorOutputFileExtension(self.fext))
            sys.exit()
        # save PLOT
        if self.fext == "png":
            plt.savefig('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq.png')
        if self.fext == "eps":
            plt.savefig('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq.eps')
def plot_continuity_equation_integral_budget(self, laxis, xbl, xbr, ybu, ybd):
"""Plot integral budgets of continuity equation in the model"""
# check supported geometries
if self.ig != 1 and self.ig != 2:
print("ERROR(ContinuityEquationWithFavrianDilatation.py):" + self.errorGeometry(self.ig))
sys.exit()
term1 = self.minus_dt_dd
term2 = self.minus_fht_ux_grad_dd
term3 = self.minus_dd_div_fht_ux
term4 = self.minus_resContEquation
# hack for the ccp setup getting rid of bndry noise
fct1 = 0.5e-1
fct2 = 1.e-1
xbl = xbl + fct1*xbl
xbr = xbr - fct2*xbl
print(xbl,xbr)
# calculate INDICES for grid boundaries
if laxis == 1 or laxis == 2:
idxl, idxr = self.idx_bndry(xbl, xbr)
else:
idxl = 0
idxr = self.nx - 1
term1_sel = term1[idxl:idxr]
term2_sel = term2[idxl:idxr]
term3_sel = term3[idxl:idxr]
term4_sel = term4[idxl:idxr]
rc = self.xzn0[idxl:idxr]
# handle geometry
Sr = 0.
if self.ig == 1:
Sr = (self.yzn0[-1] - self.yzn0[0]) * (self.zzn0[-1] - self.zzn0[0])
elif self.ig == 2:
Sr = 4. * np.pi * rc ** 2
int_term1 = integrate.simps(term1_sel * Sr, rc)
int_term2 = integrate.simps(term2_sel * Sr, rc)
int_term3 = integrate.simps(term3_sel * Sr, rc)
int_term4 = integrate.simps(term4_sel * Sr, rc)
fig = plt.figure(figsize=(7, 6))
ax = fig.add_subplot(1, 1, 1)
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.xaxis.grid(color='gray', linestyle='dashed')
if laxis == 2:
plt.ylim([ybd, ybu])
fc = 1.
# note the change: I'm only supplying y data.
y = [int_term1 / fc, int_term2 / fc, int_term3 / fc, int_term4 / fc]
# calculate how many bars there will be
N = len(y)
# Generate a list of numbers, from 0 to N
# This will serve as the (arbitrary) x-axis, which
# we will then re-label manually.
ind = range(N)
# See note below on the breakdown of this command
ax.bar(ind, y, facecolor='#0000FF',
align='center', ecolor='black')
# Create a y label
ax.set_ylabel(r'g s$^{-1}$')
# Create a title, in italics
ax.set_title(r"continuity with $\widetilde{d}$ integral budget")
# This sets the ticks on the x axis to be exactly where we put
# the center of the bars.
ax.set_xticks(ind)
# Labels for the ticks on the x axis. It needs to be the same length
# as y (one label for each bar)
if self.ig == 1:
group_labels = [r"$-\overline{\rho} \widetilde{d}$", r"$-\partial_t \overline{\rho}$",
r"$-\widetilde{u}_x \partial_x \overline{\rho}$", 'res']
# Set the x tick labels to the group_labels defined above.
ax.set_xticklabels(group_labels, fontsize=16)
elif self.ig == 2:
group_labels = [r"$-\overline{\rho} \nabla_r \widetilde{u}_r$", r"$-\partial_t \overline{\rho}$",
r"$-\widetilde{u}_r \partial_r \overline{\rho}$", 'res']
# Set the x tick labels to the group_labels defined above.
ax.set_xticklabels(group_labels, fontsize=16)
# auto-rotate the x axis labels
fig.autofmt_xdate()
# display PLOT
plt.show(block=False)
# check supported file output extension
if self.fext != "png" and self.fext != "eps":
print("ERROR(ContinuityEquationWithFavrianDilatation.py):" + self.errorOutputFileExtension(self.fext))
sys.exit()
# save PLOT
if self.fext == "png":
plt.savefig('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq_bar.png')
if self.fext == "eps":
plt.savefig('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq_bar.eps')
def plot_mm_vs_MM(self, laxis, xbl, xbr, ybu, ybd, ilg):
"""Plot mm vs MM in the model"""
# load x GRID
grd1 = self.xzn0
mm = self.mm_ver2
MM = self.mm
mm_lnV = mm * np.log(self.vol)
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [mm, MM, mm_lnV]
self.set_plt_axis(laxis, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('mm vs MM')
plt.plot(grd1, mm, color='g', label=r'$+\overline{m}$')
plt.plot(grd1, MM, color='r', label=r'$+\overline{M}$')
plt.plot(grd1, mm_lnV, color='b', linestyle='--', label=r'$+\overline{m} \ ln \ V$')
setxlabel = r'r (cm)'
setylabel = r"grams"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 12})
# display PLOT
plt.show(block=False)
# check supported file output extension
if self.fext != "png" and self.fext != "eps":
print("ERROR(ContinuityEquationWithFavrianDilatation.py):" + self.errorOutputFileExtension(self.fext))
sys.exit()
# save PLOT
if self.fext == "png":
plt.savefig('RESULTS/' + self.data_prefix + 'mm_vs_MM_eq.png')
if self.fext == "eps":
plt.savefig('RESULTS/' + self.data_prefix + 'mm_vs_MM_eq.eps')
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotl... | [((3413, 3439), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 6)'}), '(figsize=(7, 6))\n', (3423, 3439), True, 'import matplotlib.pyplot as plt\n'), ((3713, 3733), 'matplotlib.pyplot.title', 'plt.title', (['"""density"""'], {}), "('density')\n", (3722, 3733), True, 'import matplotlib.pyplot as plt\n'), ((3742, 3806), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'plt1'], {'color': '"""brown"""', 'label': '"""$\\\\overline{\\\\rho}$"""'}), "(grd1, plt1, color='brown', label='$\\\\overline{\\\\rho}$')\n", (3750, 3806), True, 'import matplotlib.pyplot as plt\n'), ((4275, 4296), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['setylabel'], {}), '(setylabel)\n', (4285, 4296), True, 'import matplotlib.pyplot as plt\n'), ((4328, 4366), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'ilg', 'prop': "{'size': 18}"}), "(loc=ilg, prop={'size': 18})\n", (4338, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4399, 4420), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4407, 4420), True, 'import matplotlib.pyplot as plt\n'), ((5471, 5497), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 6)'}), '(figsize=(7, 6))\n', (5481, 5497), True, 'import matplotlib.pyplot as plt\n'), ((7415, 7475), 'matplotlib.pyplot.axvline', 'plt.axvline', (['bconv'], {'linestyle': '"""--"""', 'linewidth': '(0.7)', 'color': '"""k"""'}), "(bconv, linestyle='--', linewidth=0.7, color='k')\n", (7426, 7475), True, 'import matplotlib.pyplot as plt\n'), ((7484, 7544), 'matplotlib.pyplot.axvline', 'plt.axvline', (['tconv'], {'linestyle': '"""--"""', 'linewidth': '(0.7)', 'color': '"""k"""'}), "(tconv, linestyle='--', linewidth=0.7, color='k')\n", (7495, 7544), True, 'import matplotlib.pyplot as plt\n'), ((7824, 7845), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['setylabel'], {}), '(setylabel)\n', (7834, 7845), True, 'import matplotlib.pyplot as plt\n'), ((7877, 7915), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{'loc': 'ilg', 'prop': "{'size': 14}"}), "(loc=ilg, prop={'size': 14})\n", (7887, 7915), True, 'import matplotlib.pyplot as plt\n'), ((7948, 7969), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7956, 7969), True, 'import matplotlib.pyplot as plt\n'), ((9791, 9826), 'scipy.integrate.simps', 'integrate.simps', (['(term1_sel * Sr)', 'rc'], {}), '(term1_sel * Sr, rc)\n', (9806, 9826), False, 'from scipy import integrate\n'), ((9847, 9882), 'scipy.integrate.simps', 'integrate.simps', (['(term2_sel * Sr)', 'rc'], {}), '(term2_sel * Sr, rc)\n', (9862, 9882), False, 'from scipy import integrate\n'), ((9903, 9938), 'scipy.integrate.simps', 'integrate.simps', (['(term3_sel * Sr)', 'rc'], {}), '(term3_sel * Sr, rc)\n', (9918, 9938), False, 'from scipy import integrate\n'), ((9959, 9994), 'scipy.integrate.simps', 'integrate.simps', (['(term4_sel * Sr)', 'rc'], {}), '(term4_sel * Sr, rc)\n', (9974, 9994), False, 'from scipy import integrate\n'), ((10010, 10036), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 6)'}), '(figsize=(7, 6))\n', (10020, 10036), True, 'import matplotlib.pyplot as plt\n'), ((12007, 12028), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (12015, 12028), True, 'import matplotlib.pyplot as plt\n'), ((12802, 12828), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 6)'}), '(figsize=(7, 6))\n', (12812, 12828), True, 'import matplotlib.pyplot as plt\n'), ((13112, 13133), 'matplotlib.pyplot.title', 'plt.title', (['"""mm vs MM"""'], {}), "('mm vs MM')\n", (13121, 13133), True, 'import matplotlib.pyplot as plt\n'), ((13143, 13198), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'mm'], {'color': '"""g"""', 'label': '"""$+\\\\overline{m}$"""'}), "(grd1, mm, color='g', label='$+\\\\overline{m}$')\n", (13151, 13198), True, 'import matplotlib.pyplot as plt\n'), ((13207, 13262), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'MM'], {'color': '"""r"""', 'label': 
'"""$+\\\\overline{M}$"""'}), "(grd1, MM, color='r', label='$+\\\\overline{M}$')\n", (13215, 13262), True, 'import matplotlib.pyplot as plt\n'), ((13271, 13362), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'mm_lnV'], {'color': '"""b"""', 'linestyle': '"""--"""', 'label': '"""$+\\\\overline{m} \\\\ ln \\\\ V$"""'}), "(grd1, mm_lnV, color='b', linestyle='--', label=\n '$+\\\\overline{m} \\\\ ln \\\\ V$')\n", (13279, 13362), True, 'import matplotlib.pyplot as plt\n'), ((13425, 13446), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['setxlabel'], {}), '(setxlabel)\n', (13435, 13446), True, 'import matplotlib.pyplot as plt\n'), ((13455, 13476), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['setylabel'], {}), '(setylabel)\n', (13465, 13476), True, 'import matplotlib.pyplot as plt\n'), ((13508, 13546), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'ilg', 'prop': "{'size': 12}"}), "(loc=ilg, prop={'size': 12})\n", (13518, 13546), True, 'import matplotlib.pyplot as plt\n'), ((13579, 13600), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (13587, 13600), True, 'import matplotlib.pyplot as plt\n'), ((3269, 3279), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3277, 3279), False, 'import sys\n'), ((4094, 4115), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['setxlabel'], {}), '(setxlabel)\n', (4104, 4115), True, 'import matplotlib.pyplot as plt\n'), ((4651, 4661), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4659, 4661), False, 'import sys\n'), ((4726, 4785), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('RESULTS/' + self.data_prefix + 'mean_rho.png')"], {}), "('RESULTS/' + self.data_prefix + 'mean_rho.png')\n", (4737, 4785), True, 'import matplotlib.pyplot as plt\n'), ((4829, 4888), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('RESULTS/' + self.data_prefix + 'mean_rho.eps')"], {}), "('RESULTS/' + self.data_prefix + 'mean_rho.eps')\n", (4840, 4888), True, 'import matplotlib.pyplot as plt\n'), ((5222, 5232), 'sys.exit', 'sys.exit', ([], 
{}), '()\n', (5230, 5232), False, 'import sys\n'), ((5944, 6035), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'lhs0'], {'color': '"""g"""', 'linewidth': '(4)', 'label': '"""$-\\\\partial_t \\\\overline{\\\\rho}$"""'}), "(grd1, lhs0, color='g', linewidth=4, label=\n '$-\\\\partial_t \\\\overline{\\\\rho}$')\n", (5952, 6035), True, 'import matplotlib.pyplot as plt\n'), ((6042, 6138), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'lhs1'], {'color': '"""r"""', 'label': '"""$- \\\\widetilde{u}_x \\\\partial_x \\\\overline{\\\\rho}$"""'}), "(grd1, lhs1, color='r', label=\n '$- \\\\widetilde{u}_x \\\\partial_x \\\\overline{\\\\rho}$')\n", (6050, 6138), True, 'import matplotlib.pyplot as plt\n'), ((6245, 6321), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'rhs0'], {'color': '"""b"""', 'label': '"""$-\\\\overline{\\\\rho} \\\\widetilde{d}$"""'}), "(grd1, rhs0, color='b', label='$-\\\\overline{\\\\rho} \\\\widetilde{d}$')\n", (6253, 6321), True, 'import matplotlib.pyplot as plt\n'), ((6332, 6392), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'res'], {'color': '"""k"""', 'linestyle': '"""--"""', 'label': '"""+res"""'}), "(grd1, res, color='k', linestyle='--', label='+res')\n", (6340, 6392), True, 'import matplotlib.pyplot as plt\n'), ((7654, 7675), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['setxlabel'], {}), '(setxlabel)\n', (7664, 7675), True, 'import matplotlib.pyplot as plt\n'), ((8200, 8210), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8208, 8210), False, 'import sys\n'), ((8275, 8347), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq.png')"], {}), "('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq.png')\n", (8286, 8347), True, 'import matplotlib.pyplot as plt\n'), ((8391, 8463), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq.eps')"], {}), "('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq.eps')\n", (8402, 8463), True, 'import 
matplotlib.pyplot as plt\n'), ((8814, 8824), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8822, 8824), False, 'import sys\n'), ((10224, 10244), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[ybd, ybu]'], {}), '([ybd, ybu])\n', (10232, 10244), True, 'import matplotlib.pyplot as plt\n'), ((12259, 12269), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12267, 12269), False, 'import sys\n'), ((12334, 12410), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq_bar.png')"], {}), "('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq_bar.png')\n", (12345, 12410), True, 'import matplotlib.pyplot as plt\n'), ((12454, 12530), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq_bar.eps')"], {}), "('RESULTS/' + self.data_prefix + 'continuityFavreDil_eq_bar.eps')\n", (12465, 12530), True, 'import matplotlib.pyplot as plt\n'), ((12752, 12768), 'numpy.log', 'np.log', (['self.vol'], {}), '(self.vol)\n', (12758, 12768), True, 'import numpy as np\n'), ((13831, 13841), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13839, 13841), False, 'import sys\n'), ((13906, 13968), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('RESULTS/' + self.data_prefix + 'mm_vs_MM_eq.png')"], {}), "('RESULTS/' + self.data_prefix + 'mm_vs_MM_eq.png')\n", (13917, 13968), True, 'import matplotlib.pyplot as plt\n'), ((14012, 14074), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('RESULTS/' + self.data_prefix + 'mm_vs_MM_eq.eps')"], {}), "('RESULTS/' + self.data_prefix + 'mm_vs_MM_eq.eps')\n", (14023, 14074), True, 'import matplotlib.pyplot as plt\n'), ((4189, 4210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['setxlabel'], {}), '(setxlabel)\n', (4199, 4210), True, 'import matplotlib.pyplot as plt\n'), ((6432, 6507), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'lhs0'], {'color': '"""g"""', 'label': '"""$-\\\\partial_t (\\\\overline{\\\\rho})$"""'}), "(grd1, lhs0, color='g', label='$-\\\\partial_t 
(\\\\overline{\\\\rho})$')\n", (6440, 6507), True, 'import matplotlib.pyplot as plt\n'), ((6518, 6616), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'lhs1'], {'color': '"""r"""', 'label': '"""$- \\\\widetilde{u}_r \\\\partial_r (\\\\overline{\\\\rho})$"""'}), "(grd1, lhs1, color='r', label=\n '$- \\\\widetilde{u}_r \\\\partial_r (\\\\overline{\\\\rho})$')\n", (6526, 6616), True, 'import matplotlib.pyplot as plt\n'), ((6621, 6716), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'rhs0'], {'color': '"""b"""', 'label': '"""$-\\\\overline{\\\\rho} \\\\nabla_r (\\\\widetilde{u}_r)$"""'}), "(grd1, rhs0, color='b', label=\n '$-\\\\overline{\\\\rho} \\\\nabla_r (\\\\widetilde{u}_r)$')\n", (6629, 6716), True, 'import matplotlib.pyplot as plt\n'), ((6721, 6781), 'matplotlib.pyplot.plot', 'plt.plot', (['grd1', 'res'], {'color': '"""k"""', 'linestyle': '"""--"""', 'label': '"""+res"""'}), "(grd1, res, color='k', linestyle='--', label='+res')\n", (6729, 6781), True, 'import matplotlib.pyplot as plt\n'), ((7749, 7770), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['setxlabel'], {}), '(setxlabel)\n', (7759, 7770), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3509), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3507, 3509), True, 'import matplotlib.pyplot as plt\n'), ((5558, 5567), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5565, 5567), True, 'import matplotlib.pyplot as plt\n'), ((12889, 12898), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12896, 12898), True, 'import matplotlib.pyplot as plt\n')] |
from collections import OrderedDict, namedtuple
from typing import Union, Tuple, MutableMapping, Optional
from numbers import Number
import numpy as np
import torch
import torch.optim as optim
from rlkit.core.loss import LossFunction
from torch.distributions import Bernoulli
from torch.distributions.kl import kl_divergence
from torch import nn as nn
import rlkit.torch.pytorch_util as ptu
from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.misc.ml_util import ScalarSchedule, ConstantSchedule
from rlkit.torch.torch_rl_algorithm import TorchTrainer
from rlkit.core.logging import add_prefix
# Container for the four SAC-style training losses produced by one call to
# PGRTrainer.compute_loss.
# BUG FIX: the namedtuple typename was 'SACLosses' while the bound name is
# PGRLosses; the mismatch breaks pickling (pickle looks the class up by its
# __name__ in this module) and yields a misleading repr.
PGRLosses = namedtuple(
    'PGRLosses',
    'policy_loss qf1_loss qf2_loss alpha_loss',
)
class PGRTrainer(TorchTrainer, LossFunction):
    """SAC-style trainer whose discount factor is configurable: a fixed
    prior, a learned model, or a value inferred from the Q-values and
    rewards themselves ("posterior" discount)."""

    # Valid ``reward_type`` settings.
    NORMAL_REWARD = 'normal'
    DISCOUNTED_REWARD = 'discounted'
    DISCOUNTED_PLUS_TIME_KL = 'discounted_plus_time_kl'
    # Valid ``discount_type`` settings.
    LEARNED_DISCOUNT = 'learned'
    PRIOR_DISCOUNT = 'prior'
    COMPUTED_DISCOUNT = 'computed_from_qr'
    COMPUTED_DISCOUNT_NO_PRIOR = 'computed_from_qr_no_prior'

    def __init__(
            self,
            env,
            policy,
            qf1,
            qf2,
            target_qf1,
            target_qf2,
            reward_type,
            discount_type=None,
            discount_model=None,
            discount=0.99,
            prior_discount_weight_schedule: Optional[ScalarSchedule] = None,
            multiply_bootstrap_by_prior_discount=False,
            upper_bound_discount_by_prior=False,
            reward_scale=1.0,
            reward_tracking_momentum=0.999,
            auto_init_qf_bias=False,
            policy_lr=1e-3,
            qf_lr=1e-3,
            optimizer_class=optim.Adam,
            soft_target_tau=1e-2,
            target_update_period=1,
            plotter=None,
            render_eval_paths=False,
            use_automatic_entropy_tuning=True,
            target_entropy=None,
    ):
        """
        :param env:
        :param policy:
        :param qf1:
        :param qf2:
        :param target_qf1:
        :param target_qf2:
        :param reward_type:
        :param discount_type:
        :param discount_model:
        :param discount:
        :param prior_discount_weight_schedule:
            At epoch i, use the discount
                discount = c_i * prior_discount + (1-c_i) posterior_discount
        :param multiply_bootstrap_by_prior_discount:
            If true, when you compute the discount prior, always multiply it by the
            prior discount in addition to the normal prior discount.
        :param upper_bound_discount_by_prior:
            Always upper-bound the discount by the prior.
        :param reward_scale:
        :param reward_tracking_momentum:
        :param auto_init_qf_bias:
            If true, initialize the Q-function output biases from the first
            batch's average soft value (done exactly once).
        :param policy_lr:
        :param qf_lr:
        :param optimizer_class:
        :param soft_target_tau:
        :param target_update_period:
        :param plotter:
        :param render_eval_paths:
        :param use_automatic_entropy_tuning:
        :param target_entropy:
        """
        if reward_type not in {
            self.NORMAL_REWARD,
            self.DISCOUNTED_REWARD,
            self.DISCOUNTED_PLUS_TIME_KL,
        }:
            raise ValueError("Invalid reward type: {}".format(reward_type))
        if discount_type is None:  # preserve old behavior
            if reward_type == self.DISCOUNTED_PLUS_TIME_KL:
                discount_type = self.COMPUTED_DISCOUNT
            else:
                discount_type = self.PRIOR_DISCOUNT
        if discount_type not in {
            self.PRIOR_DISCOUNT,
            self.LEARNED_DISCOUNT,
            self.COMPUTED_DISCOUNT,
            self.COMPUTED_DISCOUNT_NO_PRIOR,
        }:
            raise ValueError("Invalid discount type: {}".format(
                discount_type
            ))
        # BUG FIX: this previously tested ``reward_type == LEARNED_DISCOUNT``
        # (a reward-mode string against a discount-mode constant), so the
        # check could never fire and a missing discount_model only crashed
        # much later inside get_discount_factor.
        if (
            discount_type == self.LEARNED_DISCOUNT
            and discount_model is None
        ):
            raise ValueError(
                "Need to set discount_model for using mode {}".format(
                    discount_type
                )
            )
        if not isinstance(reward_scale, Number) and reward_scale not in {
            'auto_normalize_by_max_magnitude',
            'auto_normalize_by_max_magnitude_times_10',
            'auto_normalize_by_max_magnitude_times_100',
            'auto_normalize_by_max_magnitude_times_invsig_prior',
            'auto_normalize_by_mean_magnitude',
        }:
            raise ValueError("Invalid reward_scale type: {}".format(
                reward_scale
            ))
        super().__init__()
        self.env = env
        self.policy = policy
        self.qf1 = qf1
        self.qf2 = qf2
        self.target_qf1 = target_qf1
        self.target_qf2 = target_qf2
        self.soft_target_tau = soft_target_tau
        self.target_update_period = target_update_period
        self.reward_type = reward_type
        self.discount_type = discount_type
        if prior_discount_weight_schedule is None:
            prior_discount_weight_schedule = ConstantSchedule(0.)
        self._prior_discount_weight_schedule = prior_discount_weight_schedule
        self._multiply_bootstrap_by_prior_discount = (
            multiply_bootstrap_by_prior_discount
        )
        self._upper_bound_discount_by_prior = (
            upper_bound_discount_by_prior
        )
        self._auto_init_qf_bias = auto_init_qf_bias
        self._current_epoch = 0
        self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
        if self.use_automatic_entropy_tuning:
            if target_entropy is None:
                # Use heuristic value from SAC paper
                self.target_entropy = -np.prod(
                    self.env.action_space.shape).item()
            else:
                self.target_entropy = target_entropy
            self.log_alpha = ptu.zeros(1, requires_grad=True)
            self.alpha_optimizer = optimizer_class(
                [self.log_alpha],
                lr=policy_lr,
            )
        self.plotter = plotter
        self.render_eval_paths = render_eval_paths
        self.qf_criterion = nn.MSELoss()
        self.vf_criterion = nn.MSELoss()
        self.policy_optimizer = optimizer_class(
            self.policy.parameters(),
            lr=policy_lr,
        )
        self.qf1_optimizer = optimizer_class(
            self.qf1.parameters(),
            lr=qf_lr,
        )
        self.qf2_optimizer = optimizer_class(
            self.qf2.parameters(),
            lr=qf_lr,
        )
        self.discount_model = discount_model
        self.discount = discount
        self.prior_on_discount = Bernoulli(self.discount)
        self._reward_scale = reward_scale
        self.reward_tracking_momentum = reward_tracking_momentum
        # Running scale estimate used by the 'auto_normalize_*' reward modes.
        self._reward_normalizer = ptu.from_numpy(np.array(1))
        self.eval_statistics = OrderedDict()
        self._need_to_update_eval_statistics = True
        self._qfs_were_initialized = False

    def train_from_torch(self, batch):
        if self._need_to_update_eval_statistics:
            losses, stats = self.compute_loss(batch, return_statistics=True)
            # Compute statistics using only one batch per epoch
            self._need_to_update_eval_statistics = False
            self.eval_statistics = stats
        else:
            losses = self.compute_loss(batch)
        """
        Update networks
        """
        if self.use_automatic_entropy_tuning:
            self.alpha_optimizer.zero_grad()
            losses.alpha_loss.backward()
            self.alpha_optimizer.step()
        self.policy_optimizer.zero_grad()
        losses.policy_loss.backward()
        self.policy_optimizer.step()
        self.qf1_optimizer.zero_grad()
        losses.qf1_loss.backward()
        self.qf1_optimizer.step()
        self.qf2_optimizer.zero_grad()
        losses.qf2_loss.backward()
        self.qf2_optimizer.step()
        # Track the reward scale statistic required by the configured
        # 'auto_normalize_*' mode with an exponential moving average.
        if self._reward_scale == 'auto_normalize_by_mean_magnitude':
            rewards = batch['rewards']
            self._reward_normalizer = (
                self._reward_normalizer * self.reward_tracking_momentum
                + rewards.abs().mean() * (1 - self.reward_tracking_momentum)
            )
        elif self._reward_scale in {
            'auto_normalize_by_max_magnitude',
            'auto_normalize_by_max_magnitude_times_10',
            'auto_normalize_by_max_magnitude_times_100',
            'auto_normalize_by_max_magnitude_times_invsig_prior',
        }:
            rewards = batch['rewards']
            self._reward_normalizer = (
                self._reward_normalizer * self.reward_tracking_momentum
                + rewards.abs().max() * (1 - self.reward_tracking_momentum)
            )
        elif isinstance(self._reward_scale, Number):
            pass
        else:
            raise NotImplementedError()
        if self._num_train_steps % self.target_update_period == 0:
            self.update_target_networks()
        self._num_train_steps += 1
        # NOTE: a second "clear eval statistics" block used to live here; it
        # was dead code because the flag is already cleared at the top of
        # this method whenever statistics are computed.

    def update_target_networks(self):
        # Polyak-average the online Q networks into the targets.
        ptu.soft_update_from_to(
            self.qf1, self.target_qf1, self.soft_target_tau
        )
        ptu.soft_update_from_to(
            self.qf2, self.target_qf2, self.soft_target_tau
        )

    def compute_loss(
            self, batch, return_statistics=False,
    ) -> Union[PGRLosses, Tuple[PGRLosses, MutableMapping]]:
        """Compute the SAC losses (and optionally diagnostics) for a batch."""
        rewards = batch['rewards']
        terminals = batch['terminals']
        obs = batch['observations']
        actions = batch['actions']
        next_obs = batch['next_observations']
        eval_statistics = OrderedDict()
        """
        Policy and Alpha Loss
        """
        dist = self.policy(obs)
        new_obs_actions, log_pi = dist.rsample_and_logprob()
        log_pi = log_pi.unsqueeze(-1)
        if self.use_automatic_entropy_tuning:
            alpha_loss = -(self.log_alpha * (
                log_pi + self.target_entropy).detach()).mean()
        else:
            alpha_loss = 0
        alpha = self.get_alpha()
        if not self._qfs_were_initialized and self._auto_init_qf_bias:
            average_value = (rewards - alpha * log_pi).mean()
            self.qf1.last_fc.bias.data = average_value
            self.qf2.last_fc.bias.data = average_value
            # BUG FIX: mark the one-time bias initialization as done.  This
            # flag was previously reset to False, so the Q-function output
            # biases were overwritten on every single training batch.
            self._qfs_were_initialized = True
        q_new_actions = torch.min(
            self.qf1(obs, new_obs_actions),
            self.qf2(obs, new_obs_actions),
        )
        """
        QF Loss
        """
        bootstrap_value, q1_pred, q2_pred, bootstrap_log_pi_term = (
            self.get_bootstrap_stats(
                obs,
                actions,
                next_obs,
            ))
        # Use the unscaled bootstrap values/rewards so that the weight on the
        # Q-value/reward has the correct scale relative to the other terms
        raw_discount = self.get_discount_factor(
            bootstrap_value,
            rewards,
            obs,
            actions,
        )
        # Blend the prior and posterior discounts per the epoch schedule.
        discount = (
            self._weight_on_prior_discount * self.discount
            + (1 - self._weight_on_prior_discount) * raw_discount
        )
        q_target = self._compute_target_q_value(
            discount,
            rewards,
            terminals,
            bootstrap_value,
            eval_statistics,
            return_statistics,
        )
        policy_loss = (alpha * log_pi - q_new_actions).mean()
        qf1_loss = self.qf_criterion(q1_pred, q_target.detach())
        qf2_loss = self.qf_criterion(q2_pred, q_target.detach())
        """
        Save some statistics for eval
        """
        if return_statistics:
            eval_statistics.update(create_stats_ordered_dict(
                'rewards',
                ptu.get_numpy(rewards),
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'bootstrap log pi',
                ptu.get_numpy(bootstrap_log_pi_term),
            ))
            if isinstance(discount, torch.Tensor):
                eval_statistics.update(create_stats_ordered_dict(
                    'discount factor',
                    ptu.get_numpy(raw_discount),
                ))
            else:
                eval_statistics.update(create_stats_ordered_dict(
                    'discount factor',
                    np.array([raw_discount]),
                ))
            if isinstance(discount, torch.Tensor):
                eval_statistics.update(create_stats_ordered_dict(
                    'used discount factor',
                    ptu.get_numpy(discount),
                ))
            else:
                eval_statistics.update(create_stats_ordered_dict(
                    'used discount factor',
                    np.array([discount]),
                ))
            eval_statistics[
                'weight on prior discount'] = self._weight_on_prior_discount
            reward_scale = self.reward_scale
            if isinstance(reward_scale, torch.Tensor):
                reward_scale = ptu.get_numpy(reward_scale)
            eval_statistics['reward scale'] = reward_scale
            eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))
            eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))
            eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(policy_loss))
            eval_statistics['Policy Q-only Loss'] = np.mean(ptu.get_numpy(
                -q_new_actions
            ))
            eval_statistics['Policy entropy-only Loss'] = np.mean(ptu.get_numpy(
                alpha * log_pi
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'Q1 Predictions',
                ptu.get_numpy(q1_pred),
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'Q2 Predictions',
                ptu.get_numpy(q2_pred),
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'Q Targets',
                ptu.get_numpy(q_target),
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'Log Pis',
                ptu.get_numpy(log_pi),
            ))
            policy_statistics = add_prefix(dist.get_diagnostics(), "policy/")
            eval_statistics.update(policy_statistics)
            if self.use_automatic_entropy_tuning:
                eval_statistics['Alpha'] = alpha.item()
                eval_statistics['Alpha Loss'] = alpha_loss.item()
        losses = PGRLosses(
            policy_loss=policy_loss,
            qf1_loss=qf1_loss,
            qf2_loss=qf2_loss,
            alpha_loss=alpha_loss,
        )
        if return_statistics:
            return losses, eval_statistics
        else:
            return losses

    def get_bootstrap_stats(self, obs, actions, next_obs):
        """Return the soft bootstrap value plus the current Q predictions."""
        q1_pred = self.qf1(obs, actions)
        q2_pred = self.qf2(obs, actions)
        next_dist = self.policy(next_obs)
        new_next_actions, new_log_pi = next_dist.rsample_and_logprob()
        new_log_pi = new_log_pi.unsqueeze(-1)
        alpha = self.get_alpha()
        bootstrap_log_pi_term = - alpha * new_log_pi
        bootstrap_value = torch.min(
            self.target_qf1(next_obs, new_next_actions),
            self.target_qf2(next_obs, new_next_actions),
        ) + bootstrap_log_pi_term
        return bootstrap_value, q1_pred, q2_pred, bootstrap_log_pi_term

    @property
    def reward_scale(self):
        if self._reward_scale == 'auto_normalize_by_max_magnitude':
            return 1. / self._reward_normalizer
        elif self._reward_scale == 'auto_normalize_by_max_magnitude_times_10':
            return 10. / self._reward_normalizer
        elif self._reward_scale == 'auto_normalize_by_max_magnitude_times_100':
            return 100. / self._reward_normalizer
        elif self._reward_scale == 'auto_normalize_by_max_magnitude_times_invsig_prior':
            # Scale by the inverse sigmoid (logit) of the prior discount.
            return (np.log(self.discount) - np.log(1 - self.discount)
                    ) / self._reward_normalizer
        elif self._reward_scale == 'auto_normalize_by_mean_magnitude':
            return 1. / self._reward_normalizer
        elif isinstance(self._reward_scale, Number):
            return self._reward_scale
        else:
            raise ValueError(self._reward_scale)

    def _compute_target_q_value(
            self,
            discount,
            rewards,
            terminals,
            bootstrap_value,
            statistics_log,
            update_statistics,
    ):
        """Assemble the Bellman target for the configured reward_type."""
        scaled_rewards = rewards * self.reward_scale
        del rewards
        if self.reward_type == self.NORMAL_REWARD:
            reward_target = scaled_rewards
        elif self.reward_type == self.DISCOUNTED_REWARD:
            reward_target = scaled_rewards * (1 - discount)
        elif self.reward_type == self.DISCOUNTED_PLUS_TIME_KL:
            # Penalize deviation of the inferred discount from the prior.
            kl_reward = kl_divergence(
                Bernoulli(discount),
                self.prior_on_discount,
            )
            reward_target = (
                scaled_rewards * (1 - discount) + kl_reward
            )
            if update_statistics:
                statistics_log.update(create_stats_ordered_dict(
                    'time_kl_reward',
                    ptu.get_numpy(kl_reward),
                ))
                statistics_log.update(create_stats_ordered_dict(
                    'inferred_discount',
                    ptu.get_numpy(discount),
                ))
        else:
            # BUG FIX: message previously lacked a '{}' placeholder, so
            # .format() silently dropped the offending value.
            raise ValueError(
                "Unknown reward type: {}".format(self.reward_type))
        if self._multiply_bootstrap_by_prior_discount:
            bootstrap_target = (
                (1. - terminals) * discount * bootstrap_value * self.discount
            )
        else:
            bootstrap_target = (
                (1. - terminals) * discount * bootstrap_value
            )
        q_target = reward_target + bootstrap_target
        return q_target

    def get_discount_factor(
            self, bootstrap_value, unscaled_reward, obs, action
    ):
        """Return the discount for each transition per self.discount_type."""
        # TODO: train a separate Q-value for the log-pi terms so that the reward
        # scale matches
        prior_discount = self.discount  # rename for readability
        if self.discount_type == self.PRIOR_DISCOUNT:
            discount = prior_discount
        elif self.discount_type == self.LEARNED_DISCOUNT:
            discount = self.discount_model(obs, action)
        elif self.discount_type == self.COMPUTED_DISCOUNT:
            # large reward or tiny prior ==> small current discount
            discount = torch.sigmoid(
                bootstrap_value
                - unscaled_reward * self.reward_scale
                + np.log(prior_discount / (1 - prior_discount))
            ).detach()
        elif self.discount_type == self.COMPUTED_DISCOUNT_NO_PRIOR:
            # large reward or tiny prior ==> small current discount
            discount = torch.sigmoid(
                bootstrap_value
                - unscaled_reward * self.reward_scale
            ).detach()
        else:
            # BUG FIX: message previously lacked a '{}' placeholder.
            raise ValueError("Unknown discount type: {}".format(
                self.discount_type
            ))
        if self._upper_bound_discount_by_prior and discount is not prior_discount:
            discount = torch.clamp(discount, max=prior_discount)
        return discount

    def get_alpha(self):
        if self.use_automatic_entropy_tuning:
            alpha = self.log_alpha.exp()
        else:
            alpha = 1
        return alpha

    def get_diagnostics(self):
        stats = super().get_diagnostics()
        stats.update(self.eval_statistics)
        return stats

    @property
    def _weight_on_prior_discount(self):
        # Schedule value c_i used to blend prior vs. posterior discount.
        return self._prior_discount_weight_schedule.get_value(
            self._current_epoch
        )

    def end_epoch(self, epoch):
        self._need_to_update_eval_statistics = True
        self._current_epoch = epoch + 1

    @property
    def networks(self):
        return [
            self.policy,
            self.qf1,
            self.qf2,
            self.target_qf1,
            self.target_qf2,
        ]

    def get_snapshot(self):
        return dict(
            policy=self.policy,
            qf1=self.qf1,
            qf2=self.qf2,
            target_qf1=self.target_qf1,
            target_qf2=self.target_qf2,
        )
| [
"rlkit.torch.pytorch_util.soft_update_from_to",
"torch.distributions.Bernoulli",
"torch.nn.MSELoss",
"rlkit.torch.pytorch_util.zeros",
"rlkit.torch.pytorch_util.get_numpy",
"numpy.log",
"rlkit.misc.ml_util.ConstantSchedule",
"torch.sigmoid",
"torch.clamp",
"numpy.array",
"collections.namedtuple"... | [((627, 694), 'collections.namedtuple', 'namedtuple', (['"""SACLosses"""', '"""policy_loss qf1_loss qf2_loss alpha_loss"""'], {}), "('SACLosses', 'policy_loss qf1_loss qf2_loss alpha_loss')\n", (637, 694), False, 'from collections import OrderedDict, namedtuple\n'), ((6114, 6126), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6124, 6126), True, 'from torch import nn as nn\n'), ((6155, 6167), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6165, 6167), True, 'from torch import nn as nn\n'), ((6630, 6654), 'torch.distributions.Bernoulli', 'Bernoulli', (['self.discount'], {}), '(self.discount)\n', (6639, 6654), False, 'from torch.distributions import Bernoulli\n'), ((6855, 6868), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6866, 6868), False, 'from collections import OrderedDict, namedtuple\n'), ((9239, 9311), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf1', 'self.target_qf1', 'self.soft_target_tau'], {}), '(self.qf1, self.target_qf1, self.soft_target_tau)\n', (9262, 9311), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((9342, 9414), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf2', 'self.target_qf2', 'self.soft_target_tau'], {}), '(self.qf2, self.target_qf2, self.soft_target_tau)\n', (9365, 9414), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((9788, 9801), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9799, 9801), False, 'from collections import OrderedDict, namedtuple\n'), ((5026, 5047), 'rlkit.misc.ml_util.ConstantSchedule', 'ConstantSchedule', (['(0.0)'], {}), '(0.0)\n', (5042, 5047), False, 'from rlkit.misc.ml_util import ScalarSchedule, ConstantSchedule\n'), ((5839, 5871), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (5848, 5871), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6811, 6822), 'numpy.array', 'np.array', (['(1)'], 
{}), '(1)\n', (6819, 6822), True, 'import numpy as np\n'), ((19481, 19522), 'torch.clamp', 'torch.clamp', (['discount'], {'max': 'prior_discount'}), '(discount, max=prior_discount)\n', (19492, 19522), False, 'import torch\n'), ((13223, 13250), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['reward_scale'], {}), '(reward_scale)\n', (13236, 13250), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((13360, 13383), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['qf1_loss'], {}), '(qf1_loss)\n', (13373, 13383), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((13435, 13458), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['qf2_loss'], {}), '(qf2_loss)\n', (13448, 13458), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((13513, 13539), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_loss'], {}), '(policy_loss)\n', (13526, 13539), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((13601, 13630), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['(-q_new_actions)'], {}), '(-q_new_actions)\n', (13614, 13630), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((13728, 13757), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['(alpha * log_pi)'], {}), '(alpha * log_pi)\n', (13741, 13757), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((11954, 11976), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['rewards'], {}), '(rewards)\n', (11967, 11976), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((12107, 12143), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['bootstrap_log_pi_term'], {}), '(bootstrap_log_pi_term)\n', (12120, 12143), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((13901, 13923), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q1_pred'], {}), '(q1_pred)\n', (13914, 13923), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((14052, 14074), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q2_pred'], {}), '(q2_pred)\n', (14065, 14074), True, 
'import rlkit.torch.pytorch_util as ptu\n'), ((14198, 14221), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q_target'], {}), '(q_target)\n', (14211, 14221), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((14343, 14364), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['log_pi'], {}), '(log_pi)\n', (14356, 14364), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((12336, 12363), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['raw_discount'], {}), '(raw_discount)\n', (12349, 12363), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((12527, 12551), 'numpy.array', 'np.array', (['[raw_discount]'], {}), '([raw_discount])\n', (12535, 12551), True, 'import numpy as np\n'), ((12753, 12776), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['discount'], {}), '(discount)\n', (12766, 12776), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((12945, 12965), 'numpy.array', 'np.array', (['[discount]'], {}), '([discount])\n', (12953, 12965), True, 'import numpy as np\n'), ((17127, 17146), 'torch.distributions.Bernoulli', 'Bernoulli', (['discount'], {}), '(discount)\n', (17136, 17146), False, 'from torch.distributions import Bernoulli\n'), ((5674, 5710), 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (5681, 5710), True, 'import numpy as np\n'), ((16141, 16162), 'numpy.log', 'np.log', (['self.discount'], {}), '(self.discount)\n', (16147, 16162), True, 'import numpy as np\n'), ((16165, 16190), 'numpy.log', 'np.log', (['(1 - self.discount)'], {}), '(1 - self.discount)\n', (16171, 16190), True, 'import numpy as np\n'), ((17467, 17491), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['kl_reward'], {}), '(kl_reward)\n', (17480, 17491), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((17638, 17661), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['discount'], {}), '(discount)\n', (17651, 17661), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((19126, 19194), 
'torch.sigmoid', 'torch.sigmoid', (['(bootstrap_value - unscaled_reward * self.reward_scale)'], {}), '(bootstrap_value - unscaled_reward * self.reward_scale)\n', (19139, 19194), False, 'import torch\n'), ((18898, 18943), 'numpy.log', 'np.log', (['(prior_discount / (1 - prior_discount))'], {}), '(prior_discount / (1 - prior_discount))\n', (18904, 18943), True, 'import numpy as np\n')] |
import numpy as np
import torch
from debugq.algos import sampling_fqi
import debugq.pytorch_util as ptu
from rlutil.envs.tabular import q_iteration
import random
from rlutil.logging import logger
# Small epsilon for probability computations (guards log/divide by zero);
# NOTE(review): unused in this chunk — presumably consumed elsewhere.
PROB_EPS = 1e-8
class ReplayBufferFQI(sampling_fqi.PolicySamplingFQI):
    """Policy-sampling FQI that routes freshly collected transitions
    through a replay buffer, then fits on minibatches drawn from it."""

    def __init__(self, env, network, replay_buffer=None, batch_size=32, **kwargs):
        super(ReplayBufferFQI, self).__init__(env, network, **kwargs)
        self.replay_buffer = replay_buffer
        self.batch_size = batch_size

    def pre_project(self):
        """Push the newest rollout batch into the buffer before projection."""
        super(ReplayBufferFQI, self).pre_project()
        fresh = (self.batch_s, self.batch_a, self.batch_ns, self.batch_r)
        self.replay_buffer.add_all(*fresh)
        logger.record_tabular('replay_buffer_len', len(self.replay_buffer))

    def get_sample_states(self, itr=0):
        """Draw a minibatch and append its per-sample importance weights."""
        drawn = self.replay_buffer.sample(self.batch_size)
        return drawn + (self.compute_weights(drawn, itr=itr),)

    def compute_weights(self, samples, itr=0):
        # Uniform weighting; subclasses override with real schemes.
        return np.ones(self.batch_size)
class WeightedBufferFQI(ReplayBufferFQI):
    """Replay-buffer FQI whose minibatch weights come from a pluggable
    state-action weighting scheme, with validation-loss diagnostics."""

    def __init__(self, env, network, weighting_scheme='none',
                 **kwargs):
        super(WeightedBufferFQI, self).__init__(env, network, **kwargs)
        self.wscheme = weighting_scheme
        # Reference quantities derived from the ground-truth Q-function.
        self.vfn = q_iteration.logsumexp(
            self.ground_truth_q, alpha=self.ent_wt)
        self.optimal_visit_sa = q_iteration.compute_visitation(
            self.env, self.ground_truth_q, ent_wt=self.ent_wt,
            discount=self.discount, env_time_limit=self.time_limit)
        self.warmstart_adversarial_q = np.zeros_like(self.ground_truth_q)

    def pre_project(self):
        super(WeightedBufferFQI, self).pre_project()
        buf_probs = self.replay_buffer.probs_sa()
        self.sa_weights = sampling_fqi.compute_sa_weights(
            self, self.wscheme, buf_probs)
        self.validation_sa_weights = self.sa_weights * buf_probs
        # validation loss
        self.buffer_validation_sa_weights = buf_probs
        self.onpolicy_validation_sa_weights = q_iteration.compute_visitation(
            self.env, self.sampling_q, ent_wt=self.ent_wt,
            discount=self.discount, env_time_limit=self.time_limit)

    def compute_weights(self, samples, itr=0):
        return sampling_fqi.compute_weights(self, samples, itr=itr)

    def post_project(self):
        """Log TD validation losses under several state-action weightings."""
        sq_err = (self.current_q - self.all_target_q_np) ** 2
        for tag, wts in (
                ('validation_loss_sampling',
                 self.onpolicy_validation_sa_weights),
                ('validation_loss_reweighted', self.validation_sa_weights),
                ('validation_loss_buffer',
                 self.buffer_validation_sa_weights)):
            logger.record_tabular(tag, np.sum(wts * sq_err))
class DQN(WeightedBufferFQI):
    """Weighted-buffer FQI backed by a fixed-capacity FIFO buffer — a
    tabular analogue of DQN's experience replay."""

    def __init__(self, env, network, replay_buffer_size=1000, **kwargs):
        buf = SimpleReplayBuffer(replay_buffer_size)
        super(DQN, self).__init__(
            env, network, replay_buffer=buf, **kwargs)
class TabularBufferDQN(WeightedBufferFQI):
    """Weighted-buffer FQI storing transitions as exact tabular counts."""

    def __init__(self, env, network, replay_buffer_size=1000, **kwargs):
        # NOTE: replay_buffer_size is accepted for interface parity with DQN
        # but the tabular buffer is effectively unbounded, so it is unused.
        buf = TabularReplayBuffer(env)
        super(TabularBufferDQN, self).__init__(
            env, network, replay_buffer=buf, **kwargs)
class ReplayBuffer(object):
    """Abstract interface for transition replay buffers."""

    def __init__(self):
        pass

    def add_all(self, s, a, ns, r):
        """Insert a batch of aligned transition arrays, one tuple at a time."""
        for i, state in enumerate(s):
            self.add(state, a[i], ns[i], r[i])

    def add(self, s, a, ns, r):
        raise NotImplementedError()

    def sample(self, batch_size):
        raise NotImplementedError()

    def probs(self):
        raise NotImplementedError()

    def probs_sa(self):
        # Marginalize the (s, a, s') distribution over the next state axis.
        return np.sum(self.probs(), axis=2)
class SimpleReplayBuffer(ReplayBuffer):
    """Fixed-capacity FIFO transition store backed by flat numpy arrays."""

    def __init__(self, capacity=10000):
        self.capacity = capacity
        # Parallel arrays holding (s, a, s', r) for each stored transition.
        self._s = np.zeros(capacity, dtype=np.int32)
        self._a = np.zeros(capacity, dtype=np.int32)
        self._ns = np.zeros(capacity, dtype=np.int32)
        self._r = np.zeros(capacity, dtype=np.float32)
        self._wt = np.zeros(capacity, dtype=np.float32)
        self._cur_idx = 0  # next write position
        self._len = 0      # number of valid entries (<= capacity)

    def all(self):
        """Return views of every stored transition, in storage order."""
        n = len(self)
        return self._s[:n], self._a[:n], self._ns[:n], self._r[:n]

    def add(self, s, a, ns, r):
        idx = self._cur_idx
        self._s[idx] = s
        self._a[idx] = a
        self._ns[idx] = ns
        self._r[idx] = r
        # Advance the write head, wrapping once the buffer is full.
        self._cur_idx = (idx + 1) % self.capacity
        self._len = min(self.capacity, self._len + 1)

    def sample(self, batch_size):
        """Draw ``batch_size`` transitions uniformly with replacement."""
        picks = np.random.randint(0, len(self), size=batch_size)
        return self._s[picks], self._a[picks], self._ns[picks], self._r[picks]

    def __len__(self):
        return self._len
class TabularReplayBuffer(ReplayBuffer):
    """
    Replay buffer storing transition *counts* in a dense (S, A, S') table.

    Capacity is effectively machine precision, and empirical transition
    probabilities fall directly out of the normalized counts.
    """

    def __init__(self, env):
        self.num_states = env.num_states
        self.num_actions = env.num_actions
        table_shape = (self.num_states, self.num_actions, self.num_states)
        self._transitions = np.zeros(table_shape, dtype=np.int32)
        self._reward = np.zeros(table_shape, dtype=np.float32)
        self._len = 0
        # Lazily maintained normalization of the count table.
        self.normalized = False
        self._normalized_transitions = np.zeros_like(self._transitions)
        self._nonzero_transitions = None
        self._nonzero_probs = None

    def add(self, s, a, ns, r):
        self._transitions[s, a, ns] += 1
        self._reward[s, a, ns] = r
        self._len += 1
        self.normalized = False  # counts changed; cached probs are stale

    def probs(self):
        """Return the empirical (s, a, s') probability table."""
        if not self.normalized:
            table = self._transitions / float(self._len)
            self._normalized_transitions = table
            # Cache the support of the distribution for fast sampling.
            self._nonzero_transitions = np.where(table)
            self._nonzero_probs = table[self._nonzero_transitions]
            self.normalized = True
        return self._normalized_transitions

    def sample(self, batch_size):
        """Sample transitions proportionally to their empirical frequency."""
        self.probs()  # refresh the nonzero-support cache if needed
        chosen = np.random.choice(
            len(self._nonzero_transitions[0]),
            size=batch_size, p=self._nonzero_probs)
        s_ = self._nonzero_transitions[0][chosen]
        a_ = self._nonzero_transitions[1][chosen]
        ns_ = self._nonzero_transitions[2][chosen]
        r_ = np.array([self._reward[s, a, ns]
                       for s, a, ns in zip(s_, a_, ns_)])
        return s_, a_, ns_, r_

    def __len__(self):
        return self._len
| [
"numpy.zeros_like",
"numpy.sum",
"debugq.algos.sampling_fqi.compute_weights",
"rlutil.envs.tabular.q_iteration.logsumexp",
"numpy.zeros",
"numpy.ones",
"rlutil.logging.logger.record_tabular",
"numpy.where",
"rlutil.envs.tabular.q_iteration.compute_visitation"
] | [((1084, 1108), 'numpy.ones', 'np.ones', (['self.batch_size'], {}), '(self.batch_size)\n', (1091, 1108), True, 'import numpy as np\n'), ((1375, 1436), 'rlutil.envs.tabular.q_iteration.logsumexp', 'q_iteration.logsumexp', (['self.ground_truth_q'], {'alpha': 'self.ent_wt'}), '(self.ground_truth_q, alpha=self.ent_wt)\n', (1396, 1436), False, 'from rlutil.envs.tabular import q_iteration\n'), ((1482, 1624), 'rlutil.envs.tabular.q_iteration.compute_visitation', 'q_iteration.compute_visitation', (['self.env', 'self.ground_truth_q'], {'ent_wt': 'self.ent_wt', 'discount': 'self.discount', 'env_time_limit': 'self.time_limit'}), '(self.env, self.ground_truth_q, ent_wt=self.\n ent_wt, discount=self.discount, env_time_limit=self.time_limit)\n', (1512, 1624), False, 'from rlutil.envs.tabular import q_iteration\n'), ((1723, 1757), 'numpy.zeros_like', 'np.zeros_like', (['self.ground_truth_q'], {}), '(self.ground_truth_q)\n', (1736, 1757), True, 'import numpy as np\n'), ((2160, 2298), 'rlutil.envs.tabular.q_iteration.compute_visitation', 'q_iteration.compute_visitation', (['self.env', 'self.sampling_q'], {'ent_wt': 'self.ent_wt', 'discount': 'self.discount', 'env_time_limit': 'self.time_limit'}), '(self.env, self.sampling_q, ent_wt=self.\n ent_wt, discount=self.discount, env_time_limit=self.time_limit)\n', (2190, 2298), False, 'from rlutil.envs.tabular import q_iteration\n'), ((2431, 2483), 'debugq.algos.sampling_fqi.compute_weights', 'sampling_fqi.compute_weights', (['self', 'samples'], {'itr': 'itr'}), '(self, samples, itr=itr)\n', (2459, 2483), False, 'from debugq.algos import sampling_fqi\n'), ((2567, 2662), 'numpy.sum', 'np.sum', (['(self.onpolicy_validation_sa_weights * (self.current_q - self.\n all_target_q_np) ** 2)'], {}), '(self.onpolicy_validation_sa_weights * (self.current_q - self.\n all_target_q_np) ** 2)\n', (2573, 2662), True, 'import numpy as np\n'), ((2664, 2728), 'rlutil.logging.logger.record_tabular', 'logger.record_tabular', 
(['"""validation_loss_sampling"""', 'expected_loss'], {}), "('validation_loss_sampling', expected_loss)\n", (2685, 2728), False, 'from rlutil.logging import logger\n'), ((2753, 2838), 'numpy.sum', 'np.sum', (['(self.validation_sa_weights * (self.current_q - self.all_target_q_np) ** 2)'], {}), '(self.validation_sa_weights * (self.current_q - self.all_target_q_np) **\n 2)\n', (2759, 2838), True, 'import numpy as np\n'), ((2841, 2907), 'rlutil.logging.logger.record_tabular', 'logger.record_tabular', (['"""validation_loss_reweighted"""', 'expected_loss'], {}), "('validation_loss_reweighted', expected_loss)\n", (2862, 2907), False, 'from rlutil.logging import logger\n'), ((2932, 3025), 'numpy.sum', 'np.sum', (['(self.buffer_validation_sa_weights * (self.current_q - self.all_target_q_np\n ) ** 2)'], {}), '(self.buffer_validation_sa_weights * (self.current_q - self.\n all_target_q_np) ** 2)\n', (2938, 3025), True, 'import numpy as np\n'), ((3027, 3089), 'rlutil.logging.logger.record_tabular', 'logger.record_tabular', (['"""validation_loss_buffer"""', 'expected_loss'], {}), "('validation_loss_buffer', expected_loss)\n", (3048, 3089), False, 'from rlutil.logging import logger\n'), ((4247, 4281), 'numpy.zeros', 'np.zeros', (['capacity'], {'dtype': 'np.int32'}), '(capacity, dtype=np.int32)\n', (4255, 4281), True, 'import numpy as np\n'), ((4300, 4334), 'numpy.zeros', 'np.zeros', (['capacity'], {'dtype': 'np.int32'}), '(capacity, dtype=np.int32)\n', (4308, 4334), True, 'import numpy as np\n'), ((4354, 4388), 'numpy.zeros', 'np.zeros', (['capacity'], {'dtype': 'np.int32'}), '(capacity, dtype=np.int32)\n', (4362, 4388), True, 'import numpy as np\n'), ((4407, 4443), 'numpy.zeros', 'np.zeros', (['capacity'], {'dtype': 'np.float32'}), '(capacity, dtype=np.float32)\n', (4415, 4443), True, 'import numpy as np\n'), ((4463, 4499), 'numpy.zeros', 'np.zeros', (['capacity'], {'dtype': 'np.float32'}), '(capacity, dtype=np.float32)\n', (4471, 4499), True, 'import numpy as np\n'), ((5597, 
5675), 'numpy.zeros', 'np.zeros', (['(self.num_states, self.num_actions, self.num_states)'], {'dtype': 'np.int32'}), '((self.num_states, self.num_actions, self.num_states), dtype=np.int32)\n', (5605, 5675), True, 'import numpy as np\n'), ((5712, 5797), 'numpy.zeros', 'np.zeros', (['(self.num_states, self.num_actions, self.num_states)'], {'dtype': 'np.float32'}), '((self.num_states, self.num_actions, self.num_states), dtype=np.float32\n )\n', (5720, 5797), True, 'import numpy as np\n'), ((5900, 5932), 'numpy.zeros_like', 'np.zeros_like', (['self._transitions'], {}), '(self._transitions)\n', (5913, 5932), True, 'import numpy as np\n'), ((6347, 6385), 'numpy.where', 'np.where', (['self._normalized_transitions'], {}), '(self._normalized_transitions)\n', (6355, 6385), True, 'import numpy as np\n')] |
import numpy
import thinkdsp
import thinkplot
import matplotlib.pyplot as plt
import wave
import sys
import librosa
from functools import reduce
from scipy import signal as sig
# Print full arrays (no truncation) when dumping data for inspection.
numpy.set_printoptions(threshold=numpy.inf)
#numpy.set_printoptions(threshold=10)
#short_pop = thinkdsp.read_wave('short_pops.wav')
# Open the recording read-only with the stdlib wave module (used for the raw
# frames and the true frame rate).
short_pop_wave = wave.open('short_pops.wav', 'r')
y, s = librosa.load('short_pops.wav', sr=100)  # resample the audio down to 100 Hz
#Extract Raw Audio from Wav File
signal = short_pop_wave.readframes(-1)
# First-order Butterworth low-pass filter, normalized cutoff 0.15 (of Nyquist).
b, a = sig.butter(1, 0.15)
zi = sig.lfilter_zi(b, a)
z, _ = sig.lfilter(b, a, y, zi=zi*y[0])
z2, _ = sig.lfilter(b, a, z, zi=zi*z[0])
# Zero-phase (forward-backward) filtering of the downsampled signal.
low_y = sig.filtfilt(b, a, y)
# numpy.fromstring is deprecated for binary input (and removed in newer
# NumPy); frombuffer is the drop-in replacement.  It returns a read-only
# view, which is fine here: `signal` is only read below.
signal = numpy.frombuffer(signal, dtype=numpy.int16)
#print(signal)
size = len(signal)
print(size)
framerate = short_pop_wave.getframerate()
print(framerate)
# Real FFT of the raw samples for the spectrum plot below.
popcorn_fft = numpy.fft.rfft(signal)
# Minimum amplitude (int16 sample units) for a sample to count as part of a pop.
threshold = 2500

def aboveThreshold(x):
    """Return 1 if sample *x* exceeds the module-level ``threshold``, else 0."""
    # int(bool) is exactly the original 1/0 branch, without the if/else.
    return int(x > threshold)
#creates array the same size as signal data array, every value is threshold
# NOTE(review): thresholdArray is never used below -- presumably intended for
# plotting the threshold line; confirm before removing.
thresholdArray = numpy.full(size, threshold)
# 1 where the sample is above threshold, 0 elsewhere.
booleanArray = list(map(aboveThreshold, signal))
#print(booleanArray)
# Gradient of the 0/1 mask: nonzero only at threshold crossings.
gradientArray = numpy.gradient(booleanArray)
#print(gradientArray)
#do hella gradients?
# Keep only positive-gradient samples (rising edges); np.gradient puts two
# positive samples per step, hence the /2 below -- presumably a pop count.
filteredArray = list(filter(lambda x: x > 0, gradientArray))
print(len(filteredArray)/2)
# Write the low-pass-filtered, downsampled signal back out for listening.
librosa.output.write_wav('low_pass.wav', low_y, s)
# Figure 0: low-pass-filtered downsampled signal.
plt.figure(0)
plt.title('Librosa low pass')
plt.plot(low_y)
plt.grid()
plt.show()
# Figure 1: raw downsampled signal.
plt.figure(1)
plt.title('Librosa downsample')
plt.plot(y)
plt.grid()
plt.show()
# Figure 2: magnitude-less (complex) FFT of the raw samples.
plt.figure(2)
plt.title('FFT')
plt.plot(popcorn_fft)
plt.grid()
plt.show()
# Figure 3: raw signal with the detection threshold overlaid.
plt.figure(3)
plt.title('Signal Wave')
plt.plot(signal)
plt.plot([0, 200000], [threshold, threshold], 'k-', lw=2)
plt.show()
| [
"numpy.full",
"scipy.signal.lfilter_zi",
"wave.open",
"numpy.set_printoptions",
"numpy.fft.rfft",
"matplotlib.pyplot.title",
"scipy.signal.filtfilt",
"matplotlib.pyplot.plot",
"scipy.signal.lfilter",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.fromstring",
"librosa.load",
... | [((177, 220), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'threshold': 'numpy.inf'}), '(threshold=numpy.inf)\n', (199, 220), False, 'import numpy\n'), ((353, 385), 'wave.open', 'wave.open', (['"""short_pops.wav"""', '"""r"""'], {}), "('short_pops.wav', 'r')\n", (362, 385), False, 'import wave\n'), ((392, 430), 'librosa.load', 'librosa.load', (['"""short_pops.wav"""'], {'sr': '(100)'}), "('short_pops.wav', sr=100)\n", (404, 430), False, 'import librosa\n'), ((545, 564), 'scipy.signal.butter', 'sig.butter', (['(1)', '(0.15)'], {}), '(1, 0.15)\n', (555, 564), True, 'from scipy import signal as sig\n'), ((570, 590), 'scipy.signal.lfilter_zi', 'sig.lfilter_zi', (['b', 'a'], {}), '(b, a)\n', (584, 590), True, 'from scipy import signal as sig\n'), ((598, 632), 'scipy.signal.lfilter', 'sig.lfilter', (['b', 'a', 'y'], {'zi': '(zi * y[0])'}), '(b, a, y, zi=zi * y[0])\n', (609, 632), True, 'from scipy import signal as sig\n'), ((639, 673), 'scipy.signal.lfilter', 'sig.lfilter', (['b', 'a', 'z'], {'zi': '(zi * z[0])'}), '(b, a, z, zi=zi * z[0])\n', (650, 673), True, 'from scipy import signal as sig\n'), ((680, 701), 'scipy.signal.filtfilt', 'sig.filtfilt', (['b', 'a', 'y'], {}), '(b, a, y)\n', (692, 701), True, 'from scipy import signal as sig\n'), ((713, 746), 'numpy.fromstring', 'numpy.fromstring', (['signal', '"""Int16"""'], {}), "(signal, 'Int16')\n", (729, 746), False, 'import numpy\n'), ((867, 889), 'numpy.fft.rfft', 'numpy.fft.rfft', (['signal'], {}), '(signal)\n', (881, 889), False, 'import numpy\n'), ((1092, 1119), 'numpy.full', 'numpy.full', (['size', 'threshold'], {}), '(size, threshold)\n', (1102, 1119), False, 'import numpy\n'), ((1206, 1234), 'numpy.gradient', 'numpy.gradient', (['booleanArray'], {}), '(booleanArray)\n', (1220, 1234), False, 'import numpy\n'), ((1370, 1420), 'librosa.output.write_wav', 'librosa.output.write_wav', (['"""low_pass.wav"""', 'low_y', 's'], {}), "('low_pass.wav', low_y, s)\n", (1394, 1420), False, 'import 
librosa\n'), ((1422, 1435), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (1432, 1435), True, 'import matplotlib.pyplot as plt\n'), ((1436, 1465), 'matplotlib.pyplot.title', 'plt.title', (['"""Librosa low pass"""'], {}), "('Librosa low pass')\n", (1445, 1465), True, 'import matplotlib.pyplot as plt\n'), ((1466, 1481), 'matplotlib.pyplot.plot', 'plt.plot', (['low_y'], {}), '(low_y)\n', (1474, 1481), True, 'import matplotlib.pyplot as plt\n'), ((1482, 1492), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1490, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1503), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1501, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1505, 1518), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1515, 1518), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1550), 'matplotlib.pyplot.title', 'plt.title', (['"""Librosa downsample"""'], {}), "('Librosa downsample')\n", (1528, 1550), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1562), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {}), '(y)\n', (1559, 1562), True, 'import matplotlib.pyplot as plt\n'), ((1563, 1573), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1571, 1573), True, 'import matplotlib.pyplot as plt\n'), ((1574, 1584), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1582, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1601), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1598, 1601), True, 'import matplotlib.pyplot as plt\n'), ((1602, 1618), 'matplotlib.pyplot.title', 'plt.title', (['"""FFT"""'], {}), "('FFT')\n", (1611, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1640), 'matplotlib.pyplot.plot', 'plt.plot', (['popcorn_fft'], {}), '(popcorn_fft)\n', (1627, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1651), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1649, 1651), True, 'import matplotlib.pyplot as plt\n'), 
((1652, 1662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1660, 1662), True, 'import matplotlib.pyplot as plt\n'), ((1666, 1679), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (1676, 1679), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1704), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal Wave"""'], {}), "('Signal Wave')\n", (1689, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1705, 1721), 'matplotlib.pyplot.plot', 'plt.plot', (['signal'], {}), '(signal)\n', (1713, 1721), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1779), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 200000]', '[threshold, threshold]', '"""k-"""'], {'lw': '(2)'}), "([0, 200000], [threshold, threshold], 'k-', lw=2)\n", (1730, 1779), True, 'import matplotlib.pyplot as plt\n'), ((1780, 1790), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1788, 1790), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pickle
optimizer = "kfac"
# Load pre-computed error curves (shared vs. independent optimization) from a
# local pickle cache.
cache_fname = f"/home/mscherbela/tmp/data_shared_vs_indep_{optimizer}.pkl"
with open(cache_fname, 'rb') as f:
    full_plot_data = pickle.load(f)
# Only the Ethene molecule is plotted in this figure.
plot_data = full_plot_data['Ethene']
# Per-series plot styling and legend labels.
colors = dict(Indep='C0', ReuseIndep='C0', ReuseShared95='C4', ReuseFromIndep='C2', ReuseFromIndepSingleLR='darkgreen', Shared_95='r')
markers = dict(Indep='o', ReuseIndep='s', ReuseShared95='s', ReuseFromIndep='s', ReuseFromIndepSingleLR='o', Shared_95='o')
labels = dict(Indep='Independent opt.', ReuseIndep='Independent opt. (20 geom.)',
              ReuseShared95='Reuse from 95%-shared opt.', ReuseFromIndepSingleLR="Full-weight-reuse from indep. opt.",
              Shared_95='Shared opt. (95% shared)')
titles = dict(H4p='$H_4^+$: 16 geometries', H6='$H_6$: 23 geometries', H10='$H_{10}$: 23 geometries', Ethene='Ethene: 20 geometries')
ylims = dict(H4p=6.0, H6=6.0, H10=10.0, Ethene=30.0)
# NOTE(review): show_n_samples, titles and ylims are defined but not used in
# the plotting below -- presumably leftovers from a multi-molecule version.
show_n_samples = True
plt.close("all")
fig, ax = plt.subplots(1, 1, figsize=(7,4), dpi=200)
# One mean-error curve (log-x) plus a 25-75 percentile band per series.
for curve_type in ['Indep', 'ReuseFromIndepSingleLR', 'ReuseShared95', 'Shared_95']:
    ax.semilogx(plot_data[curve_type].n_epochs,
                plot_data[curve_type].error_mean,
                color=colors[curve_type],
                marker=markers[curve_type],
                label=labels[curve_type])
    ax.fill_between(plot_data[curve_type].n_epochs,
                    plot_data[curve_type].error_25p,
                    plot_data[curve_type].error_75p,
                    alpha=0.3,
                    color=colors[curve_type])
ax.set_ylim([-3, 20])
ax.set_xlim([64, 16384])
ax.grid(alpha=0.5, color='gray')
# Chemical-accuracy reference line (1 kcal/mol ~ 1.6 mHa).
ax.axhline(1.6, color='gray', linestyle='--', label="Chem. acc.: 1 kcal/mol")
# ax.set_ylim([0, ylims[molecule]])
# Power-of-two epoch ticks with thousands separators.
xticks = 2 ** np.arange(6, 15, 2)
ax.set_xticks(xticks)
ax.set_xticklabels([f"{x:,d}" for x in xticks])
ax.set_xticks(2 ** np.arange(6, 15, 1), minor=True)
ax.set_ylabel("energy error / mHa")
ax.set_xlabel("training epochs / geometry")
ax.set_title("Twisted and stretched Ethene")
ax.legend(loc='upper right', framealpha=0.9, fontsize=10)
ax.minorticks_off()
fig.tight_layout()
# Save both PNG and PDF variants for the paper.
fname = f"/home/mscherbela/ucloud/results/paper_figures/jax/reuse_from_indep_{optimizer}.png"
fig.savefig(fname, dpi=400, bbox_inches='tight')
fig.savefig(fname.replace(".png", ".pdf")) | [
"matplotlib.pyplot.close",
"pickle.load",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((992, 1008), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1001, 1008), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1062), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 4)', 'dpi': '(200)'}), '(1, 1, figsize=(7, 4), dpi=200)\n', (1031, 1062), True, 'import matplotlib.pyplot as plt\n'), ((216, 230), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (227, 230), False, 'import pickle\n'), ((1818, 1837), 'numpy.arange', 'np.arange', (['(6)', '(15)', '(2)'], {}), '(6, 15, 2)\n', (1827, 1837), True, 'import numpy as np\n'), ((1927, 1946), 'numpy.arange', 'np.arange', (['(6)', '(15)', '(1)'], {}), '(6, 15, 1)\n', (1936, 1946), True, 'import numpy as np\n')] |
'''Tests for model fitting.'''
import model_fitting
import lmfit
import numpy as np

############ CONSTANTS #############
IS_PLOT = False          # set True to display plots during the smoke tests
NROWS = 10               # rows of the synthetic test matrices
NROWS_SUBSET = 5         # row subset used by the reshape test
NCOLS = 3                # columns of the synthetic test matrices
LENGTH = NROWS*NCOLS     # total number of matrix elements
INDICES = range(NROWS)   # default row indices for difference tests
# Set to values used in model_fitting.MODEL
TEST_PARAMETERS = lmfit.Parameters()
TEST_PARAMETERS.add('k1', value=0.1, min=0, max=10)
TEST_PARAMETERS.add('k2', value=0.2, min=0, max=10)
def makeData(nrows, ncols, valFunc=None):
    """
    Creates a nrows x ncols array of synthetic data.

    :param int nrows: number of rows
    :param int ncols: number of columns
    :param Function valFunc: optional generator of cell values;
        argument: number of values
        return: iterable of that many values
        Defaults to ``range`` (0, 1, 2, ...).
        (The original docstring documented a nonexistent param ``value``.)
    :return numpy.ndarray: nrows x ncols matrix
    """
    if valFunc is None:
        valFunc = range  # the lambda wrapper around range was redundant
    data = valFunc(nrows*ncols)
    return np.reshape(data, (nrows, ncols))
def testReshapeData():
    """reshapeData should flatten the selected rows into a 1-d array."""
    matrix = makeData(NROWS, NCOLS)
    flattened = model_fitting.reshapeData(matrix, range(NROWS_SUBSET))
    assert len(flattened) == NROWS_SUBSET * NCOLS
    assert np.shape(flattened)[0] == len(flattened)
def testArrayDifference():
    """The difference of two identical matrices must be all zeros."""
    first = makeData(NROWS, NCOLS)
    second = makeData(NROWS, NCOLS)
    diff = model_fitting.arrayDifference(first, second, INDICES)
    assert sum(np.abs(diff)) == 0
def testCalcRsq():
    """R^2 on noisy data should imply a variance estimate near the noise level."""
    noise_std = 0.5
    noise = np.reshape(np.random.normal(0, noise_std, LENGTH), (NROWS, NCOLS))
    base = makeData(NROWS, NCOLS)
    noisy = base + noise
    rsq = model_fitting.calcRsq(noisy, base)
    estimated_var = (1 - rsq) * np.var(base)
    expected_var = noise_std * noise_std
    assert np.abs(estimated_var - expected_var) < 0.5
def testMakeParameters():
    """makeParameters must create one lmfit parameter per constant name."""
    names = ['k1', 'k2', 'k3']
    params = model_fitting.makeParameters(constants=names)
    assert len(params.valuesdict()) == len(names)
def testMakeAverageParameters():
    """
    Averaging two copies of the same parameters must reproduce the values.
    """
    averaged = model_fitting.makeAverageParameters(
        [TEST_PARAMETERS, TEST_PARAMETERS])
    expected = TEST_PARAMETERS.valuesdict()
    actual = averaged.valuesdict()
    for key in expected.keys():
        assert expected[key] == actual[key]
def testRunSimulation():
    """Simulations with default and explicit test parameters must agree.

    TEST_PARAMETERS mirrors the values baked into model_fitting.MODEL, so
    both runs should produce identical trajectories.
    """
    data1 = model_fitting.runSimulation()
    assert data1[-1, 0] == model_fitting.SIM_TIME
    data2 = model_fitting.runSimulation(
        parameters=TEST_PARAMETERS)
    # One vectorized comparison instead of the original O(rows*cols)
    # nested Python loop over np.isclose; check shapes explicitly first.
    assert np.shape(data1) == np.shape(data2)
    assert np.allclose(data1, data2)
def testPlotTimeSeries():
    """Smoke test: the plotting helpers must run without raising."""
    series = model_fitting.runSimulation()
    model_fitting.plotTimeSeries(series, is_plot=IS_PLOT)
    model_fitting.plotTimeSeries(series, is_scatter=True, is_plot=IS_PLOT)
def testMakeObservations():
    """Observations are a simulation plus noise of roughly NOISE_STD."""
    def check(num_points):
        # Generate noisy observations and a clean reference run of equal size.
        observed = model_fitting.makeObservations(
            num_points=num_points,
            road_runner=model_fitting.ROAD_RUNNER)
        clean = model_fitting.runSimulation(
            num_points=num_points,
            road_runner=model_fitting.ROAD_RUNNER)[:, 1:]
        assert np.shape(clean)[0] == num_points
        # Residual std should be the injected noise level, within a factor of 3.
        residual_std = np.sqrt(np.var(model_fitting.arrayDifference(
            observed[:, 1:], clean)))
        assert residual_std < 3*model_fitting.NOISE_STD
        assert residual_std > model_fitting.NOISE_STD/3.0
    check(model_fitting.NUM_POINTS)
    check(2*model_fitting.NUM_POINTS)
def testCalcSimulationResiduals():
    """Residuals of a simulation against its own parameters must be zero."""
    simulated = model_fitting.runSimulation(parameters=TEST_PARAMETERS)
    residuals = model_fitting.calcSimulationResiduals(
        TEST_PARAMETERS, simulated)
    assert sum(residuals * residuals) == 0
def testFit():
    """Fitted parameters must cover exactly the expected parameter names."""
    observations = model_fitting.makeObservations()
    fitted = model_fitting.fit(observations)
    fitted_names = set(dict(fitted.valuesdict()).keys())
    expected_names = set(dict(model_fitting.PARAMETERS.valuesdict()).keys())
    # Equivalent to an empty symmetric difference of the two key sets.
    assert fitted_names == expected_names
def testCrossValidate():
    """Cross-validated average parameters should land near the true values."""
    observations = model_fitting.makeObservations(
        parameters=TEST_PARAMETERS)
    fold_parameters, _ = model_fitting.crossValidate(observations)
    averaged = model_fitting.makeAverageParameters(fold_parameters)
    averaged_dict = averaged.valuesdict()
    true_dict = TEST_PARAMETERS.valuesdict()
    for name in averaged_dict.keys():
        # Loose bound: within a factor ~2 of the averaged estimate.
        assert np.abs(averaged_dict[name] - true_dict[name]) \
            < 2*averaged_dict[name]
def testCrossValidate2():
    """Cross validation with explicit num_points/num_folds settings."""
    num_points = 20
    observations = model_fitting.makeObservations(
        parameters=TEST_PARAMETERS, num_points=num_points)
    fold_parameters, _ = model_fitting.crossValidate(
        observations, num_points=num_points, num_folds=10)
    averaged = model_fitting.makeAverageParameters(fold_parameters)
    averaged_dict = averaged.valuesdict()
    true_dict = TEST_PARAMETERS.valuesdict()
    for name in averaged_dict.keys():
        # Loose bound: within a factor ~2 of the averaged estimate.
        assert np.abs(averaged_dict[name] - true_dict[name]) \
            < 2*averaged_dict[name]
def testMakeResidualsBySPecies():
    """Smoke test for makeResidualsBySpecies.

    Removed a leftover debugging breakpoint (``import pdb; pdb.set_trace()``)
    that halted the test run waiting for interactive input.
    """
    num_points = 20
    obs_data = model_fitting.makeObservations(
        parameters=TEST_PARAMETERS, num_points=num_points)
    residual_matrix = model_fitting.makeResidualsBySpecies(
        obs_data, parameters=TEST_PARAMETERS, num_points=num_points)
    # TODO: add shape/content assertions on residual_matrix.
if __name__ == '__main__':
    # Run the full test suite when executed directly.  The original wrapped
    # the bulk of the calls in a vestigial "if True:" debug toggle, which
    # always ran; it is removed here without changing behavior.
    testMakeResidualsBySPecies()
    testReshapeData()
    testArrayDifference()
    testCalcRsq()
    testMakeParameters()
    testMakeAverageParameters()
    testRunSimulation()
    testPlotTimeSeries()
    testCalcSimulationResiduals()
    testFit()
    testCrossValidate()
    testMakeObservations()
    testCrossValidate2()
    print("OK")
| [
"numpy.abs",
"model_fitting.calcRsq",
"numpy.shape",
"numpy.isclose",
"model_fitting.runSimulation",
"numpy.random.normal",
"model_fitting.makeAverageParameters",
"lmfit.Parameters",
"numpy.reshape",
"model_fitting.crossValidate",
"numpy.var",
"model_fitting.makeParameters",
"model_fitting.a... | [((284, 302), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (300, 302), False, 'import lmfit\n'), ((789, 821), 'numpy.reshape', 'np.reshape', (['data', '(nrows, ncols)'], {}), '(data, (nrows, ncols))\n', (799, 821), True, 'import numpy as np\n'), ((1156, 1212), 'model_fitting.arrayDifference', 'model_fitting.arrayDifference', (['matrix1', 'matrix2', 'INDICES'], {}), '(matrix1, matrix2, INDICES)\n', (1185, 1212), False, 'import model_fitting\n'), ((1436, 1475), 'model_fitting.calcRsq', 'model_fitting.calcRsq', (['matrix2', 'matrix1'], {}), '(matrix2, matrix1)\n', (1457, 1475), False, 'import model_fitting\n'), ((1652, 1701), 'model_fitting.makeParameters', 'model_fitting.makeParameters', ([], {'constants': 'constants'}), '(constants=constants)\n', (1680, 1701), False, 'import model_fitting\n'), ((1958, 2010), 'model_fitting.makeAverageParameters', 'model_fitting.makeAverageParameters', (['list_parameters'], {}), '(list_parameters)\n', (1993, 2010), False, 'import model_fitting\n'), ((2226, 2255), 'model_fitting.runSimulation', 'model_fitting.runSimulation', ([], {}), '()\n', (2253, 2255), False, 'import model_fitting\n'), ((2316, 2371), 'model_fitting.runSimulation', 'model_fitting.runSimulation', ([], {'parameters': 'TEST_PARAMETERS'}), '(parameters=TEST_PARAMETERS)\n', (2343, 2371), False, 'import model_fitting\n'), ((2397, 2412), 'numpy.shape', 'np.shape', (['data1'], {}), '(data1)\n', (2405, 2412), True, 'import numpy as np\n'), ((2570, 2599), 'model_fitting.runSimulation', 'model_fitting.runSimulation', ([], {}), '()\n', (2597, 2599), False, 'import model_fitting\n'), ((2603, 2654), 'model_fitting.plotTimeSeries', 'model_fitting.plotTimeSeries', (['data'], {'is_plot': 'IS_PLOT'}), '(data, is_plot=IS_PLOT)\n', (2631, 2654), False, 'import model_fitting\n'), ((2657, 2725), 'model_fitting.plotTimeSeries', 'model_fitting.plotTimeSeries', (['data'], {'is_scatter': '(True)', 'is_plot': 'IS_PLOT'}), '(data, is_scatter=True, 
is_plot=IS_PLOT)\n', (2685, 2725), False, 'import model_fitting\n'), ((3406, 3461), 'model_fitting.runSimulation', 'model_fitting.runSimulation', ([], {'parameters': 'TEST_PARAMETERS'}), '(parameters=TEST_PARAMETERS)\n', (3433, 3461), False, 'import model_fitting\n'), ((3483, 3547), 'model_fitting.calcSimulationResiduals', 'model_fitting.calcSimulationResiduals', (['TEST_PARAMETERS', 'obs_data'], {}), '(TEST_PARAMETERS, obs_data)\n', (3520, 3547), False, 'import model_fitting\n'), ((3624, 3656), 'model_fitting.makeObservations', 'model_fitting.makeObservations', ([], {}), '()\n', (3654, 3656), False, 'import model_fitting\n'), ((3672, 3699), 'model_fitting.fit', 'model_fitting.fit', (['obs_data'], {}), '(obs_data)\n', (3689, 3699), False, 'import model_fitting\n'), ((3965, 4023), 'model_fitting.makeObservations', 'model_fitting.makeObservations', ([], {'parameters': 'TEST_PARAMETERS'}), '(parameters=TEST_PARAMETERS)\n', (3995, 4023), False, 'import model_fitting\n'), ((4068, 4105), 'model_fitting.crossValidate', 'model_fitting.crossValidate', (['obs_data'], {}), '(obs_data)\n', (4095, 4105), False, 'import model_fitting\n'), ((4132, 4187), 'model_fitting.makeAverageParameters', 'model_fitting.makeAverageParameters', (['results_parameters'], {}), '(results_parameters)\n', (4167, 4187), False, 'import model_fitting\n'), ((4435, 4521), 'model_fitting.makeObservations', 'model_fitting.makeObservations', ([], {'parameters': 'TEST_PARAMETERS', 'num_points': 'num_points'}), '(parameters=TEST_PARAMETERS, num_points=\n num_points)\n', (4465, 4521), False, 'import model_fitting\n'), ((4560, 4634), 'model_fitting.crossValidate', 'model_fitting.crossValidate', (['obs_data'], {'num_points': 'num_points', 'num_folds': '(10)'}), '(obs_data, num_points=num_points, num_folds=10)\n', (4587, 4634), False, 'import model_fitting\n'), ((4661, 4716), 'model_fitting.makeAverageParameters', 'model_fitting.makeAverageParameters', (['results_parameters'], {}), '(results_parameters)\n', 
(4696, 4716), False, 'import model_fitting\n'), ((4972, 5058), 'model_fitting.makeObservations', 'model_fitting.makeObservations', ([], {'parameters': 'TEST_PARAMETERS', 'num_points': 'num_points'}), '(parameters=TEST_PARAMETERS, num_points=\n num_points)\n', (5002, 5058), False, 'import model_fitting\n'), ((5081, 5182), 'model_fitting.makeResidualsBySpecies', 'model_fitting.makeResidualsBySpecies', (['obs_data'], {'parameters': 'TEST_PARAMETERS', 'num_points': 'num_points'}), '(obs_data, parameters=TEST_PARAMETERS,\n num_points=num_points)\n', (5117, 5182), False, 'import model_fitting\n'), ((5200, 5215), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5213, 5215), False, 'import pdb\n'), ((1304, 1336), 'numpy.random.normal', 'np.random.normal', (['(0)', 'std', 'LENGTH'], {}), '(0, std, LENGTH)\n', (1320, 1336), True, 'import numpy as np\n'), ((1498, 1513), 'numpy.var', 'np.var', (['matrix1'], {}), '(matrix1)\n', (1504, 1513), True, 'import numpy as np\n'), ((1543, 1568), 'numpy.abs', 'np.abs', (['(var_est - var_exp)'], {}), '(var_est - var_exp)\n', (1549, 1568), True, 'import numpy as np\n'), ((2797, 2894), 'model_fitting.makeObservations', 'model_fitting.makeObservations', ([], {'num_points': 'num_points', 'road_runner': 'model_fitting.ROAD_RUNNER'}), '(num_points=num_points, road_runner=\n model_fitting.ROAD_RUNNER)\n', (2827, 2894), False, 'import model_fitting\n'), ((2918, 3012), 'model_fitting.runSimulation', 'model_fitting.runSimulation', ([], {'num_points': 'num_points', 'road_runner': 'model_fitting.ROAD_RUNNER'}), '(num_points=num_points, road_runner=\n model_fitting.ROAD_RUNNER)\n', (2945, 3012), False, 'import model_fitting\n'), ((3063, 3077), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (3071, 3077), True, 'import numpy as np\n'), ((3774, 3811), 'model_fitting.PARAMETERS.valuesdict', 'model_fitting.PARAMETERS.valuesdict', ([], {}), '()\n', (3809, 3811), False, 'import model_fitting\n'), ((1012, 1027), 'numpy.shape', 'np.shape', 
(['array'], {}), '(array)\n', (1020, 1027), True, 'import numpy as np\n'), ((1226, 1239), 'numpy.abs', 'np.abs', (['array'], {}), '(array)\n', (1232, 1239), True, 'import numpy as np\n'), ((2478, 2514), 'numpy.isclose', 'np.isclose', (['data1[i, j]', 'data2[i, j]'], {}), '(data1[i, j], data2[i, j])\n', (2488, 2514), True, 'import numpy as np\n'), ((3135, 3187), 'model_fitting.arrayDifference', 'model_fitting.arrayDifference', (['obs_data[:, 1:]', 'data'], {}), '(obs_data[:, 1:], data)\n', (3164, 3187), False, 'import model_fitting\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import cplane_np
import sys
import cmath
import csv
from cplane_np import ArrayComplexPlane
###
# Name: <NAME>, <NAME>
# Student ID: 01932978 ,
# Email: <EMAIL> , <EMAIL>
# Course: CS510 Fall 2017
# Assignment: Homework 06
###
def julia(c, max=100):
    """Return a vectorized escape-time map for the Julia set of constant c.

    Each point z is iterated via z -> z**2 + c.  Points already outside
    radius 2 map to 1; points that escape map to the iteration count at
    which they escaped; points still bounded after ``max`` counts map to 0.
    """
    def escape_count(z):
        # Guard clause: already outside the escape radius.
        if abs(z) > 2:
            return 1
        n = 1
        while abs(z) < 2:
            n += 1
            z = z ** 2 + c
            if abs(z) > 2:
                return n
            if max == n:
                return 0
        return n
    return np.vectorize(escape_count)
class JuliaPlane(ArrayComplexPlane):
    """A 1000x1000 complex plane over [-2, 2]^2 with a Julia escape-time map applied."""

    # Julia constant c used to build the plane.
    complex_in = 0

    def __init__(self, c):
        self.complex_in = c
        ArrayComplexPlane.__init__(self, -2, 2, 1000, -2, 2, 1000)
        self.apply(julia(c))

    def refresh(self, c):
        """Reset the plane to its stored bounds, clear all applied functions,
        and re-apply the Julia map for constant c.

        Args:
            c (complex): Julia constant.
        Return:
            Null: returns nothing
        """
        # NOTE(review): self.complex_in is not updated here, so it can go
        # stale after refreshing with a new c -- confirm intended.
        self.fs = []
        # NOTE(review): name mangling makes this resolve to
        # _JuliaPlane__setPlane; confirm ArrayComplexPlane exposes it.
        self.__setPlane(self.xmin, self.xmax, self.xlen,
                        self.ymin, self.ymax, self.ylen)
        self.apply(julia(c))
        return

    def toCSV(self, filename):
        """Export the plane parameters (bounds, lengths, Julia constant) to CSV."""
        # Bug fix: csv.writer needs a text-mode file in Python 3; opening
        # with 'wb' raised TypeError.  newline='' per the csv docs.
        # Debug prints ("here1".."here3") removed.
        with open(filename, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            params = [self.xmin, self.xmax, self.xlen,
                      self.ymin, self.ymax, self.ylen, self.complex_in]
            writer.writerow(params)
            #writer.writerow([self.plane])

    def fromCSV(self, filename):
        """Load plane parameters from a CSV file, reset the plane to them,
        and re-apply the Julia map with the stored constant."""
        # Bug fix: csv.reader likewise requires text mode ('rb' fails in
        # Python 3).
        with open(filename, 'r', newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            row = next(reader)
            # csv yields strings: convert bounds to float, lengths to int,
            # and the Julia constant back to complex before use (the original
            # passed the raw strings straight through).
            self.__setPlane(float(row[0]), float(row[1]), int(float(row[2])),
                            float(row[3]), float(row[4]), int(float(row[5])))
            self.apply(julia(complex(row[6])))
# Demo: build a Julia plane for c = 5+2j, round-trip its parameters via CSV.
x = 5
y = 2
z = complex(x,y)
myplane = JuliaPlane(z)
#myplane.printPlane()
myplane.toCSV("plane.csv")
myplane.fromCSV("plane.csv")
| [
"csv.reader",
"numpy.vectorize",
"csv.writer",
"cplane_np.ArrayComplexPlane.__init__"
] | [((639, 654), 'numpy.vectorize', 'np.vectorize', (['f'], {}), '(f)\n', (651, 654), True, 'import numpy as np\n'), ((775, 833), 'cplane_np.ArrayComplexPlane.__init__', 'ArrayComplexPlane.__init__', (['self', '(-2)', '(2)', '(1000)', '(-2)', '(2)', '(1000)'], {}), '(self, -2, 2, 1000, -2, 2, 1000)\n', (801, 833), False, 'from cplane_np import ArrayComplexPlane\n'), ((1515, 1549), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (1525, 1549), False, 'import csv\n'), ((2065, 2114), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=',', quotechar='|')\n", (2075, 2114), False, 'import csv\n')] |
import logging
from operator import itemgetter
import pandas as pd
from scipy.stats import hypergeom
import numpy as np
class ScoringFunction():
    """Peak-matching scores comparing experimental and theoretical spectra."""

    def __init__(self):
        pass

    def calc_hypergeom(self,
                       aligned_peaks,
                       theoretical_spectrum,
                       experimental_spectrum,
                       num_bins,
                       log10_transform=True):
        """Hypergeometric score of the peak alignment.

        Models the number of aligned peaks as hypergeometric draws over
        ``num_bins`` bins.

        :param aligned_peaks: matched peak pairs (only the count is used)
        :param theoretical_spectrum: theoretical peaks (only the count is used)
        :param experimental_spectrum: experimental peaks (only the count is used)
        :param num_bins: total number of bins
        :param log10_transform: if True, return -log10(pmf); else the raw pmf
        """
        num_aligned_peaks = len(aligned_peaks)
        num_exp_peaks = len(experimental_spectrum)
        num_theor_peaks = len(theoretical_spectrum)
        hyper = hypergeom(num_bins, num_theor_peaks, num_exp_peaks)
        # Evaluate the pmf at the aligned-peak count directly rather than
        # computing it for every value in [0, k] and keeping only the last.
        pmf = hyper.pmf(num_aligned_peaks)
        if log10_transform:
            return -np.log10(pmf)
        return pmf

    def calc_intensity_explained(self,
                                 aligned_peaks,
                                 theoretical_spectrum,
                                 experimental_spectrum):
        """Percentage of total experimental intensity explained by aligned peaks.

        Each aligned peak ``ap`` contributes intensity ``ap[1][1]`` the first
        time its experimental peak id ``ap[1][0]`` is seen; duplicates are
        skipped.  ``theoretical_spectrum`` is accepted for interface symmetry
        but unused.
        """
        explained_intensity = 0.0
        # A set gives O(1) duplicate checks (the original list scan was O(n)).
        already_assigned = set()
        for ap in aligned_peaks:
            peak_id = ap[1][0]
            if peak_id in already_assigned:
                continue
            explained_intensity += ap[1][1]
            already_assigned.add(peak_id)
        total_intensity_exp = sum(experimental_spectrum)
        return (explained_intensity/total_intensity_exp)*100

    def ms1_error_perc(self, ppm_error):
        # TODO: not implemented in SOURCE.
        pass
def ms2_error_perc(self,ppm_error):
pass | [
"scipy.stats.hypergeom",
"numpy.arange"
] | [((452, 503), 'scipy.stats.hypergeom', 'hypergeom', (['num_bins', 'num_theor_peaks', 'num_exp_peaks'], {}), '(num_bins, num_theor_peaks, num_exp_peaks)\n', (461, 503), False, 'from scipy.stats import hypergeom\n'), ((619, 654), 'numpy.arange', 'np.arange', (['(0)', '(num_aligned_peaks + 1)'], {}), '(0, num_aligned_peaks + 1)\n', (628, 654), True, 'import numpy as np\n'), ((551, 586), 'numpy.arange', 'np.arange', (['(0)', '(num_aligned_peaks + 1)'], {}), '(0, num_aligned_peaks + 1)\n', (560, 586), True, 'import numpy as np\n')] |
import logging
import secrets
import numpy as np
from .. import util
from ..util.errors import NumericalPrecisionError
class Coreset(object):
    def __init__(self, initial_wts_sz=1000):
        """Initialize an empty coreset.

        :param initial_wts_sz: initial capacity of the internal weight/index
            buffers; they are doubled on demand as points are added.
        """
        # Unique, human-readable identifier for this algorithm instance.
        self.alg_name = self.__class__.__name__ + '-' + secrets.token_hex(3)
        self.log = logging.LoggerAdapter(logging.getLogger(), {"id": self.alg_name})
        # Set once floating-point precision prevents further progress.
        self.reached_numeric_limit = False
        # Number of (weight, index) entries currently in use.
        self.nwts = 0
        # internal reps of wts and idcs
        self._wts = np.zeros(initial_wts_sz)
        self._idcs = np.zeros(initial_wts_sz, dtype=np.int64)
        # outward facing views
        self.wts = self._wts[:self.nwts]
        self.idcs = self._idcs[:self.nwts]
def reset(self):
# don't bother resetting wts, just set the nwts to 0
self.nwts = 0
self.wts = self._wts[:self.nwts]
self.idcs = self._idcs[:self.nwts]
self.reached_numeric_limit = False
def size(self):
return (self.wts > 0).sum()
def weights(self):
return self.wts[self.wts > 0], self.idcs[self.wts > 0]
def _refresh_views(self):
self.wts = self._wts[:self.nwts]
self.idcs = self._idcs[:self.nwts]
def _double_internal(self):
self.wts = None
self.idcs = None
self._wts.resize(self._wts.shape[0] * 2)
self._idcs.resize(self._idcs.shape[0] * 2)
self._refresh_views()
    # overwrite any wts at __idcs (keeping old values if unmodified), append any new ones
    def _update(self, __wts, __idcs):
        """Merge (weight, index) pairs into the coreset.

        Indices already present have their weights overwritten; new indices
        are appended, growing the internal buffers as needed.

        Raises ValueError if shapes mismatch, any weight/index is negative,
        or indices are not of integer dtype.
        """
        __idcs = np.atleast_1d(__idcs)
        __wts = np.atleast_1d(__wts)
        if __idcs.shape[0] != __wts.shape[0]:
            raise ValueError(self.alg_name + '._set(): new idcs and wts must have the same shape. idcs.shape = ' + str(
                __idcs.shape[0]) + ' wts.shape = ' + str(__wts.shape[0]))
        if np.any(__wts < 0) or np.any(__idcs < 0) or not np.issubdtype(__idcs.dtype, np.integer):
            raise ValueError(
                self.alg_name + '._set(): new weights + idcs must be nonnegative, and new idcs must have integer type. any(wts < 0) = ' + str(
                    np.any(__wts < 0)) + ' any(idcs < 0) = ' + str(np.any(__idcs < 0)) + ' dtype = ' + str(
                    __idcs.dtype) + ' idcs = ' + str(__idcs) + ' wts = ' + str(__wts))
        # get intersection, overwrite
        inter, i1, i2 = np.intersect1d(self.idcs, __idcs, return_indices=True)
        self.wts[i1] = __wts[i2]
        # get difference, append, resizing if necessary
        # idiff holds positions of __idcs not already in the coreset.
        idiff = np.setdiff1d(np.arange(__idcs.shape[0]), i2)
        while self.nwts + idiff.shape[0] > self._wts.shape[0]:
            self._double_internal()
        self._idcs[self.nwts:self.nwts + idiff.shape[0]] = __idcs[idiff]
        self._wts[self.nwts:self.nwts + idiff.shape[0]] = __wts[idiff]
        self.nwts += idiff.shape[0]
        # create views
        self._refresh_views()
    # completely overwrite; forget any previous weight settings
    def _overwrite(self, __wts, __idcs):
        """Replace the coreset contents entirely with the given pairs.

        Unlike _update, no merging is performed: previous weights are
        discarded.  Raises ValueError on shape mismatch, negative values,
        or non-integer index dtype.
        """
        __idcs = np.atleast_1d(__idcs)
        __wts = np.atleast_1d(__wts)
        if __idcs.shape[0] != __wts.shape[0]:
            raise ValueError(self.alg_name + '._set(): new idcs and wts must have the same shape. idcs.shape = ' + str(
                __idcs.shape[0]) + ' wts.shape = ' + str(__wts.shape[0]))
        if np.any(__wts < 0) or np.any(__idcs < 0) or not np.issubdtype(__idcs.dtype, np.integer):
            raise ValueError(
                self.alg_name + '._set(): new weights + idcs must be nonnegative, and new idcs must have integer type. any(wts < 0) = ' + str(
                    np.any(__wts < 0)) + ' any(idcs < 0) = ' + str(np.any(__idcs < 0)) + ' dtype = ' + str(
                    __idcs.dtype) + ' idcs = ' + str(__idcs))
        # full overwrite
        while __wts.shape[0] > self._wts.shape[0]:
            self._double_internal()
        self._wts[:__wts.shape[0]] = __wts
        self._idcs[:__idcs.shape[0]] = __idcs
        self.nwts = __wts.shape[0]
        self._refresh_views()
def error(self):
raise NotImplementedError()
# build of desired size sz using at most itrs iterations
# always returns a coreset of size <= sz
def build(self, itrs, sz):
if self.reached_numeric_limit:
return
if sz < self.size():
raise ValueError(
self.alg_name + '.build(): requested coreset of size < the current size, but cannot shrink coresets; returning. Requested size = ' + str(
sz) + ' current size = ' + str(self.size()))
self._build(itrs, sz)
# if we reached numeric limit during the current build, warn
if self.reached_numeric_limit:
self.log.warning('the numeric limit has been reached. No more points will be added. size = ' + str(
self.size()) + ', error = ' + str(self.error()))
# can run after building coreset to re-solve only the weight opt, not the combinatorial selection problem
def optimize(self):
try:
prev_cost = self.error()
old_wts = self.wts.copy()
old_idcs = self.idcs.copy()
self._optimize()
new_cost = self.error()
if new_cost > prev_cost * (1. + util.TOL):
raise NumericalPrecisionError(
'self.optimize() returned a solution with increasing error. Numeric limit possibly reached: preverr = ' + str(
prev_cost) + ' err = ' + str(new_cost) + '.\n \
If the two errors are very close, try running bc.util.tolerance(tol) with tol > current tol = ' + str(
util.TOL) + ' before running')
except NumericalPrecisionError as e:
self.log.warning(e)
self._overwrite(old_wts, old_idcs)
self.reached_numeric_limit = True
return
def _optimize(self):
raise NotImplementedError
def _build(self, itrs, sz):
raise NotImplementedError
| [
"numpy.atleast_1d",
"numpy.zeros",
"secrets.token_hex",
"numpy.issubdtype",
"numpy.any",
"numpy.arange",
"numpy.intersect1d",
"logging.getLogger"
] | [((478, 502), 'numpy.zeros', 'np.zeros', (['initial_wts_sz'], {}), '(initial_wts_sz)\n', (486, 502), True, 'import numpy as np\n'), ((524, 564), 'numpy.zeros', 'np.zeros', (['initial_wts_sz'], {'dtype': 'np.int64'}), '(initial_wts_sz, dtype=np.int64)\n', (532, 564), True, 'import numpy as np\n'), ((1529, 1550), 'numpy.atleast_1d', 'np.atleast_1d', (['__idcs'], {}), '(__idcs)\n', (1542, 1550), True, 'import numpy as np\n'), ((1567, 1587), 'numpy.atleast_1d', 'np.atleast_1d', (['__wts'], {}), '(__wts)\n', (1580, 1587), True, 'import numpy as np\n'), ((2357, 2411), 'numpy.intersect1d', 'np.intersect1d', (['self.idcs', '__idcs'], {'return_indices': '(True)'}), '(self.idcs, __idcs, return_indices=True)\n', (2371, 2411), True, 'import numpy as np\n'), ((3020, 3041), 'numpy.atleast_1d', 'np.atleast_1d', (['__idcs'], {}), '(__idcs)\n', (3033, 3041), True, 'import numpy as np\n'), ((3058, 3078), 'numpy.atleast_1d', 'np.atleast_1d', (['__wts'], {}), '(__wts)\n', (3071, 3078), True, 'import numpy as np\n'), ((247, 267), 'secrets.token_hex', 'secrets.token_hex', (['(3)'], {}), '(3)\n', (264, 267), False, 'import secrets\n'), ((309, 328), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (326, 328), False, 'import logging\n'), ((1839, 1856), 'numpy.any', 'np.any', (['(__wts < 0)'], {}), '(__wts < 0)\n', (1845, 1856), True, 'import numpy as np\n'), ((1860, 1878), 'numpy.any', 'np.any', (['(__idcs < 0)'], {}), '(__idcs < 0)\n', (1866, 1878), True, 'import numpy as np\n'), ((2532, 2558), 'numpy.arange', 'np.arange', (['__idcs.shape[0]'], {}), '(__idcs.shape[0])\n', (2541, 2558), True, 'import numpy as np\n'), ((3330, 3347), 'numpy.any', 'np.any', (['(__wts < 0)'], {}), '(__wts < 0)\n', (3336, 3347), True, 'import numpy as np\n'), ((3351, 3369), 'numpy.any', 'np.any', (['(__idcs < 0)'], {}), '(__idcs < 0)\n', (3357, 3369), True, 'import numpy as np\n'), ((1886, 1925), 'numpy.issubdtype', 'np.issubdtype', (['__idcs.dtype', 'np.integer'], {}), '(__idcs.dtype, 
np.integer)\n', (1899, 1925), True, 'import numpy as np\n'), ((3377, 3416), 'numpy.issubdtype', 'np.issubdtype', (['__idcs.dtype', 'np.integer'], {}), '(__idcs.dtype, np.integer)\n', (3390, 3416), True, 'import numpy as np\n'), ((3658, 3676), 'numpy.any', 'np.any', (['(__idcs < 0)'], {}), '(__idcs < 0)\n', (3664, 3676), True, 'import numpy as np\n'), ((2167, 2185), 'numpy.any', 'np.any', (['(__idcs < 0)'], {}), '(__idcs < 0)\n', (2173, 2185), True, 'import numpy as np\n'), ((3611, 3628), 'numpy.any', 'np.any', (['(__wts < 0)'], {}), '(__wts < 0)\n', (3617, 3628), True, 'import numpy as np\n'), ((2120, 2137), 'numpy.any', 'np.any', (['(__wts < 0)'], {}), '(__wts < 0)\n', (2126, 2137), True, 'import numpy as np\n')] |
import torch
import numpy as np
from scipy.io import wavfile
from torch_pitch_shift import *
# read an audio file
SAMPLE_RATE, sample = wavfile.read("./wavs/test.wav")
# convert to tensor of shape (batch_size, channels, samples)
# remember the on-disk dtype so shifted outputs can be written back unchanged
dtype = sample.dtype
sample = torch.tensor(
    [np.swapaxes(sample, 0, 1)], # (samples, channels) --> (channels, samples)
    dtype=torch.float32,
    device="cuda" if torch.cuda.is_available() else "cpu",
)
def test_pitch_shift_12_up():
    """Shift the sample up one octave (+12 semitones) and save the result."""
    shifted = pitch_shift(sample, 12, SAMPLE_RATE)
    assert shifted.shape == sample.shape
    out = np.swapaxes(shifted.cpu()[0].numpy(), 0, 1).astype(dtype)
    wavfile.write(
        "./wavs/shifted_octave_+1.wav",
        SAMPLE_RATE,
        out,
    )
def test_pitch_shift_12_down():
    """Shift the sample down one octave (-12 semitones) and save the result."""
    shifted = pitch_shift(sample, -12, SAMPLE_RATE)
    assert shifted.shape == sample.shape
    out = np.swapaxes(shifted.cpu()[0].numpy(), 0, 1).astype(dtype)
    wavfile.write(
        "./wavs/shifted_octave_-1.wav",
        SAMPLE_RATE,
        out,
    )
def test_pitch_shift_to_fast_ratios():
    """Shift the sample by every 'fast' ratio (within +/- one octave) and save
    each result to a ratio-named file."""
    for r in get_fast_shifts(SAMPLE_RATE):
        print("Shifting", r)
        shifted = pitch_shift(sample, r, SAMPLE_RATE)
        assert shifted.shape == sample.shape
        out = np.swapaxes(shifted.cpu()[0].numpy(), 0, 1).astype(dtype)
        wavfile.write(
            f"./wavs/shifted_ratio_{r.numerator}-{r.denominator}.wav",
            SAMPLE_RATE,
            out,
        )
| [
"torch.cuda.is_available",
"numpy.swapaxes",
"scipy.io.wavfile.read"
] | [((137, 168), 'scipy.io.wavfile.read', 'wavfile.read', (['"""./wavs/test.wav"""'], {}), "('./wavs/test.wav')\n", (149, 168), False, 'from scipy.io import wavfile\n'), ((280, 305), 'numpy.swapaxes', 'np.swapaxes', (['sample', '(0)', '(1)'], {}), '(sample, 0, 1)\n', (291, 305), True, 'import numpy as np\n'), ((401, 426), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (424, 426), False, 'import torch\n')] |
# Gmsh - Copyright (C) 1997-2019 <NAME>, <NAME>
#
# See the LICENSE.txt file for license information. Please report all
# issues on https://gitlab.onelab.info/gmsh/gmsh/issues.
# This file defines the Gmsh Python API (v4.4).
#
# Do not edit it directly: it is automatically generated by `api/gen.py'.
#
# By design, the Gmsh Python API is purely functional, and only uses elementary
# types (as well as `numpy' arrays if `numpy' is available). See `demos/api' for
# examples.
from ctypes import *
from ctypes.util import find_library
import signal
import os
import platform
from math import pi
GMSH_API_VERSION = "4.4"
GMSH_API_VERSION_MAJOR = 4
GMSH_API_VERSION_MINOR = 4
__version__ = GMSH_API_VERSION
# restore the default Ctrl-C behaviour so a running script can be interrupted
signal.signal(signal.SIGINT, signal.SIG_DFL)
# locate the Gmsh shared library next to this module, with a platform-specific name
libdir = os.path.dirname(os.path.realpath(__file__))
if platform.system() == "Windows":
    libpath = os.path.join(libdir, "gmsh-4.4.dll")
elif platform.system() == "Darwin":
    libpath = os.path.join(libdir, "libgmsh.dylib")
else:
    libpath = os.path.join(libdir, "libgmsh.so")
# fall back to the system search path when the library is not bundled
if not os.path.exists(libpath):
    libpath = find_library("gmsh")
    if not libpath:
        # NOTE(review): user_paths is never used afterwards and the handler
        # only re-raises the KeyError; presumably a leftover from an earlier
        # lookup scheme -- confirm before relying on the 'gmsh' env variable
        try:
            user_paths = os.environ['gmsh'].split(os.pathsep)
        except KeyError:
            raise
lib = CDLL(libpath)
# prefer numpy-backed vector conversions when numpy (and weakref.finalize,
# possibly via the backports package) are importable
use_numpy = False
try:
    import numpy
    try:
        from weakref import finalize as weakreffinalize
    except:
        from backports.weakref import finalize as weakreffinalize
    use_numpy = True
except:
    pass
# Utility functions, not part of the Gmsh Python API
def _ostring(s):
    """Decode a C string returned by the library and free its C buffer."""
    decoded = s.value.decode("utf-8")
    lib.gmshFree(s)
    return decoded
def _ovectorpair(ptr, size):
    """Convert a flat C int array into a list of (dim, tag) pairs, freeing
    the C buffer."""
    pairs = [(ptr[2 * k], ptr[2 * k + 1]) for k in range(size // 2)]
    lib.gmshFree(ptr)
    return pairs
def _ovectorint(ptr, size):
    """Wrap a C int array of length `size` as a Python vector; the C buffer
    is freed immediately (list path) or when the numpy view dies."""
    if use_numpy:
        vec = numpy.ctypeslib.as_array(ptr, (size, ))
        # defer freeing until the numpy view is garbage collected
        weakreffinalize(vec, lib.gmshFree, ptr)
    else:
        vec = [ptr[k] for k in range(size)]
        lib.gmshFree(ptr)
    return vec
def _ovectorsize(ptr, size):
    """Wrap a C size_t array of length `size` as a Python vector; the C
    buffer is freed immediately (list path) or when the numpy view dies."""
    if use_numpy:
        vec = numpy.ctypeslib.as_array(ptr, (size, ))
        # defer freeing until the numpy view is garbage collected
        weakreffinalize(vec, lib.gmshFree, ptr)
    else:
        vec = [ptr[k] for k in range(size)]
        lib.gmshFree(ptr)
    return vec
def _ovectordouble(ptr, size):
    """Wrap a C double array of length `size` as a Python vector; the C
    buffer is freed immediately (list path) or when the numpy view dies."""
    if use_numpy:
        vec = numpy.ctypeslib.as_array(ptr, (size, ))
        # defer freeing until the numpy view is garbage collected
        weakreffinalize(vec, lib.gmshFree, ptr)
    else:
        vec = [ptr[k] for k in range(size)]
        lib.gmshFree(ptr)
    return vec
def _ovectorstring(ptr, size):
    """Convert a C array of C strings into a list of Python strings and free
    the outer C buffer (inner strings are freed by _ostring)."""
    strings = [_ostring(cast(ptr[k], c_char_p)) for k in range(size)]
    lib.gmshFree(ptr)
    return strings
def _ovectorvectorint(ptr, size, n):
    """Convert a C array of `n` int vectors into a list of Python vectors and
    free the outer C containers (inner buffers are handled by _ovectorint)."""
    v = [_ovectorint(pointer(ptr[i].contents), size[i]) for i in range(n.value)]
    lib.gmshFree(size)
    lib.gmshFree(ptr)
    return v
def _ovectorvectorsize(ptr, size, n):
    """Convert a C array of `n` size_t vectors into a list of Python vectors
    and free the outer C containers (inner buffers via _ovectorsize)."""
    v = [_ovectorsize(pointer(ptr[i].contents), size[i]) for i in range(n.value)]
    lib.gmshFree(size)
    lib.gmshFree(ptr)
    return v
def _ovectorvectordouble(ptr, size, n):
    """Convert a C array of `n` double vectors into a list of Python vectors
    and free the outer C containers (inner buffers via _ovectordouble)."""
    v = [_ovectordouble(pointer(ptr[i].contents), size[i]) for i in range(n.value)]
    lib.gmshFree(size)
    lib.gmshFree(ptr)
    return v
def _ovectorvectorpair(ptr, size, n):
    """Convert a C array of `n` flat (dim, tag) vectors into a list of
    pair-lists and free the outer C containers (inner buffers via
    _ovectorpair)."""
    v = [_ovectorpair(pointer(ptr[i].contents), size[i]) for i in range(n.value)]
    lib.gmshFree(size)
    lib.gmshFree(ptr)
    return v
def _ivectorint(o):
    """Convert a Python int sequence into a C int array plus its length."""
    if use_numpy:
        array = numpy.ascontiguousarray(o, numpy.int32)
        ct = array.ctypes
        # Keep the numpy buffer alive for as long as the ctypes handle is.
        # Without this reference the temporary array could be garbage
        # collected before the C call reads it -- same anchoring pattern as
        # _ivectordouble/_ivectorpair below.
        ct.array = array
        return ct, c_size_t(len(o))
    else:
        return (c_int * len(o))(*o), c_size_t(len(o))
def _ivectorsize(o):
    """Convert a Python int sequence into a C size_t array plus its length."""
    if use_numpy:
        array = numpy.ascontiguousarray(o, numpy.uintp)
        ct = array.ctypes
        # Keep the numpy buffer alive for as long as the ctypes handle is.
        # Without this reference the temporary array could be garbage
        # collected before the C call reads it -- same anchoring pattern as
        # _ivectordouble/_ivectorpair below.
        ct.array = array
        return ct, c_size_t(len(o))
    else:
        return (c_size_t * len(o))(*o), c_size_t(len(o))
def _ivectordouble(o):
    """Convert a Python float sequence into a C double array plus its length."""
    if not use_numpy:
        return (c_double * len(o))(*o), c_size_t(len(o))
    buf = numpy.ascontiguousarray(o, numpy.float64)
    handle = buf.ctypes
    # anchor the buffer on the handle so it outlives this function
    handle.array = buf
    return handle, c_size_t(len(o))
def _ivectorpair(o):
    """Convert a sequence of (dim, tag) pairs into a flat C int array plus
    its total length (two entries per pair)."""
    if not use_numpy:
        return ((c_int * 2) * len(o))(*o), c_size_t(len(o) * 2)
    buf = numpy.ascontiguousarray(o, numpy.int32)
    handle = buf.ctypes
    # anchor the buffer on the handle so it outlives this function
    handle.array = buf
    return handle, c_size_t(len(o) * 2)
def _ivectorstring(o):
return (c_char_p * len(o))(*(s.encode() for s in o)), c_size_t(len(o))
def _ivectorvectorint(os):
    """Convert a list of int sequences into a C array-of-arrays, the per-array
    sizes, and the outer count. The `.ref` attribute pins the inner arrays so
    they stay alive while the outer structure is in use."""
    n = len(os)
    parrays = [_ivectorint(o) for o in os]
    sizes = (c_size_t * n)(*(a[1] for a in parrays))
    arrays = (POINTER(c_int) * n)(*(cast(a[0], POINTER(c_int)) for a in parrays))
    arrays.ref = [a[0] for a in parrays]
    size = c_size_t(n)
    return arrays, sizes, size
def _ivectorvectorsize(os):
    """Convert a list of int sequences into a C size_t array-of-arrays, the
    per-array sizes, and the outer count. The `.ref` attribute pins the inner
    arrays so they stay alive while the outer structure is in use."""
    n = len(os)
    parrays = [_ivectorsize(o) for o in os]
    sizes = (c_size_t * n)(*(a[1] for a in parrays))
    arrays = (POINTER(c_size_t) * n)(*(cast(a[0], POINTER(c_size_t)) for a in parrays))
    arrays.ref = [a[0] for a in parrays]
    size = c_size_t(n)
    return arrays, sizes, size
def _ivectorvectordouble(os):
    """Convert a list of float sequences into a C double array-of-arrays, the
    per-array sizes, and the outer count. The `.ref` attribute pins the inner
    arrays so they stay alive while the outer structure is in use."""
    n = len(os)
    parrays = [_ivectordouble(o) for o in os]
    sizes = (c_size_t * n)(*(a[1] for a in parrays))
    arrays = (POINTER(c_double) * n)(*(cast(a[0], POINTER(c_double)) for a in parrays))
    arrays.ref = [a[0] for a in parrays]
    size = c_size_t(n)
    return arrays, sizes, size
def _iargcargv(o):
return c_int(len(o)), (c_char_p * len(o))(*(s.encode() for s in o))
# Gmsh Python API begins here
def initialize(argv=[], readConfigFiles=True):
    """
    Initialize Gmsh. This must be called before any call to the other functions
    in the API. If `argc' and `argv' (or just `argv' in Python or Julia) are
    provided, they will be handled in the same way as the command line
    arguments in the Gmsh app. If `readConfigFiles' is set, read system Gmsh
    configuration files (gmshrc and gmsh-options).

    Raises ValueError when the C library reports a non-zero error code.
    """
    # NOTE: the mutable default argv=[] is safe here -- it is never mutated
    api_argc_, api_argv_ = _iargcargv(argv)
    ierr = c_int()  # C-side status flag, written by the library call
    lib.gmshInitialize(
        api_argc_, api_argv_,
        c_int(bool(readConfigFiles)),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshInitialize returned non-zero error code: ",
            ierr.value)
def finalize():
    """
    Finalize Gmsh. This must be called when you are done using the Gmsh API.

    Raises ValueError when the C library reports a non-zero error code.
    """
    ierr = c_int()  # C-side status flag, written by the library call
    lib.gmshFinalize(
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshFinalize returned non-zero error code: ",
            ierr.value)
# NOTE: intentionally shadows the builtin open(); the name is part of the
# public Gmsh API surface (use `import gmsh; gmsh.open(...)`).
def open(fileName):
    """
    Open a file. Equivalent to the `File->Open' menu in the Gmsh app. Handling
    of the file depends on its extension and/or its contents: opening a file
    with model data will create a new model.

    Raises ValueError when the C library reports a non-zero error code.
    """
    ierr = c_int()  # C-side status flag, written by the library call
    lib.gmshOpen(
        c_char_p(fileName.encode()),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshOpen returned non-zero error code: ",
            ierr.value)
def merge(fileName):
    """
    Merge a file. Equivalent to the `File->Merge' menu in the Gmsh app.
    Handling of the file depends on its extension and/or its contents. Merging
    a file with model data will add the data to the current model.

    Raises ValueError when the C library reports a non-zero error code.
    """
    ierr = c_int()  # C-side status flag, written by the library call
    lib.gmshMerge(
        c_char_p(fileName.encode()),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshMerge returned non-zero error code: ",
            ierr.value)
def write(fileName):
    """
    Write a file. The export format is determined by the file extension.

    Raises ValueError when the C library reports a non-zero error code.
    """
    ierr = c_int()  # C-side status flag, written by the library call
    lib.gmshWrite(
        c_char_p(fileName.encode()),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshWrite returned non-zero error code: ",
            ierr.value)
def clear():
    """
    Clear all loaded models and post-processing data, and add a new empty
    model.

    Raises ValueError when the C library reports a non-zero error code.
    """
    ierr = c_int()  # C-side status flag, written by the library call
    lib.gmshClear(
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshClear returned non-zero error code: ",
            ierr.value)
class option:
    """
    Option handling functions
    """
    # Each method is a thin static wrapper around the matching C API entry
    # point: arguments are marshalled to ctypes, `ierr` receives the library
    # status, and a non-zero status raises ValueError.
    @staticmethod
    def setNumber(name, value):
        """
        Set a numerical option to `value'. `name' is of the form "category.option"
        or "category[num].option". Available categories and options are listed in
        the Gmsh reference manual.
        """
        ierr = c_int()
        lib.gmshOptionSetNumber(
            c_char_p(name.encode()),
            c_double(value),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshOptionSetNumber returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def getNumber(name):
        """
        Get the `value' of a numerical option. `name' is of the form
        "category.option" or "category[num].option". Available categories and
        options are listed in the Gmsh reference manual.
        Return `value'.
        """
        api_value_ = c_double()
        ierr = c_int()
        lib.gmshOptionGetNumber(
            c_char_p(name.encode()),
            byref(api_value_),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshOptionGetNumber returned non-zero error code: ",
                ierr.value)
        return api_value_.value
    @staticmethod
    def setString(name, value):
        """
        Set a string option to `value'. `name' is of the form "category.option" or
        "category[num].option". Available categories and options are listed in the
        Gmsh reference manual.
        """
        ierr = c_int()
        lib.gmshOptionSetString(
            c_char_p(name.encode()),
            c_char_p(value.encode()),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshOptionSetString returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def getString(name):
        """
        Get the `value' of a string option. `name' is of the form "category.option"
        or "category[num].option". Available categories and options are listed in
        the Gmsh reference manual.
        Return `value'.
        """
        api_value_ = c_char_p()
        ierr = c_int()
        lib.gmshOptionGetString(
            c_char_p(name.encode()),
            byref(api_value_),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshOptionGetString returned non-zero error code: ",
                ierr.value)
        # _ostring decodes the returned C string and frees it
        return _ostring(api_value_)
    @staticmethod
    def setColor(name, r, g, b, a=0):
        """
        Set a color option to the RGBA value (`r', `g', `b', `a'), where `r',
        `g', `b' and `a' should be integers between 0 and 255. `name' is of the
        form "category.option" or "category[num].option". Available categories and
        options are listed in the Gmsh reference manual, with the "Color." middle
        string removed.
        """
        ierr = c_int()
        lib.gmshOptionSetColor(
            c_char_p(name.encode()),
            c_int(r),
            c_int(g),
            c_int(b),
            c_int(a),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshOptionSetColor returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def getColor(name):
        """
        Get the `r', `g', `b', `a' value of a color option. `name' is of the form
        "category.option" or "category[num].option". Available categories and
        options are listed in the Gmsh reference manual, with the "Color." middle
        string removed.
        Return `r', `g', `b', `a'.
        """
        api_r_ = c_int()
        api_g_ = c_int()
        api_b_ = c_int()
        api_a_ = c_int()
        ierr = c_int()
        lib.gmshOptionGetColor(
            c_char_p(name.encode()),
            byref(api_r_),
            byref(api_g_),
            byref(api_b_),
            byref(api_a_),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshOptionGetColor returned non-zero error code: ",
                ierr.value)
        return (
            api_r_.value,
            api_g_.value,
            api_b_.value,
            api_a_.value)
class model:
"""
Model functions
"""
@staticmethod
def add(name):
"""
Add a new model, with name `name', and set it as the current model.
"""
ierr = c_int()
lib.gmshModelAdd(
c_char_p(name.encode()),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelAdd returned non-zero error code: ",
ierr.value)
@staticmethod
def remove():
"""
Remove the current model.
"""
ierr = c_int()
lib.gmshModelRemove(
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelRemove returned non-zero error code: ",
ierr.value)
@staticmethod
def list():
"""
List the names of all models.
Return `names'.
"""
api_names_, api_names_n_ = POINTER(POINTER(c_char))(), c_size_t()
ierr = c_int()
lib.gmshModelList(
byref(api_names_), byref(api_names_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelList returned non-zero error code: ",
ierr.value)
return _ovectorstring(api_names_, api_names_n_.value)
@staticmethod
def setCurrent(name):
"""
Set the current model to the model with name `name'. If several models have
the same name, select the one that was added first.
"""
ierr = c_int()
lib.gmshModelSetCurrent(
c_char_p(name.encode()),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelSetCurrent returned non-zero error code: ",
ierr.value)
@staticmethod
def getEntities(dim=-1):
"""
Get all the entities in the current model. If `dim' is >= 0, return only
the entities of the specified dimension (e.g. points if `dim' == 0). The
entities are returned as a vector of (dim, tag) integer pairs.
Return `dimTags'.
"""
api_dimTags_, api_dimTags_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelGetEntities(
byref(api_dimTags_), byref(api_dimTags_n_),
c_int(dim),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetEntities returned non-zero error code: ",
ierr.value)
return _ovectorpair(api_dimTags_, api_dimTags_n_.value)
@staticmethod
def setEntityName(dim, tag, name):
"""
Set the name of the entity of dimension `dim' and tag `tag'.
"""
ierr = c_int()
lib.gmshModelSetEntityName(
c_int(dim),
c_int(tag),
c_char_p(name.encode()),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelSetEntityName returned non-zero error code: ",
ierr.value)
@staticmethod
def getEntityName(dim, tag):
"""
Get the name of the entity of dimension `dim' and tag `tag'.
Return `name'.
"""
api_name_ = c_char_p()
ierr = c_int()
lib.gmshModelGetEntityName(
c_int(dim),
c_int(tag),
byref(api_name_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetEntityName returned non-zero error code: ",
ierr.value)
return _ostring(api_name_)
@staticmethod
def getPhysicalGroups(dim=-1):
"""
Get all the physical groups in the current model. If `dim' is >= 0, return
only the entities of the specified dimension (e.g. physical points if `dim'
== 0). The entities are returned as a vector of (dim, tag) integer pairs.
Return `dimTags'.
"""
api_dimTags_, api_dimTags_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelGetPhysicalGroups(
byref(api_dimTags_), byref(api_dimTags_n_),
c_int(dim),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetPhysicalGroups returned non-zero error code: ",
ierr.value)
return _ovectorpair(api_dimTags_, api_dimTags_n_.value)
@staticmethod
def getEntitiesForPhysicalGroup(dim, tag):
"""
Get the tags of the model entities making up the physical group of
dimension `dim' and tag `tag'.
Return `tags'.
"""
api_tags_, api_tags_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelGetEntitiesForPhysicalGroup(
c_int(dim),
c_int(tag),
byref(api_tags_), byref(api_tags_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetEntitiesForPhysicalGroup returned non-zero error code: ",
ierr.value)
return _ovectorint(api_tags_, api_tags_n_.value)
@staticmethod
def getPhysicalGroupsForEntity(dim, tag):
"""
Get the tags of the physical groups (if any) to which the model entity of
dimension `dim' and tag `tag' belongs.
Return `physicalTags'.
"""
api_physicalTags_, api_physicalTags_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelGetPhysicalGroupsForEntity(
c_int(dim),
c_int(tag),
byref(api_physicalTags_), byref(api_physicalTags_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetPhysicalGroupsForEntity returned non-zero error code: ",
ierr.value)
return _ovectorint(api_physicalTags_, api_physicalTags_n_.value)
@staticmethod
def addPhysicalGroup(dim, tags, tag=-1):
"""
Add a physical group of dimension `dim', grouping the model entities with
tags `tags'. Return the tag of the physical group, equal to `tag' if `tag'
is positive, or a new tag if `tag' < 0.
Return an integer value.
"""
api_tags_, api_tags_n_ = _ivectorint(tags)
ierr = c_int()
api__result__ = lib.gmshModelAddPhysicalGroup(
c_int(dim),
api_tags_, api_tags_n_,
c_int(tag),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelAddPhysicalGroup returned non-zero error code: ",
ierr.value)
return api__result__
@staticmethod
def setPhysicalName(dim, tag, name):
"""
Set the name of the physical group of dimension `dim' and tag `tag'.
"""
ierr = c_int()
lib.gmshModelSetPhysicalName(
c_int(dim),
c_int(tag),
c_char_p(name.encode()),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelSetPhysicalName returned non-zero error code: ",
ierr.value)
@staticmethod
def getPhysicalName(dim, tag):
"""
Get the name of the physical group of dimension `dim' and tag `tag'.
Return `name'.
"""
api_name_ = c_char_p()
ierr = c_int()
lib.gmshModelGetPhysicalName(
c_int(dim),
c_int(tag),
byref(api_name_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetPhysicalName returned non-zero error code: ",
ierr.value)
return _ostring(api_name_)
@staticmethod
def getBoundary(dimTags, combined=True, oriented=True, recursive=False):
"""
Get the boundary of the model entities `dimTags'. Return in `outDimTags'
the boundary of the individual entities (if `combined' is false) or the
boundary of the combined geometrical shape formed by all input entities (if
`combined' is true). Return tags multiplied by the sign of the boundary
entity if `oriented' is true. Apply the boundary operator recursively down
to dimension 0 (i.e. to points) if `recursive' is true.
Return `outDimTags'.
"""
api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelGetBoundary(
api_dimTags_, api_dimTags_n_,
byref(api_outDimTags_), byref(api_outDimTags_n_),
c_int(bool(combined)),
c_int(bool(oriented)),
c_int(bool(recursive)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetBoundary returned non-zero error code: ",
ierr.value)
return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def getEntitiesInBoundingBox(xmin, ymin, zmin, xmax, ymax, zmax, dim=-1):
"""
Get the model entities in the bounding box defined by the two points
(`xmin', `ymin', `zmin') and (`xmax', `ymax', `zmax'). If `dim' is >= 0,
return only the entities of the specified dimension (e.g. points if `dim'
== 0).
Return `tags'.
"""
api_tags_, api_tags_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelGetEntitiesInBoundingBox(
c_double(xmin),
c_double(ymin),
c_double(zmin),
c_double(xmax),
c_double(ymax),
c_double(zmax),
byref(api_tags_), byref(api_tags_n_),
c_int(dim),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetEntitiesInBoundingBox returned non-zero error code: ",
ierr.value)
return _ovectorpair(api_tags_, api_tags_n_.value)
@staticmethod
def getBoundingBox(dim, tag):
"""
Get the bounding box (`xmin', `ymin', `zmin'), (`xmax', `ymax', `zmax') of
the model entity of dimension `dim' and tag `tag'. If `dim' and `tag' are
negative, get the bounding box of the whole model.
Return `xmin', `ymin', `zmin', `xmax', `ymax', `zmax'.
"""
api_xmin_ = c_double()
api_ymin_ = c_double()
api_zmin_ = c_double()
api_xmax_ = c_double()
api_ymax_ = c_double()
api_zmax_ = c_double()
ierr = c_int()
lib.gmshModelGetBoundingBox(
c_int(dim),
c_int(tag),
byref(api_xmin_),
byref(api_ymin_),
byref(api_zmin_),
byref(api_xmax_),
byref(api_ymax_),
byref(api_zmax_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetBoundingBox returned non-zero error code: ",
ierr.value)
return (
api_xmin_.value,
api_ymin_.value,
api_zmin_.value,
api_xmax_.value,
api_ymax_.value,
api_zmax_.value)
@staticmethod
def getDimension():
"""
Get the geometrical dimension of the current model.
Return an integer value.
"""
ierr = c_int()
api__result__ = lib.gmshModelGetDimension(
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetDimension returned non-zero error code: ",
ierr.value)
return api__result__
@staticmethod
def addDiscreteEntity(dim, tag=-1, boundary=[]):
"""
Add a discrete model entity (defined by a mesh) of dimension `dim' in the
current model. Return the tag of the new discrete entity, equal to `tag' if
`tag' is positive, or a new tag if `tag' < 0. `boundary' specifies the tags
of the entities on the boundary of the discrete entity, if any. Specifying
`boundary' allows Gmsh to construct the topology of the overall model.
Return an integer value.
"""
api_boundary_, api_boundary_n_ = _ivectorint(boundary)
ierr = c_int()
api__result__ = lib.gmshModelAddDiscreteEntity(
c_int(dim),
c_int(tag),
api_boundary_, api_boundary_n_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelAddDiscreteEntity returned non-zero error code: ",
ierr.value)
return api__result__
@staticmethod
def removeEntities(dimTags, recursive=False):
"""
Remove the entities `dimTags' of the current model. If `recursive' is true,
remove all the entities on their boundaries, down to dimension 0.
"""
api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
ierr = c_int()
lib.gmshModelRemoveEntities(
api_dimTags_, api_dimTags_n_,
c_int(bool(recursive)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelRemoveEntities returned non-zero error code: ",
ierr.value)
@staticmethod
def removeEntityName(name):
"""
Remove the entity name `name' from the current model.
"""
ierr = c_int()
lib.gmshModelRemoveEntityName(
c_char_p(name.encode()),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelRemoveEntityName returned non-zero error code: ",
ierr.value)
@staticmethod
def removePhysicalGroups(dimTags=[]):
"""
Remove the physical groups `dimTags' of the current model. If `dimTags' is
empty, remove all groups.
"""
api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
ierr = c_int()
lib.gmshModelRemovePhysicalGroups(
api_dimTags_, api_dimTags_n_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelRemovePhysicalGroups returned non-zero error code: ",
ierr.value)
@staticmethod
def removePhysicalName(name):
"""
Remove the physical name `name' from the current model.
"""
ierr = c_int()
lib.gmshModelRemovePhysicalName(
c_char_p(name.encode()),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelRemovePhysicalName returned non-zero error code: ",
ierr.value)
@staticmethod
def getType(dim, tag):
"""
Get the type of the entity of dimension `dim' and tag `tag'.
Return `entityType'.
"""
api_entityType_ = c_char_p()
ierr = c_int()
lib.gmshModelGetType(
c_int(dim),
c_int(tag),
byref(api_entityType_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetType returned non-zero error code: ",
ierr.value)
return _ostring(api_entityType_)
@staticmethod
def getParent(dim, tag):
"""
In a partitioned model, get the parent of the entity of dimension `dim' and
tag `tag', i.e. from which the entity is a part of, if any. `parentDim' and
`parentTag' are set to -1 if the entity has no parent.
Return `parentDim', `parentTag'.
"""
api_parentDim_ = c_int()
api_parentTag_ = c_int()
ierr = c_int()
lib.gmshModelGetParent(
c_int(dim),
c_int(tag),
byref(api_parentDim_),
byref(api_parentTag_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetParent returned non-zero error code: ",
ierr.value)
return (
api_parentDim_.value,
api_parentTag_.value)
@staticmethod
def getPartitions(dim, tag):
"""
In a partitioned model, return the tags of the partition(s) to which the
entity belongs.
Return `partitions'.
"""
api_partitions_, api_partitions_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelGetPartitions(
c_int(dim),
c_int(tag),
byref(api_partitions_), byref(api_partitions_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetPartitions returned non-zero error code: ",
ierr.value)
return _ovectorint(api_partitions_, api_partitions_n_.value)
@staticmethod
def getValue(dim, tag, parametricCoord):
"""
Evaluate the parametrization of the entity of dimension `dim' and tag `tag'
at the parametric coordinates `parametricCoord'. Only valid for `dim' equal
to 0 (with empty `parametricCoord'), 1 (with `parametricCoord' containing
parametric coordinates on the curve) or 2 (with `parametricCoord'
containing pairs of u, v parametric coordinates on the surface,
concatenated: [p1u, p1v, p2u, ...]). Return triplets of x, y, z coordinates
in `points', concatenated: [p1x, p1y, p1z, p2x, ...].
Return `points'.
"""
api_parametricCoord_, api_parametricCoord_n_ = _ivectordouble(parametricCoord)
api_points_, api_points_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelGetValue(
c_int(dim),
c_int(tag),
api_parametricCoord_, api_parametricCoord_n_,
byref(api_points_), byref(api_points_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetValue returned non-zero error code: ",
ierr.value)
return _ovectordouble(api_points_, api_points_n_.value)
@staticmethod
def getDerivative(dim, tag, parametricCoord):
"""
Evaluate the derivative of the parametrization of the entity of dimension
`dim' and tag `tag' at the parametric coordinates `parametricCoord'. Only
valid for `dim' equal to 1 (with `parametricCoord' containing parametric
coordinates on the curve) or 2 (with `parametricCoord' containing pairs of
u, v parametric coordinates on the surface, concatenated: [p1u, p1v, p2u,
...]). For `dim' equal to 1 return the x, y, z components of the derivative
with respect to u [d1ux, d1uy, d1uz, d2ux, ...]; for `dim' equal to 2
return the x, y, z components of the derivate with respect to u and v:
[d1ux, d1uy, d1uz, d1vx, d1vy, d1vz, d2ux, ...].
Return `derivatives'.
"""
api_parametricCoord_, api_parametricCoord_n_ = _ivectordouble(parametricCoord)
api_derivatives_, api_derivatives_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelGetDerivative(
c_int(dim),
c_int(tag),
api_parametricCoord_, api_parametricCoord_n_,
byref(api_derivatives_), byref(api_derivatives_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetDerivative returned non-zero error code: ",
ierr.value)
return _ovectordouble(api_derivatives_, api_derivatives_n_.value)
@staticmethod
def getCurvature(dim, tag, parametricCoord):
"""
Evaluate the (maximum) curvature of the entity of dimension `dim' and tag
`tag' at the parametric coordinates `parametricCoord'. Only valid for `dim'
equal to 1 (with `parametricCoord' containing parametric coordinates on the
curve) or 2 (with `parametricCoord' containing pairs of u, v parametric
coordinates on the surface, concatenated: [p1u, p1v, p2u, ...]).
Return `curvatures'.
"""
api_parametricCoord_, api_parametricCoord_n_ = _ivectordouble(parametricCoord)
api_curvatures_, api_curvatures_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelGetCurvature(
c_int(dim),
c_int(tag),
api_parametricCoord_, api_parametricCoord_n_,
byref(api_curvatures_), byref(api_curvatures_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetCurvature returned non-zero error code: ",
ierr.value)
return _ovectordouble(api_curvatures_, api_curvatures_n_.value)
@staticmethod
def getPrincipalCurvatures(tag, parametricCoord):
"""
Evaluate the principal curvatures of the surface with tag `tag' at the
parametric coordinates `parametricCoord', as well as their respective
directions. `parametricCoord' are given by pair of u and v coordinates,
concatenated: [p1u, p1v, p2u, ...].
Return `curvatureMax', `curvatureMin', `directionMax', `directionMin'.
"""
api_parametricCoord_, api_parametricCoord_n_ = _ivectordouble(parametricCoord)
api_curvatureMax_, api_curvatureMax_n_ = POINTER(c_double)(), c_size_t()
api_curvatureMin_, api_curvatureMin_n_ = POINTER(c_double)(), c_size_t()
api_directionMax_, api_directionMax_n_ = POINTER(c_double)(), c_size_t()
api_directionMin_, api_directionMin_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelGetPrincipalCurvatures(
c_int(tag),
api_parametricCoord_, api_parametricCoord_n_,
byref(api_curvatureMax_), byref(api_curvatureMax_n_),
byref(api_curvatureMin_), byref(api_curvatureMin_n_),
byref(api_directionMax_), byref(api_directionMax_n_),
byref(api_directionMin_), byref(api_directionMin_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetPrincipalCurvatures returned non-zero error code: ",
ierr.value)
return (
_ovectordouble(api_curvatureMax_, api_curvatureMax_n_.value),
_ovectordouble(api_curvatureMin_, api_curvatureMin_n_.value),
_ovectordouble(api_directionMax_, api_directionMax_n_.value),
_ovectordouble(api_directionMin_, api_directionMin_n_.value))
@staticmethod
def getNormal(tag, parametricCoord):
"""
Get the normal to the surface with tag `tag' at the parametric coordinates
`parametricCoord'. `parametricCoord' are given by pairs of u and v
coordinates, concatenated: [p1u, p1v, p2u, ...]. `normals' are returned as
triplets of x, y, z components, concatenated: [n1x, n1y, n1z, n2x, ...].
Return `normals'.
"""
api_parametricCoord_, api_parametricCoord_n_ = _ivectordouble(parametricCoord)
api_normals_, api_normals_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelGetNormal(
c_int(tag),
api_parametricCoord_, api_parametricCoord_n_,
byref(api_normals_), byref(api_normals_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetNormal returned non-zero error code: ",
ierr.value)
return _ovectordouble(api_normals_, api_normals_n_.value)
@staticmethod
def setVisibility(dimTags, value, recursive=False):
"""
Set the visibility of the model entities `dimTags' to `value'. Apply the
visibility setting recursively if `recursive' is true.
"""
api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
ierr = c_int()
lib.gmshModelSetVisibility(
api_dimTags_, api_dimTags_n_,
c_int(value),
c_int(bool(recursive)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelSetVisibility returned non-zero error code: ",
ierr.value)
@staticmethod
def getVisibility(dim, tag):
"""
Get the visibility of the model entity of dimension `dim' and tag `tag'.
Return `value'.
"""
api_value_ = c_int()
ierr = c_int()
lib.gmshModelGetVisibility(
c_int(dim),
c_int(tag),
byref(api_value_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetVisibility returned non-zero error code: ",
ierr.value)
return api_value_.value
@staticmethod
def setColor(dimTags, r, g, b, a=0, recursive=False):
"""
Set the color of the model entities `dimTags' to the RGBA value (`r', `g',
`b', `a'), where `r', `g', `b' and `a' should be integers between 0 and
255. Apply the color setting recursively if `recursive' is true.
"""
api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
ierr = c_int()
lib.gmshModelSetColor(
api_dimTags_, api_dimTags_n_,
c_int(r),
c_int(g),
c_int(b),
c_int(a),
c_int(bool(recursive)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelSetColor returned non-zero error code: ",
ierr.value)
@staticmethod
def getColor(dim, tag):
"""
Get the color of the model entity of dimension `dim' and tag `tag'.
Return `r', `g', `b', `a'.
"""
api_r_ = c_int()
api_g_ = c_int()
api_b_ = c_int()
api_a_ = c_int()
ierr = c_int()
lib.gmshModelGetColor(
c_int(dim),
c_int(tag),
byref(api_r_),
byref(api_g_),
byref(api_b_),
byref(api_a_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelGetColor returned non-zero error code: ",
ierr.value)
return (
api_r_.value,
api_g_.value,
api_b_.value,
api_a_.value)
@staticmethod
def setCoordinates(tag, x, y, z):
"""
Set the `x', `y', `z' coordinates of a geometrical point.
"""
ierr = c_int()
lib.gmshModelSetCoordinates(
c_int(tag),
c_double(x),
c_double(y),
c_double(z),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelSetCoordinates returned non-zero error code: ",
ierr.value)
class mesh:
"""
Mesh functions
"""
@staticmethod
def generate(dim=3):
"""
Generate a mesh of the current model, up to dimension `dim' (0, 1, 2 or 3).
"""
ierr = c_int()
lib.gmshModelMeshGenerate(
c_int(dim),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGenerate returned non-zero error code: ",
ierr.value)
@staticmethod
def partition(numPart):
"""
Partition the mesh of the current model into `numPart' partitions.
"""
ierr = c_int()
lib.gmshModelMeshPartition(
c_int(numPart),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshPartition returned non-zero error code: ",
ierr.value)
@staticmethod
def unpartition():
"""
Unpartition the mesh of the current model.
"""
ierr = c_int()
lib.gmshModelMeshUnpartition(
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshUnpartition returned non-zero error code: ",
ierr.value)
@staticmethod
def optimize(method):
"""
Optimize the mesh of the current model using `method' (empty for default
tetrahedral mesh optimizer, "Netgen" for Netgen optimizer, "HighOrder" for
direct high-order mesh optimizer, "HighOrderElastic" for high-order elastic
smoother).
"""
ierr = c_int()
lib.gmshModelMeshOptimize(
c_char_p(method.encode()),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshOptimize returned non-zero error code: ",
ierr.value)
@staticmethod
def recombine():
"""
Recombine the mesh of the current model.
"""
ierr = c_int()
lib.gmshModelMeshRecombine(
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshRecombine returned non-zero error code: ",
ierr.value)
@staticmethod
def refine():
"""
Refine the mesh of the current model by uniformly splitting the elements.
"""
ierr = c_int()
lib.gmshModelMeshRefine(
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshRefine returned non-zero error code: ",
ierr.value)
@staticmethod
def smooth():
"""
Smooth the mesh of the current model.
"""
ierr = c_int()
lib.gmshModelMeshSmooth(
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshSmooth returned non-zero error code: ",
ierr.value)
@staticmethod
def setOrder(order):
"""
Set the order of the elements in the mesh of the current model to `order'.
"""
ierr = c_int()
lib.gmshModelMeshSetOrder(
c_int(order),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshSetOrder returned non-zero error code: ",
ierr.value)
@staticmethod
def getLastEntityError():
"""
Get the last entities (if any) where a meshing error occurred. Currently
only populated by the new 3D meshing algorithms.
Return `dimTags'.
"""
api_dimTags_, api_dimTags_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetLastEntityError(
byref(api_dimTags_), byref(api_dimTags_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetLastEntityError returned non-zero error code: ",
ierr.value)
return _ovectorpair(api_dimTags_, api_dimTags_n_.value)
@staticmethod
def getLastNodeError():
"""
Get the last nodes (if any) where a meshing error occurred. Currently only
populated by the new 3D meshing algorithms.
Return `nodeTags'.
"""
api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetLastNodeError(
byref(api_nodeTags_), byref(api_nodeTags_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetLastNodeError returned non-zero error code: ",
ierr.value)
return _ovectorsize(api_nodeTags_, api_nodeTags_n_.value)
@staticmethod
def clear():
"""
Clear the mesh, i.e. delete all the nodes and elements.
"""
ierr = c_int()
lib.gmshModelMeshClear(
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshClear returned non-zero error code: ",
ierr.value)
@staticmethod
def getNodes(dim=-1, tag=-1, includeBoundary=False, returnParametricCoord=True):
"""
Get the nodes classified on the entity of dimension `dim' and tag `tag'. If
`tag' < 0, get the nodes for all entities of dimension `dim'. If `dim' and
`tag' are negative, get all the nodes in the mesh. `nodeTags' contains the
node tags (their unique, strictly positive identification numbers). `coord'
is a vector of length 3 times the length of `nodeTags' that contains the x,
y, z coordinates of the nodes, concatenated: [n1x, n1y, n1z, n2x, ...]. If
`dim' >= 0 and `returnParamtricCoord' is set, `parametricCoord' contains
the parametric coordinates ([u1, u2, ...] or [u1, v1, u2, ...]) of the
nodes, if available. The length of `parametricCoord' can be 0 or `dim'
times the length of `nodeTags'. If `includeBoundary' is set, also return
the nodes classified on the boundary of the entity (which will be
reparametrized on the entity if `dim' >= 0 in order to compute their
parametric coordinates).
Return `nodeTags', `coord', `parametricCoord'.
"""
api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
api_coord_, api_coord_n_ = POINTER(c_double)(), c_size_t()
api_parametricCoord_, api_parametricCoord_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetNodes(
byref(api_nodeTags_), byref(api_nodeTags_n_),
byref(api_coord_), byref(api_coord_n_),
byref(api_parametricCoord_), byref(api_parametricCoord_n_),
c_int(dim),
c_int(tag),
c_int(bool(includeBoundary)),
c_int(bool(returnParametricCoord)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetNodes returned non-zero error code: ",
ierr.value)
return (
_ovectorsize(api_nodeTags_, api_nodeTags_n_.value),
_ovectordouble(api_coord_, api_coord_n_.value),
_ovectordouble(api_parametricCoord_, api_parametricCoord_n_.value))
@staticmethod
def getNodesByElementType(elementType, tag=-1, returnParametricCoord=True):
"""
Get the nodes classified on the entity of tag `tag', for all the elements
of type `elementType'. The other arguments are treated as in `getNodes'.
Return `nodeTags', `coord', `parametricCoord'.
"""
api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
api_coord_, api_coord_n_ = POINTER(c_double)(), c_size_t()
api_parametricCoord_, api_parametricCoord_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetNodesByElementType(
c_int(elementType),
byref(api_nodeTags_), byref(api_nodeTags_n_),
byref(api_coord_), byref(api_coord_n_),
byref(api_parametricCoord_), byref(api_parametricCoord_n_),
c_int(tag),
c_int(bool(returnParametricCoord)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetNodesByElementType returned non-zero error code: ",
ierr.value)
return (
_ovectorsize(api_nodeTags_, api_nodeTags_n_.value),
_ovectordouble(api_coord_, api_coord_n_.value),
_ovectordouble(api_parametricCoord_, api_parametricCoord_n_.value))
@staticmethod
def getNode(nodeTag):
"""
Get the coordinates and the parametric coordinates (if any) of the node
with tag `tag'. This is a sometimes useful but inefficient way of accessing
nodes, as it relies on a cache stored in the model. For large meshes all
the nodes in the model should be numbered in a continuous sequence of tags
from 1 to N to maintain reasonable performance (in this case the internal
cache is based on a vector; otherwise it uses a map).
Return `coord', `parametricCoord'.
"""
api_coord_, api_coord_n_ = POINTER(c_double)(), c_size_t()
api_parametricCoord_, api_parametricCoord_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetNode(
c_size_t(nodeTag),
byref(api_coord_), byref(api_coord_n_),
byref(api_parametricCoord_), byref(api_parametricCoord_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetNode returned non-zero error code: ",
ierr.value)
return (
_ovectordouble(api_coord_, api_coord_n_.value),
_ovectordouble(api_parametricCoord_, api_parametricCoord_n_.value))
@staticmethod
def rebuildNodeCache(onlyIfNecessary=True):
"""
Rebuild the node cache.
"""
ierr = c_int()
lib.gmshModelMeshRebuildNodeCache(
c_int(bool(onlyIfNecessary)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshRebuildNodeCache returned non-zero error code: ",
ierr.value)
@staticmethod
def getNodesForPhysicalGroup(dim, tag):
"""
Get the nodes from all the elements belonging to the physical group of
dimension `dim' and tag `tag'. `nodeTags' contains the node tags; `coord'
is a vector of length 3 times the length of `nodeTags' that contains the x,
y, z coordinates of the nodes, concatenated: [n1x, n1y, n1z, n2x, ...].
Return `nodeTags', `coord'.
"""
api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
api_coord_, api_coord_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetNodesForPhysicalGroup(
c_int(dim),
c_int(tag),
byref(api_nodeTags_), byref(api_nodeTags_n_),
byref(api_coord_), byref(api_coord_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetNodesForPhysicalGroup returned non-zero error code: ",
ierr.value)
return (
_ovectorsize(api_nodeTags_, api_nodeTags_n_.value),
_ovectordouble(api_coord_, api_coord_n_.value))
@staticmethod
def addNodes(dim, tag, nodeTags, coord, parametricCoord=[]):
"""
Add nodes classified on the model entity of dimension `dim' and tag `tag'.
`nodeTags' contains the node tags (their unique, strictly positive
identification numbers). `coord' is a vector of length 3 times the length
of `nodeTags' that contains the x, y, z coordinates of the nodes,
concatenated: [n1x, n1y, n1z, n2x, ...]. The optional `parametricCoord'
vector contains the parametric coordinates of the nodes, if any. The length
of `parametricCoord' can be 0 or `dim' times the length of `nodeTags'. If
the `nodeTags' vector is empty, new tags are automatically assigned to the
nodes.
"""
api_nodeTags_, api_nodeTags_n_ = _ivectorsize(nodeTags)
api_coord_, api_coord_n_ = _ivectordouble(coord)
api_parametricCoord_, api_parametricCoord_n_ = _ivectordouble(parametricCoord)
ierr = c_int()
lib.gmshModelMeshAddNodes(
c_int(dim),
c_int(tag),
api_nodeTags_, api_nodeTags_n_,
api_coord_, api_coord_n_,
api_parametricCoord_, api_parametricCoord_n_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshAddNodes returned non-zero error code: ",
ierr.value)
@staticmethod
def reclassifyNodes():
"""
Reclassify all nodes on their associated model entity, based on the
elements. Can be used when importing nodes in bulk (e.g. by associating
them all to a single volume), to reclassify them correctly on model
surfaces, curves, etc. after the elements have been set.
"""
ierr = c_int()
lib.gmshModelMeshReclassifyNodes(
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshReclassifyNodes returned non-zero error code: ",
ierr.value)
@staticmethod
def relocateNodes(dim=-1, tag=-1):
"""
Relocate the nodes classified on the entity of dimension `dim' and tag
`tag' using their parametric coordinates. If `tag' < 0, relocate the nodes
for all entities of dimension `dim'. If `dim' and `tag' are negative,
relocate all the nodes in the mesh.
"""
ierr = c_int()
lib.gmshModelMeshRelocateNodes(
c_int(dim),
c_int(tag),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshRelocateNodes returned non-zero error code: ",
ierr.value)
@staticmethod
def getElements(dim=-1, tag=-1):
"""
Get the elements classified on the entity of dimension `dim' and tag `tag'.
If `tag' < 0, get the elements for all entities of dimension `dim'. If
`dim' and `tag' are negative, get all the elements in the mesh.
`elementTypes' contains the MSH types of the elements (e.g. `2' for 3-node
triangles: see `getElementProperties' to obtain the properties for a given
element type). `elementTags' is a vector of the same length as
`elementTypes'; each entry is a vector containing the tags (unique,
strictly positive identifiers) of the elements of the corresponding type.
`nodeTags' is also a vector of the same length as `elementTypes'; each
entry is a vector of length equal to the number of elements of the given
type times the number N of nodes for this type of element, that contains
the node tags of all the elements of the given type, concatenated: [e1n1,
e1n2, ..., e1nN, e2n1, ...].
Return `elementTypes', `elementTags', `nodeTags'.
"""
api_elementTypes_, api_elementTypes_n_ = POINTER(c_int)(), c_size_t()
api_elementTags_, api_elementTags_n_, api_elementTags_nn_ = POINTER(POINTER(c_size_t))(), POINTER(c_size_t)(), c_size_t()
api_nodeTags_, api_nodeTags_n_, api_nodeTags_nn_ = POINTER(POINTER(c_size_t))(), POINTER(c_size_t)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetElements(
byref(api_elementTypes_), byref(api_elementTypes_n_),
byref(api_elementTags_), byref(api_elementTags_n_), byref(api_elementTags_nn_),
byref(api_nodeTags_), byref(api_nodeTags_n_), byref(api_nodeTags_nn_),
c_int(dim),
c_int(tag),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetElements returned non-zero error code: ",
ierr.value)
return (
_ovectorint(api_elementTypes_, api_elementTypes_n_.value),
_ovectorvectorsize(api_elementTags_, api_elementTags_n_, api_elementTags_nn_),
_ovectorvectorsize(api_nodeTags_, api_nodeTags_n_, api_nodeTags_nn_))
@staticmethod
def getElement(elementTag):
"""
Get the type and node tags of the element with tag `tag'. This is a
sometimes useful but inefficient way of accessing elements, as it relies on
a cache stored in the model. For large meshes all the elements in the model
should be numbered in a continuous sequence of tags from 1 to N to maintain
reasonable performance (in this case the internal cache is based on a
vector; otherwise it uses a map).
Return `elementType', `nodeTags'.
"""
api_elementType_ = c_int()
api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetElement(
c_size_t(elementTag),
byref(api_elementType_),
byref(api_nodeTags_), byref(api_nodeTags_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetElement returned non-zero error code: ",
ierr.value)
return (
api_elementType_.value,
_ovectorsize(api_nodeTags_, api_nodeTags_n_.value))
@staticmethod
def getElementByCoordinates(x, y, z, dim=-1, strict=False):
"""
Search the mesh for an element located at coordinates (`x', `y', `z'). This
is a sometimes useful but inefficient way of accessing elements, as it
relies on a search in a spatial octree. If an element is found, return its
tag, type and node tags, as well as the local coordinates (`u', `v', `w')
within the element corresponding to search location. If `dim' is >= 0, only
search for elements of the given dimension. If `strict' is not set, use a
tolerance to find elements near the search location.
Return `elementTag', `elementType', `nodeTags', `u', `v', `w'.
"""
api_elementTag_ = c_size_t()
api_elementType_ = c_int()
api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
api_u_ = c_double()
api_v_ = c_double()
api_w_ = c_double()
ierr = c_int()
lib.gmshModelMeshGetElementByCoordinates(
c_double(x),
c_double(y),
c_double(z),
byref(api_elementTag_),
byref(api_elementType_),
byref(api_nodeTags_), byref(api_nodeTags_n_),
byref(api_u_),
byref(api_v_),
byref(api_w_),
c_int(dim),
c_int(bool(strict)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetElementByCoordinates returned non-zero error code: ",
ierr.value)
return (
api_elementTag_.value,
api_elementType_.value,
_ovectorsize(api_nodeTags_, api_nodeTags_n_.value),
api_u_.value,
api_v_.value,
api_w_.value)
@staticmethod
def getElementTypes(dim=-1, tag=-1):
"""
Get the types of elements in the entity of dimension `dim' and tag `tag'.
If `tag' < 0, get the types for all entities of dimension `dim'. If `dim'
and `tag' are negative, get all the types in the mesh.
Return `elementTypes'.
"""
api_elementTypes_, api_elementTypes_n_ = POINTER(c_int)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetElementTypes(
byref(api_elementTypes_), byref(api_elementTypes_n_),
c_int(dim),
c_int(tag),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetElementTypes returned non-zero error code: ",
ierr.value)
return _ovectorint(api_elementTypes_, api_elementTypes_n_.value)
@staticmethod
def getElementType(familyName, order, serendip=False):
"""
Return an element type given its family name `familyName' ("point", "line",
"triangle", "quadrangle", "tetrahedron", "pyramid", "prism", "hexahedron")
and polynomial order `order'. If `serendip' is true, return the
corresponding serendip element type (element without interior nodes).
Return an integer value.
"""
ierr = c_int()
api__result__ = lib.gmshModelMeshGetElementType(
c_char_p(familyName.encode()),
c_int(order),
c_int(bool(serendip)),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetElementType returned non-zero error code: ",
ierr.value)
return api__result__
@staticmethod
def getElementProperties(elementType):
"""
Get the properties of an element of type `elementType': its name
(`elementName'), dimension (`dim'), order (`order'), number of nodes
(`numNodes') and coordinates of the nodes in the reference element
(`nodeCoord' vector, of length `dim' times `numNodes').
Return `elementName', `dim', `order', `numNodes', `nodeCoord'.
"""
api_elementName_ = c_char_p()
api_dim_ = c_int()
api_order_ = c_int()
api_numNodes_ = c_int()
api_nodeCoord_, api_nodeCoord_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetElementProperties(
c_int(elementType),
byref(api_elementName_),
byref(api_dim_),
byref(api_order_),
byref(api_numNodes_),
byref(api_nodeCoord_), byref(api_nodeCoord_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetElementProperties returned non-zero error code: ",
ierr.value)
return (
_ostring(api_elementName_),
api_dim_.value,
api_order_.value,
api_numNodes_.value,
_ovectordouble(api_nodeCoord_, api_nodeCoord_n_.value))
@staticmethod
def getElementsByType(elementType, tag=-1, task=0, numTasks=1):
"""
Get the elements of type `elementType' classified on the entity of tag
`tag'. If `tag' < 0, get the elements for all entities. `elementTags' is a
vector containing the tags (unique, strictly positive identifiers) of the
elements of the corresponding type. `nodeTags' is a vector of length equal
to the number of elements of the given type times the number N of nodes for
this type of element, that contains the node tags of all the elements of
the given type, concatenated: [e1n1, e1n2, ..., e1nN, e2n1, ...]. If
`numTasks' > 1, only compute and return the part of the data indexed by
`task'.
Return `elementTags', `nodeTags'.
"""
api_elementTags_, api_elementTags_n_ = POINTER(c_size_t)(), c_size_t()
api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetElementsByType(
c_int(elementType),
byref(api_elementTags_), byref(api_elementTags_n_),
byref(api_nodeTags_), byref(api_nodeTags_n_),
c_int(tag),
c_size_t(task),
c_size_t(numTasks),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetElementsByType returned non-zero error code: ",
ierr.value)
return (
_ovectorsize(api_elementTags_, api_elementTags_n_.value),
_ovectorsize(api_nodeTags_, api_nodeTags_n_.value))
@staticmethod
def addElements(dim, tag, elementTypes, elementTags, nodeTags):
"""
Add elements classified on the entity of dimension `dim' and tag `tag'.
`types' contains the MSH types of the elements (e.g. `2' for 3-node
triangles: see the Gmsh reference manual). `elementTags' is a vector of the
same length as `types'; each entry is a vector containing the tags (unique,
strictly positive identifiers) of the elements of the corresponding type.
`nodeTags' is also a vector of the same length as `types'; each entry is a
vector of length equal to the number of elements of the given type times
the number N of nodes per element, that contains the node tags of all the
elements of the given type, concatenated: [e1n1, e1n2, ..., e1nN, e2n1,
...].
"""
api_elementTypes_, api_elementTypes_n_ = _ivectorint(elementTypes)
api_elementTags_, api_elementTags_n_, api_elementTags_nn_ = _ivectorvectorsize(elementTags)
api_nodeTags_, api_nodeTags_n_, api_nodeTags_nn_ = _ivectorvectorsize(nodeTags)
ierr = c_int()
lib.gmshModelMeshAddElements(
c_int(dim),
c_int(tag),
api_elementTypes_, api_elementTypes_n_,
api_elementTags_, api_elementTags_n_, api_elementTags_nn_,
api_nodeTags_, api_nodeTags_n_, api_nodeTags_nn_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshAddElements returned non-zero error code: ",
ierr.value)
@staticmethod
def addElementsByType(tag, elementType, elementTags, nodeTags):
"""
Add elements of type `elementType' classified on the entity of tag `tag'.
`elementTags' contains the tags (unique, strictly positive identifiers) of
the elements of the corresponding type. `nodeTags' is a vector of length
equal to the number of elements times the number N of nodes per element,
that contains the node tags of all the elements, concatenated: [e1n1, e1n2,
..., e1nN, e2n1, ...]. If the `elementTag' vector is empty, new tags are
automatically assigned to the elements.
"""
api_elementTags_, api_elementTags_n_ = _ivectorsize(elementTags)
api_nodeTags_, api_nodeTags_n_ = _ivectorsize(nodeTags)
ierr = c_int()
lib.gmshModelMeshAddElementsByType(
c_int(tag),
c_int(elementType),
api_elementTags_, api_elementTags_n_,
api_nodeTags_, api_nodeTags_n_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshAddElementsByType returned non-zero error code: ",
ierr.value)
@staticmethod
def getIntegrationPoints(elementType, integrationType):
"""
Get the numerical quadrature information for the given element type
`elementType' and integration rule `integrationType' (e.g. "Gauss4" for a
Gauss quadrature suited for integrating 4th order polynomials).
`integrationPoints' contains the u, v, w coordinates of the G integration
points in the reference element: [g1u, g1v, g1w, ..., gGu, gGv, gGw].
`integrationWeigths' contains the associated weights: [g1q, ..., gGq].
Return `integrationPoints', `integrationWeights'.
"""
api_integrationPoints_, api_integrationPoints_n_ = POINTER(c_double)(), c_size_t()
api_integrationWeights_, api_integrationWeights_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetIntegrationPoints(
c_int(elementType),
c_char_p(integrationType.encode()),
byref(api_integrationPoints_), byref(api_integrationPoints_n_),
byref(api_integrationWeights_), byref(api_integrationWeights_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetIntegrationPoints returned non-zero error code: ",
ierr.value)
return (
_ovectordouble(api_integrationPoints_, api_integrationPoints_n_.value),
_ovectordouble(api_integrationWeights_, api_integrationWeights_n_.value))
@staticmethod
def getJacobians(elementType, integrationPoints, tag=-1, task=0, numTasks=1):
"""
Get the Jacobians of all the elements of type `elementType' classified on
the entity of tag `tag', at the G integration points `integrationPoints'
given as concatenated triplets of coordinates in the reference element
[g1u, g1v, g1w, ..., gGu, gGv, gGw]. Data is returned by element, with
elements in the same order as in `getElements' and `getElementsByType'.
`jacobians' contains for each element the 9 entries of the 3x3 Jacobian
matrix at each integration point. The matrix is returned by column:
[e1g1Jxu, e1g1Jyu, e1g1Jzu, e1g1Jxv, ..., e1g1Jzw, e1g2Jxu, ..., e1gGJzw,
e2g1Jxu, ...], with Jxu=dx/du, Jyu=dy/du, etc. `determinants' contains for
each element the determinant of the Jacobian matrix at each integration
point: [e1g1, e1g2, ... e1gG, e2g1, ...]. `points' contains for each
element the x, y, z coordinates of the integration points. If `tag' < 0,
get the Jacobian data for all entities. If `numTasks' > 1, only compute and
return the part of the data indexed by `task'.
Return `jacobians', `determinants', `points'.
"""
api_integrationPoints_, api_integrationPoints_n_ = _ivectordouble(integrationPoints)
api_jacobians_, api_jacobians_n_ = POINTER(c_double)(), c_size_t()
api_determinants_, api_determinants_n_ = POINTER(c_double)(), c_size_t()
api_points_, api_points_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetJacobians(
c_int(elementType),
api_integrationPoints_, api_integrationPoints_n_,
byref(api_jacobians_), byref(api_jacobians_n_),
byref(api_determinants_), byref(api_determinants_n_),
byref(api_points_), byref(api_points_n_),
c_int(tag),
c_size_t(task),
c_size_t(numTasks),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetJacobians returned non-zero error code: ",
ierr.value)
return (
_ovectordouble(api_jacobians_, api_jacobians_n_.value),
_ovectordouble(api_determinants_, api_determinants_n_.value),
_ovectordouble(api_points_, api_points_n_.value))
@staticmethod
def getBasisFunctions(elementType, integrationPoints, functionSpaceType):
"""
Get the basis functions of the element of type `elementType' at the
integration points `integrationPoints' (given as concatenated triplets of
coordinates in the reference element [g1u, g1v, g1w, ..., gGu, gGv, gGw]),
for the function space `functionSpaceType' (e.g. "Lagrange" or
"GradLagrange" for Lagrange basis functions or their gradient, in the u, v,
w coordinates of the reference element). `numComponents' returns the number
C of components of a basis function. `basisFunctions' returns the value of
the N basis functions at the integration points, i.e. [g1f1, g1f2, ...,
g1fN, g2f1, ...] when C == 1 or [g1f1u, g1f1v, g1f1w, g1f2u, ..., g1fNw,
g2f1u, ...] when C == 3.
Return `numComponents', `basisFunctions'.
"""
api_integrationPoints_, api_integrationPoints_n_ = _ivectordouble(integrationPoints)
api_numComponents_ = c_int()
api_basisFunctions_, api_basisFunctions_n_ = POINTER(c_double)(), c_size_t()
ierr = c_int()
lib.gmshModelMeshGetBasisFunctions(
c_int(elementType),
api_integrationPoints_, api_integrationPoints_n_,
c_char_p(functionSpaceType.encode()),
byref(api_numComponents_),
byref(api_basisFunctions_), byref(api_basisFunctions_n_),
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshGetBasisFunctions returned non-zero error code: ",
ierr.value)
return (
api_numComponents_.value,
_ovectordouble(api_basisFunctions_, api_basisFunctions_n_.value))
        @staticmethod
        def getBasisFunctionsForElements(elementType, integrationPoints, functionSpaceType, tag=-1):
            """
            Get the element-dependent basis functions of the elements of type
            `elementType' in the entity of tag `tag'at the integration points
            `integrationPoints' (given as concatenated triplets of coordinates in the
            reference element [g1u, g1v, g1w, ..., gGu, gGv, gGw]), for the function
            space `functionSpaceType' (e.g. "H1Legendre3" or "GradH1Legendre3" for 3rd
            order hierarchical H1 Legendre functions or their gradient, in the u, v, w
            coordinates of the reference elements). `numComponents' returns the number
            C of components of a basis function. `numBasisFunctions' returns the number
            N of basis functions per element. `basisFunctions' returns the value of the
            basis functions at the integration points for each element: [e1g1f1,...,
            e1g1fN, e1g2f1,..., e2g1f1, ...] when C == 1 or [e1g1f1u, e1g1f1v,...,
            e1g1fNw, e1g2f1u,..., e2g1f1u, ...]. Warning: this is an experimental
            feature and will probably change in a future release.
            Return `numComponents', `numFunctionsPerElements', `basisFunctions'.
            """
            # Marshal input coordinates into a C double array + length pair.
            api_integrationPoints_, api_integrationPoints_n_ = _ivectordouble(integrationPoints)
            api_numComponents_ = c_int()
            api_numFunctionsPerElements_ = c_int()
            # Output buffer: allocated and filled by the C library.
            api_basisFunctions_, api_basisFunctions_n_ = POINTER(c_double)(), c_size_t()
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshGetBasisFunctionsForElements(
                c_int(elementType),
                api_integrationPoints_, api_integrationPoints_n_,
                c_char_p(functionSpaceType.encode()),
                byref(api_numComponents_),
                byref(api_numFunctionsPerElements_),
                byref(api_basisFunctions_), byref(api_basisFunctions_n_),
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshGetBasisFunctionsForElements returned non-zero error code: ",
                    ierr.value)
            return (
                api_numComponents_.value,
                api_numFunctionsPerElements_.value,
                _ovectordouble(api_basisFunctions_, api_basisFunctions_n_.value))
        @staticmethod
        def getKeysForElements(elementType, functionSpaceType, tag=-1, returnCoord=True):
            """
            Generate the `keys' for the elements of type `elementType' in the entity of
            tag `tag', for the `functionSpaceType' function space. Each key uniquely
            identifies a basis function in the function space. If `returnCoord' is set,
            the `coord' vector contains the x, y, z coordinates locating basis
            functions for sorting purposes. Warning: this is an experimental feature
            and will probably change in a future release.
            Return `keys', `coord'.
            """
            # Output buffers: allocated and filled by the C library.
            api_keys_, api_keys_n_ = POINTER(c_int)(), c_size_t()
            api_coord_, api_coord_n_ = POINTER(c_double)(), c_size_t()
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshGetKeysForElements(
                c_int(elementType),
                c_char_p(functionSpaceType.encode()),
                byref(api_keys_), byref(api_keys_n_),
                byref(api_coord_), byref(api_coord_n_),
                c_int(tag),
                c_int(bool(returnCoord)),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshGetKeysForElements returned non-zero error code: ",
                    ierr.value)
            # keys are (int, int) pairs; coord is a flat double vector.
            return (
                _ovectorpair(api_keys_, api_keys_n_.value),
                _ovectordouble(api_coord_, api_coord_n_.value))
        @staticmethod
        def getInformationForElements(keys, order, elementType):
            """
            Get information about the `keys'. Warning: this is an experimental feature
            and will probably change in a future release.
            Return `info'.
            """
            # Marshal the (int, int) key pairs into a flat C int array.
            api_keys_, api_keys_n_ = _ivectorpair(keys)
            # Output buffer: allocated and filled by the C library.
            api_info_, api_info_n_ = POINTER(c_int)(), c_size_t()
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshGetInformationForElements(
                api_keys_, api_keys_n_,
                byref(api_info_), byref(api_info_n_),
                c_int(order),
                c_int(elementType),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshGetInformationForElements returned non-zero error code: ",
                    ierr.value)
            return _ovectorpair(api_info_, api_info_n_.value)
        @staticmethod
        def precomputeBasisFunctions(elementType):
            """
            Precomputes the basis functions corresponding to `elementType'.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshPrecomputeBasisFunctions(
                c_int(elementType),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshPrecomputeBasisFunctions returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def getBarycenters(elementType, tag, fast, primary, task=0, numTasks=1):
            """
            Get the barycenters of all elements of type `elementType' classified on the
            entity of tag `tag'. If `primary' is set, only the primary nodes of the
            elements are taken into account for the barycenter calculation. If `fast'
            is set, the function returns the sum of the primary node coordinates
            (without normalizing by the number of nodes). If `tag' < 0, get the
            barycenters for all entities. If `numTasks' > 1, only compute and return
            the part of the data indexed by `task'.
            Return `barycenters'.
            """
            # Output buffer: allocated and filled by the C library.
            api_barycenters_, api_barycenters_n_ = POINTER(c_double)(), c_size_t()
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshGetBarycenters(
                c_int(elementType),
                c_int(tag),
                c_int(bool(fast)),
                c_int(bool(primary)),
                byref(api_barycenters_), byref(api_barycenters_n_),
                c_size_t(task),
                c_size_t(numTasks),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshGetBarycenters returned non-zero error code: ",
                    ierr.value)
            return _ovectordouble(api_barycenters_, api_barycenters_n_.value)
        @staticmethod
        def getElementEdgeNodes(elementType, tag=-1, primary=False, task=0, numTasks=1):
            """
            Get the nodes on the edges of all elements of type `elementType' classified
            on the entity of tag `tag'. `nodeTags' contains the node tags of the edges
            for all the elements: [e1a1n1, e1a1n2, e1a2n1, ...]. Data is returned by
            element, with elements in the same order as in `getElements' and
            `getElementsByType'. If `primary' is set, only the primary (begin/end)
            nodes of the edges are returned. If `tag' < 0, get the edge nodes for all
            entities. If `numTasks' > 1, only compute and return the part of the data
            indexed by `task'.
            Return `nodeTags'.
            """
            # Output buffer: allocated and filled by the C library.
            api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshGetElementEdgeNodes(
                c_int(elementType),
                byref(api_nodeTags_), byref(api_nodeTags_n_),
                c_int(tag),
                c_int(bool(primary)),
                c_size_t(task),
                c_size_t(numTasks),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshGetElementEdgeNodes returned non-zero error code: ",
                    ierr.value)
            return _ovectorsize(api_nodeTags_, api_nodeTags_n_.value)
        @staticmethod
        def getElementFaceNodes(elementType, faceType, tag=-1, primary=False, task=0, numTasks=1):
            """
            Get the nodes on the faces of type `faceType' (3 for triangular faces, 4
            for quadrangular faces) of all elements of type `elementType' classified on
            the entity of tag `tag'. `nodeTags' contains the node tags of the faces for
            all elements: [e1f1n1, ..., e1f1nFaceType, e1f2n1, ...]. Data is returned
            by element, with elements in the same order as in `getElements' and
            `getElementsByType'. If `primary' is set, only the primary (corner) nodes
            of the faces are returned. If `tag' < 0, get the face nodes for all
            entities. If `numTasks' > 1, only compute and return the part of the data
            indexed by `task'.
            Return `nodeTags'.
            """
            # Output buffer: allocated and filled by the C library.
            api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshGetElementFaceNodes(
                c_int(elementType),
                c_int(faceType),
                byref(api_nodeTags_), byref(api_nodeTags_n_),
                c_int(tag),
                c_int(bool(primary)),
                c_size_t(task),
                c_size_t(numTasks),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshGetElementFaceNodes returned non-zero error code: ",
                    ierr.value)
            return _ovectorsize(api_nodeTags_, api_nodeTags_n_.value)
        @staticmethod
        def getGhostElements(dim, tag):
            """
            Get the ghost elements `elementTags' and their associated `partitions'
            stored in the ghost entity of dimension `dim' and tag `tag'.
            Return `elementTags', `partitions'.
            """
            # Output buffers: allocated and filled by the C library.
            api_elementTags_, api_elementTags_n_ = POINTER(c_size_t)(), c_size_t()
            api_partitions_, api_partitions_n_ = POINTER(c_int)(), c_size_t()
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshGetGhostElements(
                c_int(dim),
                c_int(tag),
                byref(api_elementTags_), byref(api_elementTags_n_),
                byref(api_partitions_), byref(api_partitions_n_),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshGetGhostElements returned non-zero error code: ",
                    ierr.value)
            return (
                _ovectorsize(api_elementTags_, api_elementTags_n_.value),
                _ovectorint(api_partitions_, api_partitions_n_.value))
        @staticmethod
        def setSize(dimTags, size):
            """
            Set a mesh size constraint on the model entities `dimTags'. Currently only
            entities of dimension 0 (points) are handled.
            """
            # Marshal the (dim, tag) pairs into a flat C int array.
            api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshSetSize(
                api_dimTags_, api_dimTags_n_,
                c_double(size),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshSetSize returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def setTransfiniteCurve(tag, numNodes, meshType="Progression", coef=1.):
            """
            Set a transfinite meshing constraint on the curve `tag', with `numNodes'
            nodes distributed according to `meshType' and `coef'. Currently supported
            types are "Progression" (geometrical progression with power `coef') and
            "Bump" (refinement toward both extremities of the curve).
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshSetTransfiniteCurve(
                c_int(tag),
                c_int(numNodes),
                c_char_p(meshType.encode()),
                c_double(coef),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshSetTransfiniteCurve returned non-zero error code: ",
                    ierr.value)
@staticmethod
def setTransfiniteSurface(tag, arrangement="Left", cornerTags=[]):
"""
Set a transfinite meshing constraint on the surface `tag'. `arrangement'
describes the arrangement of the triangles when the surface is not flagged
as recombined: currently supported values are "Left", "Right",
"AlternateLeft" and "AlternateRight". `cornerTags' can be used to specify
the (3 or 4) corners of the transfinite interpolation explicitly;
specifying the corners explicitly is mandatory if the surface has more that
3 or 4 points on its boundary.
"""
api_cornerTags_, api_cornerTags_n_ = _ivectorint(cornerTags)
ierr = c_int()
lib.gmshModelMeshSetTransfiniteSurface(
c_int(tag),
c_char_p(arrangement.encode()),
api_cornerTags_, api_cornerTags_n_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshSetTransfiniteSurface returned non-zero error code: ",
ierr.value)
@staticmethod
def setTransfiniteVolume(tag, cornerTags=[]):
"""
Set a transfinite meshing constraint on the surface `tag'. `cornerTags' can
be used to specify the (6 or 8) corners of the transfinite interpolation
explicitly.
"""
api_cornerTags_, api_cornerTags_n_ = _ivectorint(cornerTags)
ierr = c_int()
lib.gmshModelMeshSetTransfiniteVolume(
c_int(tag),
api_cornerTags_, api_cornerTags_n_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshSetTransfiniteVolume returned non-zero error code: ",
ierr.value)
        @staticmethod
        def setRecombine(dim, tag):
            """
            Set a recombination meshing constraint on the model entity of dimension
            `dim' and tag `tag'. Currently only entities of dimension 2 (to recombine
            triangles into quadrangles) are supported.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshSetRecombine(
                c_int(dim),
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshSetRecombine returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def setSmoothing(dim, tag, val):
            """
            Set a smoothing meshing constraint on the model entity of dimension `dim'
            and tag `tag'. `val' iterations of a Laplace smoother are applied.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshSetSmoothing(
                c_int(dim),
                c_int(tag),
                c_int(val),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshSetSmoothing returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def setReverse(dim, tag, val=True):
            """
            Set a reverse meshing constraint on the model entity of dimension `dim' and
            tag `tag'. If `val' is true, the mesh orientation will be reversed with
            respect to the natural mesh orientation (i.e. the orientation consistent
            with the orientation of the geometry). If `val' is false, the mesh is left
            as-is.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshSetReverse(
                c_int(dim),
                c_int(tag),
                c_int(bool(val)),  # normalize truthy values to 0/1 for the C int flag
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshSetReverse returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def setOutwardOrientation(tag):
            """
            Set meshing constraints on the bounding surfaces of the volume of tag `tag'
            so that all surfaces are oriented with outward pointing normals. Currently
            only available with the OpenCASCADE kernel, as it relies on the STL
            triangulation.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshSetOutwardOrientation(
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshSetOutwardOrientation returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def embed(dim, tags, inDim, inTag):
            """
            Embed the model entities of dimension `dim' and tags `tags' in the (inDim,
            inTag) model entity. `inDim' must be strictly greater than `dim'.
            """
            # Marshal the tag list into a C int array + length pair.
            api_tags_, api_tags_n_ = _ivectorint(tags)
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshEmbed(
                c_int(dim),
                api_tags_, api_tags_n_,
                c_int(inDim),
                c_int(inTag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshEmbed returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def removeEmbedded(dimTags, dim=-1):
            """
            Remove embedded entities in the model entities `dimTags'. if `dim' is >= 0,
            only remove embedded entities of the given dimension (e.g. embedded points
            if `dim' == 0).
            """
            # Marshal the (dim, tag) pairs into a flat C int array.
            api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshRemoveEmbedded(
                api_dimTags_, api_dimTags_n_,
                c_int(dim),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshRemoveEmbedded returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def reorderElements(elementType, tag, ordering):
            """
            Reorder the elements of type `elementType' classified on the entity of tag
            `tag' according to `ordering'.
            """
            # Marshal the ordering into a C size_t array + length pair.
            api_ordering_, api_ordering_n_ = _ivectorsize(ordering)
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshReorderElements(
                c_int(elementType),
                c_int(tag),
                api_ordering_, api_ordering_n_,
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshReorderElements returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def renumberNodes():
            """
            Renumber the node tags in a continuous sequence.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshRenumberNodes(
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshRenumberNodes returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def renumberElements():
            """
            Renumber the element tags in a continuous sequence.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshRenumberElements(
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshRenumberElements returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def setPeriodic(dim, tags, tagsMaster, affineTransform):
            """
            Set the meshes of the entities of dimension `dim' and tag `tags' as
            periodic copies of the meshes of entities `tagsMaster', using the affine
            transformation specified in `affineTransformation' (16 entries of a 4x4
            matrix, by row). Currently only available for `dim' == 1 and `dim' == 2.
            """
            # Marshal each Python sequence into a C array + length pair.
            api_tags_, api_tags_n_ = _ivectorint(tags)
            api_tagsMaster_, api_tagsMaster_n_ = _ivectorint(tagsMaster)
            api_affineTransform_, api_affineTransform_n_ = _ivectordouble(affineTransform)
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshSetPeriodic(
                c_int(dim),
                api_tags_, api_tags_n_,
                api_tagsMaster_, api_tagsMaster_n_,
                api_affineTransform_, api_affineTransform_n_,
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshSetPeriodic returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def getPeriodicNodes(dim, tag):
            """
            Get the master entity `tagMaster', the node tags `nodeTags' and their
            corresponding master node tags `nodeTagsMaster', and the affine transform
            `affineTransform' for the entity of dimension `dim' and tag `tag'.
            Return `tagMaster', `nodeTags', `nodeTagsMaster', `affineTransform'.
            """
            # Output buffers: allocated and filled by the C library.
            api_tagMaster_ = c_int()
            api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()
            api_nodeTagsMaster_, api_nodeTagsMaster_n_ = POINTER(c_size_t)(), c_size_t()
            api_affineTransform_, api_affineTransform_n_ = POINTER(c_double)(), c_size_t()
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshGetPeriodicNodes(
                c_int(dim),
                c_int(tag),
                byref(api_tagMaster_),
                byref(api_nodeTags_), byref(api_nodeTags_n_),
                byref(api_nodeTagsMaster_), byref(api_nodeTagsMaster_n_),
                byref(api_affineTransform_), byref(api_affineTransform_n_),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshGetPeriodicNodes returned non-zero error code: ",
                    ierr.value)
            return (
                api_tagMaster_.value,
                _ovectorsize(api_nodeTags_, api_nodeTags_n_.value),
                _ovectorsize(api_nodeTagsMaster_, api_nodeTagsMaster_n_.value),
                _ovectordouble(api_affineTransform_, api_affineTransform_n_.value))
        @staticmethod
        def removeDuplicateNodes():
            """
            Remove duplicate nodes in the mesh of the current model.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshRemoveDuplicateNodes(
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshRemoveDuplicateNodes returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def splitQuadrangles(quality=1., tag=-1):
            """
            Split (into two triangles) all quadrangles in surface `tag' whose quality
            is lower than `quality'. If `tag' < 0, split quadrangles in all surfaces.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshSplitQuadrangles(
                c_double(quality),
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshSplitQuadrangles returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def classifySurfaces(angle, boundary=True, forReparametrization=False):
            """
            Classify ("color") the surface mesh based on the angle threshold `angle'
            (in radians), and create new discrete surfaces, curves and points
            accordingly. If `boundary' is set, also create discrete curves on the
            boundary if the surface is open. If `forReparametrization' is set, create
            edges and surfaces that can be reparametrized using a single map.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshClassifySurfaces(
                c_double(angle),
                c_int(bool(boundary)),  # normalize truthy values to 0/1 C flags
                c_int(bool(forReparametrization)),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshClassifySurfaces returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def createGeometry():
            """
            Create a parametrization for discrete curves and surfaces (i.e. curves and
            surfaces represented solely by a mesh, without an underlying CAD
            description), assuming that each can be parametrized with a single map.
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshCreateGeometry(
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshCreateGeometry returned non-zero error code: ",
                    ierr.value)
        @staticmethod
        def createTopology():
            """
            Create a boundary representation from the mesh if the model does not have
            one (e.g. when imported from mesh file formats with no BRep representation
            of the underlying model).
            """
            ierr = c_int()  # C API status; 0 means success
            lib.gmshModelMeshCreateTopology(
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelMeshCreateTopology returned non-zero error code: ",
                    ierr.value)
@staticmethod
def computeHomology(domainTags=[], subdomainTags=[], dims=[]):
"""
Compute a basis representation for homology spaces after a mesh has been
generated. The computation domain is given in a list of physical group tags
`domainTags'; if empty, the whole mesh is the domain. The computation
subdomain for relative homology computation is given in a list of physical
group tags `subdomainTags'; if empty, absolute homology is computed. The
dimensions homology bases to be computed are given in the list `dim'; if
empty, all bases are computed. Resulting basis representation chains are
stored as physical groups in the mesh.
"""
api_domainTags_, api_domainTags_n_ = _ivectorint(domainTags)
api_subdomainTags_, api_subdomainTags_n_ = _ivectorint(subdomainTags)
api_dims_, api_dims_n_ = _ivectorint(dims)
ierr = c_int()
lib.gmshModelMeshComputeHomology(
api_domainTags_, api_domainTags_n_,
api_subdomainTags_, api_subdomainTags_n_,
api_dims_, api_dims_n_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshComputeHomology returned non-zero error code: ",
ierr.value)
@staticmethod
def computeCohomology(domainTags=[], subdomainTags=[], dims=[]):
"""
Compute a basis representation for cohomology spaces after a mesh has been
generated. The computation domain is given in a list of physical group tags
`domainTags'; if empty, the whole mesh is the domain. The computation
subdomain for relative cohomology computation is given in a list of
physical group tags `subdomainTags'; if empty, absolute cohomology is
computed. The dimensions homology bases to be computed are given in the
list `dim'; if empty, all bases are computed. Resulting basis
representation cochains are stored as physical groups in the mesh.
"""
api_domainTags_, api_domainTags_n_ = _ivectorint(domainTags)
api_subdomainTags_, api_subdomainTags_n_ = _ivectorint(subdomainTags)
api_dims_, api_dims_n_ = _ivectorint(dims)
ierr = c_int()
lib.gmshModelMeshComputeCohomology(
api_domainTags_, api_domainTags_n_,
api_subdomainTags_, api_subdomainTags_n_,
api_dims_, api_dims_n_,
byref(ierr))
if ierr.value != 0:
raise ValueError(
"gmshModelMeshComputeCohomology returned non-zero error code: ",
ierr.value)
        class field:
            """
            Mesh size field functions
            """
            @staticmethod
            def add(fieldType, tag=-1):
                """
                Add a new mesh size field of type `fieldType'. If `tag' is positive, assign
                the tag explicitly; otherwise a new tag is assigned automatically. Return
                the field tag.
                Return an integer value.
                """
                ierr = c_int()  # C API status; 0 means success
                api__result__ = lib.gmshModelMeshFieldAdd(
                    c_char_p(fieldType.encode()),
                    c_int(tag),
                    byref(ierr))
                if ierr.value != 0:
                    raise ValueError(
                        "gmshModelMeshFieldAdd returned non-zero error code: ",
                        ierr.value)
                return api__result__
            @staticmethod
            def remove(tag):
                """
                Remove the field with tag `tag'.
                """
                ierr = c_int()  # C API status; 0 means success
                lib.gmshModelMeshFieldRemove(
                    c_int(tag),
                    byref(ierr))
                if ierr.value != 0:
                    raise ValueError(
                        "gmshModelMeshFieldRemove returned non-zero error code: ",
                        ierr.value)
            @staticmethod
            def setNumber(tag, option, value):
                """
                Set the numerical option `option' to value `value' for field `tag'.
                """
                ierr = c_int()  # C API status; 0 means success
                lib.gmshModelMeshFieldSetNumber(
                    c_int(tag),
                    c_char_p(option.encode()),
                    c_double(value),
                    byref(ierr))
                if ierr.value != 0:
                    raise ValueError(
                        "gmshModelMeshFieldSetNumber returned non-zero error code: ",
                        ierr.value)
            @staticmethod
            def setString(tag, option, value):
                """
                Set the string option `option' to value `value' for field `tag'.
                """
                ierr = c_int()  # C API status; 0 means success
                lib.gmshModelMeshFieldSetString(
                    c_int(tag),
                    c_char_p(option.encode()),
                    c_char_p(value.encode()),
                    byref(ierr))
                if ierr.value != 0:
                    raise ValueError(
                        "gmshModelMeshFieldSetString returned non-zero error code: ",
                        ierr.value)
            @staticmethod
            def setNumbers(tag, option, value):
                """
                Set the numerical list option `option' to value `value' for field `tag'.
                """
                # Marshal the numeric list into a C double array + length pair.
                api_value_, api_value_n_ = _ivectordouble(value)
                ierr = c_int()  # C API status; 0 means success
                lib.gmshModelMeshFieldSetNumbers(
                    c_int(tag),
                    c_char_p(option.encode()),
                    api_value_, api_value_n_,
                    byref(ierr))
                if ierr.value != 0:
                    raise ValueError(
                        "gmshModelMeshFieldSetNumbers returned non-zero error code: ",
                        ierr.value)
            @staticmethod
            def setAsBackgroundMesh(tag):
                """
                Set the field `tag' as the background mesh size field.
                """
                ierr = c_int()  # C API status; 0 means success
                lib.gmshModelMeshFieldSetAsBackgroundMesh(
                    c_int(tag),
                    byref(ierr))
                if ierr.value != 0:
                    raise ValueError(
                        "gmshModelMeshFieldSetAsBackgroundMesh returned non-zero error code: ",
                        ierr.value)
            @staticmethod
            def setAsBoundaryLayer(tag):
                """
                Set the field `tag' as a boundary layer size field.
                """
                ierr = c_int()  # C API status; 0 means success
                lib.gmshModelMeshFieldSetAsBoundaryLayer(
                    c_int(tag),
                    byref(ierr))
                if ierr.value != 0:
                    raise ValueError(
                        "gmshModelMeshFieldSetAsBoundaryLayer returned non-zero error code: ",
                        ierr.value)
class geo:
"""
Built-in CAD kernel functions
"""
        @staticmethod
        def addPoint(x, y, z, meshSize=0., tag=-1):
            """
            Add a geometrical point in the built-in CAD representation, at coordinates
            (`x', `y', `z'). If `meshSize' is > 0, add a meshing constraint at that
            point. If `tag' is positive, set the tag explicitly; otherwise a new tag is
            selected automatically. Return the tag of the point. (Note that the point
            will be added in the current model only after `synchronize' is called. This
            behavior holds for all the entities added in the geo module.)
            Return an integer value.
            """
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddPoint(
                c_double(x),
                c_double(y),
                c_double(z),
                c_double(meshSize),
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddPoint returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addLine(startTag, endTag, tag=-1):
            """
            Add a straight line segment between the two points with tags `startTag' and
            `endTag'. If `tag' is positive, set the tag explicitly; otherwise a new tag
            is selected automatically. Return the tag of the line.
            Return an integer value.
            """
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddLine(
                c_int(startTag),
                c_int(endTag),
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddLine returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addCircleArc(startTag, centerTag, endTag, tag=-1, nx=0., ny=0., nz=0.):
            """
            Add a circle arc (strictly smaller than Pi) between the two points with
            tags `startTag' and `endTag', with center `centertag'. If `tag' is
            positive, set the tag explicitly; otherwise a new tag is selected
            automatically. If (`nx', `ny', `nz') != (0,0,0), explicitly set the plane
            of the circle arc. Return the tag of the circle arc.
            Return an integer value.
            """
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddCircleArc(
                c_int(startTag),
                c_int(centerTag),
                c_int(endTag),
                c_int(tag),
                c_double(nx),
                c_double(ny),
                c_double(nz),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddCircleArc returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addEllipseArc(startTag, centerTag, majorTag, endTag, tag=-1, nx=0., ny=0., nz=0.):
            """
            Add an ellipse arc (strictly smaller than Pi) between the two points
            `startTag' and `endTag', with center `centertag' and major axis point
            `majorTag'. If `tag' is positive, set the tag explicitly; otherwise a new
            tag is selected automatically. If (`nx', `ny', `nz') != (0,0,0), explicitly
            set the plane of the circle arc. Return the tag of the ellipse arc.
            Return an integer value.
            """
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddEllipseArc(
                c_int(startTag),
                c_int(centerTag),
                c_int(majorTag),
                c_int(endTag),
                c_int(tag),
                c_double(nx),
                c_double(ny),
                c_double(nz),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddEllipseArc returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addSpline(pointTags, tag=-1):
            """
            Add a spline (Catmull-Rom) curve going through the points `pointTags'. If
            `tag' is positive, set the tag explicitly; otherwise a new tag is selected
            automatically. Create a periodic curve if the first and last points are the
            same. Return the tag of the spline curve.
            Return an integer value.
            """
            # Marshal the point tags into a C int array + length pair.
            api_pointTags_, api_pointTags_n_ = _ivectorint(pointTags)
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddSpline(
                api_pointTags_, api_pointTags_n_,
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddSpline returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addBSpline(pointTags, tag=-1):
            """
            Add a cubic b-spline curve with `pointTags' control points. If `tag' is
            positive, set the tag explicitly; otherwise a new tag is selected
            automatically. Creates a periodic curve if the first and last points are
            the same. Return the tag of the b-spline curve.
            Return an integer value.
            """
            # Marshal the control-point tags into a C int array + length pair.
            api_pointTags_, api_pointTags_n_ = _ivectorint(pointTags)
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddBSpline(
                api_pointTags_, api_pointTags_n_,
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddBSpline returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addBezier(pointTags, tag=-1):
            """
            Add a Bezier curve with `pointTags' control points. If `tag' is positive,
            set the tag explicitly; otherwise a new tag is selected automatically.
            Return the tag of the Bezier curve.
            Return an integer value.
            """
            # Marshal the control-point tags into a C int array + length pair.
            api_pointTags_, api_pointTags_n_ = _ivectorint(pointTags)
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddBezier(
                api_pointTags_, api_pointTags_n_,
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddBezier returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addCurveLoop(curveTags, tag=-1):
            """
            Add a curve loop (a closed wire) formed by the curves `curveTags'.
            `curveTags' should contain (signed) tags of model enties of dimension 1
            forming a closed loop: a negative tag signifies that the underlying curve
            is considered with reversed orientation. If `tag' is positive, set the tag
            explicitly; otherwise a new tag is selected automatically. Return the tag
            of the curve loop.
            Return an integer value.
            """
            # Marshal the signed curve tags into a C int array + length pair.
            api_curveTags_, api_curveTags_n_ = _ivectorint(curveTags)
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddCurveLoop(
                api_curveTags_, api_curveTags_n_,
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddCurveLoop returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addPlaneSurface(wireTags, tag=-1):
            """
            Add a plane surface defined by one or more curve loops `wireTags'. The
            first curve loop defines the exterior contour; additional curve loop define
            holes. If `tag' is positive, set the tag explicitly; otherwise a new tag is
            selected automatically. Return the tag of the surface.
            Return an integer value.
            """
            # Marshal the wire tags into a C int array + length pair.
            api_wireTags_, api_wireTags_n_ = _ivectorint(wireTags)
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddPlaneSurface(
                api_wireTags_, api_wireTags_n_,
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddPlaneSurface returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addSurfaceFilling(wireTags, tag=-1, sphereCenterTag=-1):
            """
            Add a surface filling the curve loops in `wireTags'. Currently only a
            single curve loop is supported; this curve loop should be composed by 3 or
            4 curves only. If `tag' is positive, set the tag explicitly; otherwise a
            new tag is selected automatically. Return the tag of the surface.
            Return an integer value.
            """
            # Marshal the wire tags into a C int array + length pair.
            api_wireTags_, api_wireTags_n_ = _ivectorint(wireTags)
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddSurfaceFilling(
                api_wireTags_, api_wireTags_n_,
                c_int(tag),
                c_int(sphereCenterTag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddSurfaceFilling returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addSurfaceLoop(surfaceTags, tag=-1):
            """
            Add a surface loop (a closed shell) formed by `surfaceTags'. If `tag' is
            positive, set the tag explicitly; otherwise a new tag is selected
            automatically. Return the tag of the shell.
            Return an integer value.
            """
            # Marshal the surface tags into a C int array + length pair.
            api_surfaceTags_, api_surfaceTags_n_ = _ivectorint(surfaceTags)
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddSurfaceLoop(
                api_surfaceTags_, api_surfaceTags_n_,
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddSurfaceLoop returned non-zero error code: ",
                    ierr.value)
            return api__result__
        @staticmethod
        def addVolume(shellTags, tag=-1):
            """
            Add a volume (a region) defined by one or more shells `shellTags'. The
            first surface loop defines the exterior boundary; additional surface loop
            define holes. If `tag' is positive, set the tag explicitly; otherwise a new
            tag is selected automatically. Return the tag of the volume.
            Return an integer value.
            """
            # Marshal the shell tags into a C int array + length pair.
            api_shellTags_, api_shellTags_n_ = _ivectorint(shellTags)
            ierr = c_int()  # C API status; 0 means success
            api__result__ = lib.gmshModelGeoAddVolume(
                api_shellTags_, api_shellTags_n_,
                c_int(tag),
                byref(ierr))
            if ierr.value != 0:
                raise ValueError(
                    "gmshModelGeoAddVolume returned non-zero error code: ",
                    ierr.value)
            return api__result__
@staticmethod
def extrude(dimTags, dx, dy, dz, numElements=[], heights=[], recombine=False):
    """
    Extrude the model entities `dimTags' by translation along (`dx', `dy',
    `dz'). Return extruded entities in `outDimTags'. If `numElements' is not
    empty, also extrude the mesh: the entries in `numElements' give the number
    of elements in each layer. If `heights' is not empty, it provides the
    (cumulative) height of the different layers, normalized to 1. If `dx' ==
    `dy' == `dz' == 0, the entities are extruded along their normal.
    Return `outDimTags'.
    """
    # Marshal input lists; api_outDimTags_ is an output buffer the C side fills.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_numElements_, api_numElements_n_ = _ivectorint(numElements)
    api_heights_, api_heights_n_ = _ivectordouble(heights)
    ierr = c_int()
    lib.gmshModelGeoExtrude(
        api_dimTags_, api_dimTags_n_,
        c_double(dx),
        c_double(dy),
        c_double(dz),
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        api_numElements_, api_numElements_n_,
        api_heights_, api_heights_n_,
        c_int(bool(recombine)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoExtrude returned non-zero error code: ",
            ierr.value)
    # Convert the C output buffer back into a list of (dim, tag) pairs.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def revolve(dimTags, x, y, z, ax, ay, az, angle, numElements=[], heights=[], recombine=False):
    """
    Extrude the model entities `dimTags' by rotation of `angle' radians around
    the axis of revolution defined by the point (`x', `y', `z') and the
    direction (`ax', `ay', `az'). Return extruded entities in `outDimTags'. If
    `numElements' is not empty, also extrude the mesh: the entries in
    `numElements' give the number of elements in each layer. If `heights' is
    not empty, it provides the (cumulative) height of the different layers,
    normalized to 1.
    Return `outDimTags'.
    """
    # Marshal input lists; api_outDimTags_ is an output buffer the C side fills.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_numElements_, api_numElements_n_ = _ivectorint(numElements)
    api_heights_, api_heights_n_ = _ivectordouble(heights)
    ierr = c_int()
    lib.gmshModelGeoRevolve(
        api_dimTags_, api_dimTags_n_,
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(ax),
        c_double(ay),
        c_double(az),
        c_double(angle),
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        api_numElements_, api_numElements_n_,
        api_heights_, api_heights_n_,
        c_int(bool(recombine)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoRevolve returned non-zero error code: ",
            ierr.value)
    # Convert the C output buffer back into a list of (dim, tag) pairs.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def twist(dimTags, x, y, z, dx, dy, dz, ax, ay, az, angle, numElements=[], heights=[], recombine=False):
    """
    Extrude the model entities `dimTags' by a combined translation and rotation
    of `angle' radians, along (`dx', `dy', `dz') and around the axis of
    revolution defined by the point (`x', `y', `z') and the direction (`ax',
    `ay', `az'). Return extruded entities in `outDimTags'. If `numElements' is
    not empty, also extrude the mesh: the entries in `numElements' give the
    number of elements in each layer. If `heights' is not empty, it provides
    the (cumulative) height of the different layers, normalized to 1.
    Return `outDimTags'.
    """
    # Marshal input lists; api_outDimTags_ is an output buffer the C side fills.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_numElements_, api_numElements_n_ = _ivectorint(numElements)
    api_heights_, api_heights_n_ = _ivectordouble(heights)
    ierr = c_int()
    lib.gmshModelGeoTwist(
        api_dimTags_, api_dimTags_n_,
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(dx),
        c_double(dy),
        c_double(dz),
        c_double(ax),
        c_double(ay),
        c_double(az),
        c_double(angle),
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        api_numElements_, api_numElements_n_,
        api_heights_, api_heights_n_,
        c_int(bool(recombine)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoTwist returned non-zero error code: ",
            ierr.value)
    # Convert the C output buffer back into a list of (dim, tag) pairs.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def translate(dimTags, dx, dy, dz):
    """
    Translate the model entities `dimTags' along (`dx', `dy', `dz').
    """
    # Marshal the (dim, tag) pair list into a flat ctypes array + length.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()
    lib.gmshModelGeoTranslate(
        api_dimTags_, api_dimTags_n_,
        c_double(dx),
        c_double(dy),
        c_double(dz),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoTranslate returned non-zero error code: ",
            ierr.value)
@staticmethod
def rotate(dimTags, x, y, z, ax, ay, az, angle):
    """
    Rotate the model entities `dimTags' of `angle' radians around the axis of
    revolution defined by the point (`x', `y', `z') and the direction (`ax',
    `ay', `az').
    """
    # Marshal the (dim, tag) pair list into a flat ctypes array + length.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()
    lib.gmshModelGeoRotate(
        api_dimTags_, api_dimTags_n_,
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(ax),
        c_double(ay),
        c_double(az),
        c_double(angle),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoRotate returned non-zero error code: ",
            ierr.value)
@staticmethod
def dilate(dimTags, x, y, z, a, b, c):
    """
    Scale the model entities `dimTag' by factors `a', `b' and `c' along the
    three coordinate axes; use (`x', `y', `z') as the center of the homothetic
    transformation.
    """
    # Marshal the (dim, tag) pair list into a flat ctypes array + length.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()
    lib.gmshModelGeoDilate(
        api_dimTags_, api_dimTags_n_,
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(a),
        c_double(b),
        c_double(c),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoDilate returned non-zero error code: ",
            ierr.value)
@staticmethod
def symmetrize(dimTags, a, b, c, d):
    """
    Apply a symmetry transformation to the model entities `dimTag', with
    respect to the plane of equation `a' * x + `b' * y + `c' * z + `d' = 0.
    """
    # Marshal the (dim, tag) pair list into a flat ctypes array + length.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()
    lib.gmshModelGeoSymmetrize(
        api_dimTags_, api_dimTags_n_,
        c_double(a),
        c_double(b),
        c_double(c),
        c_double(d),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoSymmetrize returned non-zero error code: ",
            ierr.value)
@staticmethod
def copy(dimTags):
    """
    Copy the entities `dimTags'; the new entities are returned in `outDimTags'.
    Return `outDimTags'.
    """
    # Marshal the input; api_outDimTags_ is an output buffer the C side fills.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    ierr = c_int()
    lib.gmshModelGeoCopy(
        api_dimTags_, api_dimTags_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoCopy returned non-zero error code: ",
            ierr.value)
    # Convert the C output buffer back into a list of (dim, tag) pairs.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def remove(dimTags, recursive=False):
    """
    Remove the entities `dimTags'. If `recursive' is true, remove all the
    entities on their boundaries, down to dimension 0.
    """
    # Marshal the (dim, tag) pair list into a flat ctypes array + length.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()
    lib.gmshModelGeoRemove(
        api_dimTags_, api_dimTags_n_,
        c_int(bool(recursive)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoRemove returned non-zero error code: ",
            ierr.value)
@staticmethod
def removeAllDuplicates():
    """
    Remove all duplicate entities (different entities at the same geometrical
    location).
    """
    ierr = c_int()
    lib.gmshModelGeoRemoveAllDuplicates(
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoRemoveAllDuplicates returned non-zero error code: ",
            ierr.value)
@staticmethod
def synchronize():
    """
    Synchronize the built-in CAD representation with the current Gmsh model.
    This can be called at any time, but since it involves a non trivial amount
    of processing, the number of synchronization points should normally be
    minimized.
    """
    ierr = c_int()
    lib.gmshModelGeoSynchronize(
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelGeoSynchronize returned non-zero error code: ",
            ierr.value)
class mesh:
    """
    Built-in CAD kernel meshing constraints
    """

    @staticmethod
    def setSize(dimTags, size):
        """
        Set a mesh size constraint on the model entities `dimTags'. Currently only
        entities of dimension 0 (points) are handled.
        """
        # Marshal the (dim, tag) pair list into a flat ctypes array + length.
        api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
        ierr = c_int()
        lib.gmshModelGeoMeshSetSize(
            api_dimTags_, api_dimTags_n_,
            c_double(size),
            byref(ierr))
        # Non-zero ierr means the C API reported an error.
        if ierr.value != 0:
            raise ValueError(
                "gmshModelGeoMeshSetSize returned non-zero error code: ",
                ierr.value)

    @staticmethod
    def setTransfiniteCurve(tag, nPoints, meshType="Progression", coef=1.):
        """
        Set a transfinite meshing constraint on the curve `tag', with `numNodes'
        nodes distributed according to `meshType' and `coef'. Currently supported
        types are "Progression" (geometrical progression with power `coef') and
        "Bump" (refinement toward both extremities of the curve).
        """
        ierr = c_int()
        # The mesh type string is passed to C as encoded bytes.
        lib.gmshModelGeoMeshSetTransfiniteCurve(
            c_int(tag),
            c_int(nPoints),
            c_char_p(meshType.encode()),
            c_double(coef),
            byref(ierr))
        # Non-zero ierr means the C API reported an error.
        if ierr.value != 0:
            raise ValueError(
                "gmshModelGeoMeshSetTransfiniteCurve returned non-zero error code: ",
                ierr.value)

    @staticmethod
    def setTransfiniteSurface(tag, arrangement="Left", cornerTags=[]):
        """
        Set a transfinite meshing constraint on the surface `tag'. `arrangement'
        describes the arrangement of the triangles when the surface is not flagged
        as recombined: currently supported values are "Left", "Right",
        "AlternateLeft" and "AlternateRight". `cornerTags' can be used to specify
        the (3 or 4) corners of the transfinite interpolation explicitly;
        specifying the corners explicitly is mandatory if the surface has more that
        3 or 4 points on its boundary.
        """
        # Marshal the corner tag list into a ctypes array + length.
        api_cornerTags_, api_cornerTags_n_ = _ivectorint(cornerTags)
        ierr = c_int()
        lib.gmshModelGeoMeshSetTransfiniteSurface(
            c_int(tag),
            c_char_p(arrangement.encode()),
            api_cornerTags_, api_cornerTags_n_,
            byref(ierr))
        # Non-zero ierr means the C API reported an error.
        if ierr.value != 0:
            raise ValueError(
                "gmshModelGeoMeshSetTransfiniteSurface returned non-zero error code: ",
                ierr.value)

    @staticmethod
    def setTransfiniteVolume(tag, cornerTags=[]):
        """
        Set a transfinite meshing constraint on the surface `tag'. `cornerTags' can
        be used to specify the (6 or 8) corners of the transfinite interpolation
        explicitly.
        """
        # Marshal the corner tag list into a ctypes array + length.
        api_cornerTags_, api_cornerTags_n_ = _ivectorint(cornerTags)
        ierr = c_int()
        lib.gmshModelGeoMeshSetTransfiniteVolume(
            c_int(tag),
            api_cornerTags_, api_cornerTags_n_,
            byref(ierr))
        # Non-zero ierr means the C API reported an error.
        if ierr.value != 0:
            raise ValueError(
                "gmshModelGeoMeshSetTransfiniteVolume returned non-zero error code: ",
                ierr.value)

    @staticmethod
    def setRecombine(dim, tag, angle=45.):
        """
        Set a recombination meshing constraint on the model entity of dimension
        `dim' and tag `tag'. Currently only entities of dimension 2 (to recombine
        triangles into quadrangles) are supported.
        """
        ierr = c_int()
        lib.gmshModelGeoMeshSetRecombine(
            c_int(dim),
            c_int(tag),
            c_double(angle),
            byref(ierr))
        # Non-zero ierr means the C API reported an error.
        if ierr.value != 0:
            raise ValueError(
                "gmshModelGeoMeshSetRecombine returned non-zero error code: ",
                ierr.value)

    @staticmethod
    def setSmoothing(dim, tag, val):
        """
        Set a smoothing meshing constraint on the model entity of dimension `dim'
        and tag `tag'. `val' iterations of a Laplace smoother are applied.
        """
        ierr = c_int()
        lib.gmshModelGeoMeshSetSmoothing(
            c_int(dim),
            c_int(tag),
            c_int(val),
            byref(ierr))
        # Non-zero ierr means the C API reported an error.
        if ierr.value != 0:
            raise ValueError(
                "gmshModelGeoMeshSetSmoothing returned non-zero error code: ",
                ierr.value)

    @staticmethod
    def setReverse(dim, tag, val=True):
        """
        Set a reverse meshing constraint on the model entity of dimension `dim' and
        tag `tag'. If `val' is true, the mesh orientation will be reversed with
        respect to the natural mesh orientation (i.e. the orientation consistent
        with the orientation of the geometry). If `val' is false, the mesh is left
        as-is.
        """
        ierr = c_int()
        lib.gmshModelGeoMeshSetReverse(
            c_int(dim),
            c_int(tag),
            c_int(bool(val)),
            byref(ierr))
        # Non-zero ierr means the C API reported an error.
        if ierr.value != 0:
            raise ValueError(
                "gmshModelGeoMeshSetReverse returned non-zero error code: ",
                ierr.value)
class occ:
"""
OpenCASCADE CAD kernel functions
"""
@staticmethod
def addPoint(x, y, z, meshSize=0., tag=-1):
    """
    Add a geometrical point in the OpenCASCADE CAD representation, at
    coordinates (`x', `y', `z'). If `meshSize' is > 0, add a meshing constraint
    at that point. If `tag' is positive, set the tag explicitly; otherwise a
    new tag is selected automatically. Return the tag of the point. (Note that
    the point will be added in the current model only after `synchronize' is
    called. This behavior holds for all the entities added in the occ module.)
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddPoint(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(meshSize),
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddPoint returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addLine(startTag, endTag, tag=-1):
    """
    Add a straight line segment between the two points with tags `startTag' and
    `endTag'. If `tag' is positive, set the tag explicitly; otherwise a new tag
    is selected automatically. Return the tag of the line.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddLine(
        c_int(startTag),
        c_int(endTag),
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddLine returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addCircleArc(startTag, centerTag, endTag, tag=-1):
    """
    Add a circle arc between the two points with tags `startTag' and `endTag',
    with center `centerTag'. If `tag' is positive, set the tag explicitly;
    otherwise a new tag is selected automatically. Return the tag of the circle
    arc.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddCircleArc(
        c_int(startTag),
        c_int(centerTag),
        c_int(endTag),
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddCircleArc returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addCircle(x, y, z, r, tag=-1, angle1=0., angle2=2*pi):
    """
    Add a circle of center (`x', `y', `z') and radius `r'. If `tag' is
    positive, set the tag explicitly; otherwise a new tag is selected
    automatically. If `angle1' and `angle2' are specified, create a circle arc
    between the two angles. Return the tag of the circle.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddCircle(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(r),
        c_int(tag),
        c_double(angle1),
        c_double(angle2),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddCircle returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addEllipseArc(startTag, centerTag, endTag, tag=-1):
    """
    Add an ellipse arc between the major axis point `startTag' and `endTag',
    with center `centerTag'. If `tag' is positive, set the tag explicitly;
    otherwise a new tag is selected automatically. Return the tag of the
    ellipse arc. Note that OpenCASCADE does not allow creating ellipse arcs
    with the major radius smaller than the minor radius.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddEllipseArc(
        c_int(startTag),
        c_int(centerTag),
        c_int(endTag),
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddEllipseArc returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addEllipse(x, y, z, r1, r2, tag=-1, angle1=0., angle2=2*pi):
    """
    Add an ellipse of center (`x', `y', `z') and radii `r1' and `r2' along the
    x- and y-axes respectively. If `tag' is positive, set the tag explicitly;
    otherwise a new tag is selected automatically. If `angle1' and `angle2' are
    specified, create an ellipse arc between the two angles. Return the tag of
    the ellipse. Note that OpenCASCADE does not allow creating ellipses with
    the major radius (along the x-axis) smaller than or equal to the minor
    radius (along the y-axis): rotate the shape or use `addCircle' in such
    cases.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddEllipse(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(r1),
        c_double(r2),
        c_int(tag),
        c_double(angle1),
        c_double(angle2),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddEllipse returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addSpline(pointTags, tag=-1):
    """
    Add a spline (C2 b-spline) curve going through the points `pointTags'. If
    `tag' is positive, set the tag explicitly; otherwise a new tag is selected
    automatically. Create a periodic curve if the first and last points are the
    same. Return the tag of the spline curve.
    Return an integer value.
    """
    # Marshal the Python list into a ctypes array + element-count pair.
    api_pointTags_, api_pointTags_n_ = _ivectorint(pointTags)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddSpline(
        api_pointTags_, api_pointTags_n_,
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddSpline returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addBSpline(pointTags, tag=-1, degree=3, weights=[], knots=[], multiplicities=[]):
    """
    Add a b-spline curve of degree `degree' with `pointTags' control points. If
    `weights', `knots' or `multiplicities' are not provided, default parameters
    are computed automatically. If `tag' is positive, set the tag explicitly;
    otherwise a new tag is selected automatically. Create a periodic curve if
    the first and last points are the same. Return the tag of the b-spline
    curve.
    Return an integer value.
    """
    # Marshal each Python list into a ctypes array + element-count pair.
    api_pointTags_, api_pointTags_n_ = _ivectorint(pointTags)
    api_weights_, api_weights_n_ = _ivectordouble(weights)
    api_knots_, api_knots_n_ = _ivectordouble(knots)
    api_multiplicities_, api_multiplicities_n_ = _ivectorint(multiplicities)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddBSpline(
        api_pointTags_, api_pointTags_n_,
        c_int(tag),
        c_int(degree),
        api_weights_, api_weights_n_,
        api_knots_, api_knots_n_,
        api_multiplicities_, api_multiplicities_n_,
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddBSpline returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addBezier(pointTags, tag=-1):
    """
    Add a Bezier curve with `pointTags' control points. If `tag' is positive,
    set the tag explicitly; otherwise a new tag is selected automatically.
    Return the tag of the Bezier curve.
    Return an integer value.
    """
    # Marshal the Python list into a ctypes array + element-count pair.
    api_pointTags_, api_pointTags_n_ = _ivectorint(pointTags)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddBezier(
        api_pointTags_, api_pointTags_n_,
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddBezier returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addWire(curveTags, tag=-1, checkClosed=False):
    """
    Add a wire (open or closed) formed by the curves `curveTags'. Note that an
    OpenCASCADE wire can be made of curves that share geometrically identical
    (but topologically different) points. If `tag' is positive, set the tag
    explicitly; otherwise a new tag is selected automatically. Return the tag
    of the wire.
    Return an integer value.
    """
    # Marshal the Python list into a ctypes array + element-count pair.
    api_curveTags_, api_curveTags_n_ = _ivectorint(curveTags)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddWire(
        api_curveTags_, api_curveTags_n_,
        c_int(tag),
        c_int(bool(checkClosed)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddWire returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addCurveLoop(curveTags, tag=-1):
    """
    Add a curve loop (a closed wire) formed by the curves `curveTags'.
    `curveTags' should contain tags of curves forming a closed loop. Note that
    an OpenCASCADE curve loop can be made of curves that share geometrically
    identical (but topologically different) points. If `tag' is positive, set
    the tag explicitly; otherwise a new tag is selected automatically. Return
    the tag of the curve loop.
    Return an integer value.
    """
    # Marshal the Python list into a ctypes array + element-count pair.
    api_curveTags_, api_curveTags_n_ = _ivectorint(curveTags)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddCurveLoop(
        api_curveTags_, api_curveTags_n_,
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddCurveLoop returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addRectangle(x, y, z, dx, dy, tag=-1, roundedRadius=0.):
    """
    Add a rectangle with lower left corner at (`x', `y', `z') and upper right
    corner at (`x' + `dx', `y' + `dy', `z'). If `tag' is positive, set the tag
    explicitly; otherwise a new tag is selected automatically. Round the
    corners if `roundedRadius' is nonzero. Return the tag of the rectangle.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddRectangle(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(dx),
        c_double(dy),
        c_int(tag),
        c_double(roundedRadius),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddRectangle returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addDisk(xc, yc, zc, rx, ry, tag=-1):
    """
    Add a disk with center (`xc', `yc', `zc') and radius `rx' along the x-axis
    and `ry' along the y-axis. If `tag' is positive, set the tag explicitly;
    otherwise a new tag is selected automatically. Return the tag of the disk.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddDisk(
        c_double(xc),
        c_double(yc),
        c_double(zc),
        c_double(rx),
        c_double(ry),
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddDisk returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addPlaneSurface(wireTags, tag=-1):
    """
    Add a plane surface defined by one or more curve loops (or closed wires)
    `wireTags'. The first curve loop defines the exterior contour; additional
    curve loops define holes. If `tag' is positive, set the tag explicitly;
    otherwise a new tag is selected automatically. Return the tag of the
    surface.
    Return an integer value.
    """
    # Marshal the Python list into a ctypes array + element-count pair.
    api_wireTags_, api_wireTags_n_ = _ivectorint(wireTags)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddPlaneSurface(
        api_wireTags_, api_wireTags_n_,
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddPlaneSurface returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addSurfaceFilling(wireTag, tag=-1, pointTags=[]):
    """
    Add a surface filling the curve loop `wireTag'. If `tag' is positive,
    set the tag explicitly; otherwise a new tag is selected automatically.
    Return the tag of the surface. If `pointTags' are provided, force the
    surface to pass through the given points.
    Return an integer value.
    """
    # Marshal the Python list into a ctypes array + element-count pair.
    api_pointTags_, api_pointTags_n_ = _ivectorint(pointTags)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddSurfaceFilling(
        c_int(wireTag),
        c_int(tag),
        api_pointTags_, api_pointTags_n_,
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddSurfaceFilling returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addSurfaceLoop(surfaceTags, tag=-1, sewing=False):
    """
    Add a surface loop (a closed shell) formed by `surfaceTags'. If `tag' is
    positive, set the tag explicitly; otherwise a new tag is selected
    automatically. Return the tag of the surface loop. Setting `sewing' allows
    to build a shell made of surfaces that share geometrically identical (but
    topologically different) curves.
    Return an integer value.
    """
    # Marshal the Python list into a ctypes array + element-count pair.
    api_surfaceTags_, api_surfaceTags_n_ = _ivectorint(surfaceTags)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddSurfaceLoop(
        api_surfaceTags_, api_surfaceTags_n_,
        c_int(tag),
        c_int(bool(sewing)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddSurfaceLoop returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addVolume(shellTags, tag=-1):
    """
    Add a volume (a region) defined by one or more surface loops `shellTags'.
    The first surface loop defines the exterior boundary; additional surface
    loops define holes. If `tag' is positive, set the tag explicitly; otherwise
    a new tag is selected automatically. Return the tag of the volume.
    Return an integer value.
    """
    # Marshal the Python list into a ctypes array + element-count pair.
    api_shellTags_, api_shellTags_n_ = _ivectorint(shellTags)
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddVolume(
        api_shellTags_, api_shellTags_n_,
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddVolume returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addSphere(xc, yc, zc, radius, tag=-1, angle1=-pi/2, angle2=pi/2, angle3=2*pi):
    """
    Add a sphere of center (`xc', `yc', `zc') and radius `radius'. The optional
    `angle1' and `angle2' arguments define the polar angle opening (from -Pi/2
    to Pi/2). The optional `angle3' argument defines the azimuthal opening
    (from 0 to 2*Pi). If `tag' is positive, set the tag explicitly; otherwise a
    new tag is selected automatically. Return the tag of the sphere.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddSphere(
        c_double(xc),
        c_double(yc),
        c_double(zc),
        c_double(radius),
        c_int(tag),
        c_double(angle1),
        c_double(angle2),
        c_double(angle3),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddSphere returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addBox(x, y, z, dx, dy, dz, tag=-1):
    """
    Add a parallelepipedic box defined by a point (`x', `y', `z') and the
    extents along the x-, y- and z-axes. If `tag' is positive, set the tag
    explicitly; otherwise a new tag is selected automatically. Return the tag
    of the box.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddBox(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(dx),
        c_double(dy),
        c_double(dz),
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddBox returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addCylinder(x, y, z, dx, dy, dz, r, tag=-1, angle=2*pi):
    """
    Add a cylinder, defined by the center (`x', `y', `z') of its first circular
    face, the 3 components (`dx', `dy', `dz') of the vector defining its axis
    and its radius `r'. The optional `angle' argument defines the angular
    opening (from 0 to 2*Pi). If `tag' is positive, set the tag explicitly;
    otherwise a new tag is selected automatically. Return the tag of the
    cylinder.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddCylinder(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(dx),
        c_double(dy),
        c_double(dz),
        c_double(r),
        c_int(tag),
        c_double(angle),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddCylinder returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addCone(x, y, z, dx, dy, dz, r1, r2, tag=-1, angle=2*pi):
    """
    Add a cone, defined by the center (`x', `y', `z') of its first circular
    face, the 3 components of the vector (`dx', `dy', `dz') defining its axis
    and the two radii `r1' and `r2' of the faces (these radii can be zero). If
    `tag' is positive, set the tag explicitly; otherwise a new tag is selected
    automatically. `angle' defines the optional angular opening (from 0 to
    2*Pi). Return the tag of the cone.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddCone(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(dx),
        c_double(dy),
        c_double(dz),
        c_double(r1),
        c_double(r2),
        c_int(tag),
        c_double(angle),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddCone returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addWedge(x, y, z, dx, dy, dz, tag=-1, ltx=0.):
    """
    Add a right angular wedge, defined by the right-angle point (`x', `y', `z')
    and the 3 extents along the x-, y- and z-axes (`dx', `dy', `dz'). If `tag'
    is positive, set the tag explicitly; otherwise a new tag is selected
    automatically. The optional argument `ltx' defines the top extent along the
    x-axis. Return the tag of the wedge.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddWedge(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(dx),
        c_double(dy),
        c_double(dz),
        c_int(tag),
        c_double(ltx),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddWedge returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addTorus(x, y, z, r1, r2, tag=-1, angle=2*pi):
    """
    Add a torus, defined by its center (`x', `y', `z') and its 2 radii `r1' and
    `r2'. If `tag' is positive, set the tag explicitly; otherwise a new tag is
    selected automatically. The optional argument `angle' defines the angular
    opening (from 0 to 2*Pi). Return the tag of the torus.
    Return an integer value.
    """
    ierr = c_int()
    api__result__ = lib.gmshModelOccAddTorus(
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(r1),
        c_double(r2),
        c_int(tag),
        c_double(angle),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddTorus returned non-zero error code: ",
            ierr.value)
    return api__result__
@staticmethod
def addThruSections(wireTags, tag=-1, makeSolid=True, makeRuled=False):
    """
    Add a volume (if the optional argument `makeSolid' is set) or surfaces
    defined through the open or closed wires `wireTags'. If `tag' is positive,
    set the tag explicitly; otherwise a new tag is selected automatically. The
    new entities are returned in `outDimTags'. If the optional argument
    `makeRuled' is set, the surfaces created on the boundary are forced to be
    ruled surfaces.
    Return `outDimTags'.
    """
    # Marshal the input; api_outDimTags_ is an output buffer the C side fills.
    api_wireTags_, api_wireTags_n_ = _ivectorint(wireTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    ierr = c_int()
    lib.gmshModelOccAddThruSections(
        api_wireTags_, api_wireTags_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        c_int(tag),
        c_int(bool(makeSolid)),
        c_int(bool(makeRuled)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddThruSections returned non-zero error code: ",
            ierr.value)
    # Convert the C output buffer back into a list of (dim, tag) pairs.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def addThickSolid(volumeTag, excludeSurfaceTags, offset, tag=-1):
    """
    Add a hollowed volume built from an initial volume `volumeTag' and a set of
    faces from this volume `excludeSurfaceTags', which are to be removed. The
    remaining faces of the volume become the walls of the hollowed solid, with
    thickness `offset'. If `tag' is positive, set the tag explicitly; otherwise
    a new tag is selected automatically.
    Return `outDimTags'.
    """
    # Marshal the input; api_outDimTags_ is an output buffer the C side fills.
    api_excludeSurfaceTags_, api_excludeSurfaceTags_n_ = _ivectorint(excludeSurfaceTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    ierr = c_int()
    lib.gmshModelOccAddThickSolid(
        c_int(volumeTag),
        api_excludeSurfaceTags_, api_excludeSurfaceTags_n_,
        c_double(offset),
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        c_int(tag),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddThickSolid returned non-zero error code: ",
            ierr.value)
    # Convert the C output buffer back into a list of (dim, tag) pairs.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def extrude(dimTags, dx, dy, dz, numElements=[], heights=[], recombine=False):
    """
    Extrude the model entities `dimTags' by translation along (`dx', `dy',
    `dz'). Return extruded entities in `outDimTags'. If `numElements' is not
    empty, also extrude the mesh: the entries in `numElements' give the number
    of elements in each layer. If `heights' is not empty, it provides the
    (cumulative) height of the different layers, normalized to 1.
    Return `outDimTags'.
    """
    # Marshal input lists; api_outDimTags_ is an output buffer the C side fills.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_numElements_, api_numElements_n_ = _ivectorint(numElements)
    api_heights_, api_heights_n_ = _ivectordouble(heights)
    ierr = c_int()
    lib.gmshModelOccExtrude(
        api_dimTags_, api_dimTags_n_,
        c_double(dx),
        c_double(dy),
        c_double(dz),
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        api_numElements_, api_numElements_n_,
        api_heights_, api_heights_n_,
        c_int(bool(recombine)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccExtrude returned non-zero error code: ",
            ierr.value)
    # Convert the C output buffer back into a list of (dim, tag) pairs.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def revolve(dimTags, x, y, z, ax, ay, az, angle, numElements=[], heights=[], recombine=False):
    """
    Extrude the model entities `dimTags' by rotation of `angle' radians around
    the axis of revolution defined by the point (`x', `y', `z') and the
    direction (`ax', `ay', `az'). Return extruded entities in `outDimTags'. If
    `numElements' is not empty, also extrude the mesh: the entries in
    `numElements' give the number of elements in each layer. If `heights' is
    not empty, it provides the (cumulative) height of the different layers,
    normalized to 1.
    Return `outDimTags'.
    """
    # Marshal input lists; api_outDimTags_ is an output buffer the C side fills.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_numElements_, api_numElements_n_ = _ivectorint(numElements)
    api_heights_, api_heights_n_ = _ivectordouble(heights)
    ierr = c_int()
    lib.gmshModelOccRevolve(
        api_dimTags_, api_dimTags_n_,
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(ax),
        c_double(ay),
        c_double(az),
        c_double(angle),
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        api_numElements_, api_numElements_n_,
        api_heights_, api_heights_n_,
        c_int(bool(recombine)),
        byref(ierr))
    # Non-zero ierr means the C API reported an error.
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccRevolve returned non-zero error code: ",
            ierr.value)
    # Convert the C output buffer back into a list of (dim, tag) pairs.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def addPipe(dimTags, wireTag):
    """
    Add a pipe by extruding the entities `dimTags' along the wire `wireTag'.
    Return the pipe in `outDimTags'.
    Return `outDimTags'.
    """
    # Marshal the (dim, tag) pairs into a flat C integer array.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()  # filled by the C call
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccAddPipe(
        api_dimTags_, api_dimTags_n_,
        c_int(wireTag),
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAddPipe returned non-zero error code: ",
            ierr.value)
    # Decode the C output vector into a Python list of (dim, tag) tuples.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def fillet(volumeTags, curveTags, radii, removeVolume=True):
    """
    Fillet the volumes `volumeTags' on the curves `curveTags' with radii
    `radii'. The `radii' vector can either contain a single radius, as many
    radii as `curveTags', or twice as many as `curveTags' (in which case
    different radii are provided for the begin and end points of the curves).
    Return the filleted entities in `outDimTags'. Remove the original volume if
    `removeVolume' is set.
    Return `outDimTags'.
    """
    # Marshal input sequences into ctypes (array, length) pairs.
    api_volumeTags_, api_volumeTags_n_ = _ivectorint(volumeTags)
    api_curveTags_, api_curveTags_n_ = _ivectorint(curveTags)
    api_radii_, api_radii_n_ = _ivectordouble(radii)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()  # filled by the C call
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccFillet(
        api_volumeTags_, api_volumeTags_n_,
        api_curveTags_, api_curveTags_n_,
        api_radii_, api_radii_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        c_int(bool(removeVolume)),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccFillet returned non-zero error code: ",
            ierr.value)
    # Decode the C output vector into a Python list of (dim, tag) tuples.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def chamfer(volumeTags, curveTags, surfaceTags, distances, removeVolume=True):
    """
    Chamfer the volumes `volumeTags' on the curves `curveTags' with distances
    `distances' measured on surfaces `surfaceTags'. The `distances' vector can
    either contain a single distance, as many distances as `curveTags' and
    `surfaceTags', or twice as many as `curveTags' and `surfaceTags' (in which
    case the first in each pair is measured on the corresponding surface in
    `surfaceTags', the other on the other adjacent surface). Return the
    chamfered entities in `outDimTags'. Remove the original volume if
    `removeVolume' is set.
    Return `outDimTags'.
    """
    # Marshal input sequences into ctypes (array, length) pairs.
    api_volumeTags_, api_volumeTags_n_ = _ivectorint(volumeTags)
    api_curveTags_, api_curveTags_n_ = _ivectorint(curveTags)
    api_surfaceTags_, api_surfaceTags_n_ = _ivectorint(surfaceTags)
    api_distances_, api_distances_n_ = _ivectordouble(distances)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()  # filled by the C call
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccChamfer(
        api_volumeTags_, api_volumeTags_n_,
        api_curveTags_, api_curveTags_n_,
        api_surfaceTags_, api_surfaceTags_n_,
        api_distances_, api_distances_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        c_int(bool(removeVolume)),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccChamfer returned non-zero error code: ",
            ierr.value)
    # Decode the C output vector into a Python list of (dim, tag) tuples.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def fuse(objectDimTags, toolDimTags, tag=-1, removeObject=True, removeTool=True):
    """
    Compute the boolean union (the fusion) of the entities `objectDimTags' and
    `toolDimTags'. Return the resulting entities in `outDimTags'. If `tag' is
    positive, try to set the tag explicitly (only valid if the boolean
    operation results in a single entity). Remove the object if `removeObject'
    is set. Remove the tool if `removeTool' is set.
    Return `outDimTags', `outDimTagsMap'.
    """
    # Marshal the two (dim, tag) input lists.
    api_objectDimTags_, api_objectDimTags_n_ = _ivectorpair(objectDimTags)
    api_toolDimTags_, api_toolDimTags_n_ = _ivectorpair(toolDimTags)
    # Output buffers: a flat pair vector and a vector-of-vectors map,
    # both allocated by the C library.
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_outDimTagsMap_, api_outDimTagsMap_n_, api_outDimTagsMap_nn_ = POINTER(POINTER(c_int))(), POINTER(c_size_t)(), c_size_t()
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccFuse(
        api_objectDimTags_, api_objectDimTags_n_,
        api_toolDimTags_, api_toolDimTags_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        byref(api_outDimTagsMap_), byref(api_outDimTagsMap_n_), byref(api_outDimTagsMap_nn_),
        c_int(tag),
        c_int(bool(removeObject)),
        c_int(bool(removeTool)),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccFuse returned non-zero error code: ",
            ierr.value)
    # Decode both outputs: result entities and input-to-output mapping.
    return (
        _ovectorpair(api_outDimTags_, api_outDimTags_n_.value),
        _ovectorvectorpair(api_outDimTagsMap_, api_outDimTagsMap_n_, api_outDimTagsMap_nn_))
@staticmethod
def intersect(objectDimTags, toolDimTags, tag=-1, removeObject=True, removeTool=True):
    """
    Compute the boolean intersection (the common parts) of the entities
    `objectDimTags' and `toolDimTags'. Return the resulting entities in
    `outDimTags'. If `tag' is positive, try to set the tag explicitly (only
    valid if the boolean operation results in a single entity). Remove the
    object if `removeObject' is set. Remove the tool if `removeTool' is set.
    Return `outDimTags', `outDimTagsMap'.
    """
    # Marshal the two (dim, tag) input lists.
    api_objectDimTags_, api_objectDimTags_n_ = _ivectorpair(objectDimTags)
    api_toolDimTags_, api_toolDimTags_n_ = _ivectorpair(toolDimTags)
    # Output buffers allocated and filled by the C library.
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_outDimTagsMap_, api_outDimTagsMap_n_, api_outDimTagsMap_nn_ = POINTER(POINTER(c_int))(), POINTER(c_size_t)(), c_size_t()
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccIntersect(
        api_objectDimTags_, api_objectDimTags_n_,
        api_toolDimTags_, api_toolDimTags_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        byref(api_outDimTagsMap_), byref(api_outDimTagsMap_n_), byref(api_outDimTagsMap_nn_),
        c_int(tag),
        c_int(bool(removeObject)),
        c_int(bool(removeTool)),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccIntersect returned non-zero error code: ",
            ierr.value)
    # Decode both outputs: result entities and input-to-output mapping.
    return (
        _ovectorpair(api_outDimTags_, api_outDimTags_n_.value),
        _ovectorvectorpair(api_outDimTagsMap_, api_outDimTagsMap_n_, api_outDimTagsMap_nn_))
@staticmethod
def cut(objectDimTags, toolDimTags, tag=-1, removeObject=True, removeTool=True):
    """
    Compute the boolean difference between the entities `objectDimTags' and
    `toolDimTags'. Return the resulting entities in `outDimTags'. If `tag' is
    positive, try to set the tag explicitly (only valid if the boolean
    operation results in a single entity). Remove the object if `removeObject'
    is set. Remove the tool if `removeTool' is set.
    Return `outDimTags', `outDimTagsMap'.
    """
    # Marshal the two (dim, tag) input lists.
    api_objectDimTags_, api_objectDimTags_n_ = _ivectorpair(objectDimTags)
    api_toolDimTags_, api_toolDimTags_n_ = _ivectorpair(toolDimTags)
    # Output buffers allocated and filled by the C library.
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_outDimTagsMap_, api_outDimTagsMap_n_, api_outDimTagsMap_nn_ = POINTER(POINTER(c_int))(), POINTER(c_size_t)(), c_size_t()
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccCut(
        api_objectDimTags_, api_objectDimTags_n_,
        api_toolDimTags_, api_toolDimTags_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        byref(api_outDimTagsMap_), byref(api_outDimTagsMap_n_), byref(api_outDimTagsMap_nn_),
        c_int(tag),
        c_int(bool(removeObject)),
        c_int(bool(removeTool)),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccCut returned non-zero error code: ",
            ierr.value)
    # Decode both outputs: result entities and input-to-output mapping.
    return (
        _ovectorpair(api_outDimTags_, api_outDimTags_n_.value),
        _ovectorvectorpair(api_outDimTagsMap_, api_outDimTagsMap_n_, api_outDimTagsMap_nn_))
@staticmethod
def fragment(objectDimTags, toolDimTags, tag=-1, removeObject=True, removeTool=True):
    """
    Compute the boolean fragments (general fuse) of the entities
    `objectDimTags' and `toolDimTags'. Return the resulting entities in
    `outDimTags'. If `tag' is positive, try to set the tag explicitly (only
    valid if the boolean operation results in a single entity). Remove the
    object if `removeObject' is set. Remove the tool if `removeTool' is set.
    Return `outDimTags', `outDimTagsMap'.
    """
    # Marshal the two (dim, tag) input lists.
    api_objectDimTags_, api_objectDimTags_n_ = _ivectorpair(objectDimTags)
    api_toolDimTags_, api_toolDimTags_n_ = _ivectorpair(toolDimTags)
    # Output buffers allocated and filled by the C library.
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()
    api_outDimTagsMap_, api_outDimTagsMap_n_, api_outDimTagsMap_nn_ = POINTER(POINTER(c_int))(), POINTER(c_size_t)(), c_size_t()
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccFragment(
        api_objectDimTags_, api_objectDimTags_n_,
        api_toolDimTags_, api_toolDimTags_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        byref(api_outDimTagsMap_), byref(api_outDimTagsMap_n_), byref(api_outDimTagsMap_nn_),
        c_int(tag),
        c_int(bool(removeObject)),
        c_int(bool(removeTool)),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccFragment returned non-zero error code: ",
            ierr.value)
    # Decode both outputs: result entities and input-to-output mapping.
    return (
        _ovectorpair(api_outDimTags_, api_outDimTags_n_.value),
        _ovectorvectorpair(api_outDimTagsMap_, api_outDimTagsMap_n_, api_outDimTagsMap_nn_))
@staticmethod
def translate(dimTags, dx, dy, dz):
    """
    Translate the model entities `dimTags' along (`dx', `dy', `dz').
    """
    # Flatten the (dim, tag) pairs into a C integer array plus its length.
    pairs_c, pairs_len = _ivectorpair(dimTags)
    status = c_int()  # receives the C-side error code
    lib.gmshModelOccTranslate(
        pairs_c, pairs_len,
        c_double(dx),
        c_double(dy),
        c_double(dz),
        byref(status))
    # Any non-zero status signals a failure inside the C library.
    if status.value:
        raise ValueError(
            "gmshModelOccTranslate returned non-zero error code: ",
            status.value)
@staticmethod
def rotate(dimTags, x, y, z, ax, ay, az, angle):
    """
    Rotate the model entities `dimTags' of `angle' radians around the axis of
    revolution defined by the point (`x', `y', `z') and the direction (`ax',
    `ay', `az').
    """
    # Marshal the (dim, tag) pairs into a flat C integer array.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccRotate(
        api_dimTags_, api_dimTags_n_,
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(ax),
        c_double(ay),
        c_double(az),
        c_double(angle),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccRotate returned non-zero error code: ",
            ierr.value)
@staticmethod
def dilate(dimTags, x, y, z, a, b, c):
    """
    Scale the model entities `dimTag' by factors `a', `b' and `c' along the
    three coordinate axes; use (`x', `y', `z') as the center of the homothetic
    transformation.
    """
    # Marshal the (dim, tag) pairs into a flat C integer array.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccDilate(
        api_dimTags_, api_dimTags_n_,
        c_double(x),
        c_double(y),
        c_double(z),
        c_double(a),
        c_double(b),
        c_double(c),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccDilate returned non-zero error code: ",
            ierr.value)
@staticmethod
def symmetrize(dimTags, a, b, c, d):
    """
    Apply a symmetry transformation to the model entities `dimTag', with
    respect to the plane of equation `a' * x + `b' * y + `c' * z + `d' = 0.
    """
    # Flatten the (dim, tag) pairs into a C integer array plus its length.
    entity_pairs, entity_pairs_len = _ivectorpair(dimTags)
    status = c_int()  # receives the C-side error code
    # The four coefficients define the mirror plane a*x + b*y + c*z + d = 0.
    lib.gmshModelOccSymmetrize(
        entity_pairs, entity_pairs_len,
        c_double(a),
        c_double(b),
        c_double(c),
        c_double(d),
        byref(status))
    if status.value:
        raise ValueError(
            "gmshModelOccSymmetrize returned non-zero error code: ",
            status.value)
@staticmethod
def affineTransform(dimTags, a):
    """
    Apply a general affine transformation matrix `a' (16 entries of a 4x4
    matrix, by row; only the 12 first can be provided for convenience) to the
    model entities `dimTag'.
    """
    # Marshal the (dim, tag) pairs and the matrix entries to C arrays.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_a_, api_a_n_ = _ivectordouble(a)
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccAffineTransform(
        api_dimTags_, api_dimTags_n_,
        api_a_, api_a_n_,
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccAffineTransform returned non-zero error code: ",
            ierr.value)
@staticmethod
def copy(dimTags):
    """
    Copy the entities `dimTags'; the new entities are returned in `outDimTags'.
    Return `outDimTags'.
    """
    # Marshal the (dim, tag) pairs into a flat C integer array.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()  # filled by the C call
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccCopy(
        api_dimTags_, api_dimTags_n_,
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccCopy returned non-zero error code: ",
            ierr.value)
    # Decode the C output vector into a Python list of (dim, tag) tuples.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def remove(dimTags, recursive=False):
    """
    Remove the entities `dimTags'. If `recursive' is true, remove all the
    entities on their boundaries, down to dimension 0.
    """
    # Flatten the (dim, tag) pairs into a C integer array plus its length.
    pairs_c, pairs_len = _ivectorpair(dimTags)
    status = c_int()  # receives the C-side error code
    lib.gmshModelOccRemove(
        pairs_c, pairs_len,
        c_int(bool(recursive)),
        byref(status))
    if status.value:
        raise ValueError(
            "gmshModelOccRemove returned non-zero error code: ",
            status.value)
@staticmethod
def removeAllDuplicates():
    """
    Remove all duplicate entities (different entities at the same geometrical
    location) after intersecting (using boolean fragments) all highest
    dimensional entities.
    """
    status = c_int()  # receives the C-side error code
    lib.gmshModelOccRemoveAllDuplicates(byref(status))
    if status.value:
        raise ValueError(
            "gmshModelOccRemoveAllDuplicates returned non-zero error code: ",
            status.value)
@staticmethod
def healShapes(dimTags=[], tolerance=1e-8, fixDegenerated=True, fixSmallEdges=True, fixSmallFaces=True, sewFaces=True):
    """
    Apply various healing procedures to the entities `dimTags' (or to all the
    entities in the model if `dimTags' is empty). Return the healed entities in
    `outDimTags'. Available healing options are listed in the Gmsh reference
    manual.
    Return `outDimTags'.
    """
    # NOTE: the mutable list default is only read (marshaled), never mutated.
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()  # filled by the C call
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccHealShapes(
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        api_dimTags_, api_dimTags_n_,
        c_double(tolerance),
        c_int(bool(fixDegenerated)),
        c_int(bool(fixSmallEdges)),
        c_int(bool(fixSmallFaces)),
        c_int(bool(sewFaces)),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccHealShapes returned non-zero error code: ",
            ierr.value)
    # Decode the C output vector into a Python list of (dim, tag) tuples.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def importShapes(fileName, highestDimOnly=True, format=""):
    """
    Import BREP, STEP or IGES shapes from the file `fileName'. The imported
    entities are returned in `outDimTags'. If the optional argument
    `highestDimOnly' is set, only import the highest dimensional entities in
    the file. The optional argument `format' can be used to force the format of
    the file (currently "brep", "step" or "iges").
    Return `outDimTags'.
    """
    # `format' intentionally shadows the builtin to mirror the C API name.
    api_outDimTags_, api_outDimTags_n_ = POINTER(c_int)(), c_size_t()  # filled by the C call
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccImportShapes(
        c_char_p(fileName.encode()),
        byref(api_outDimTags_), byref(api_outDimTags_n_),
        c_int(bool(highestDimOnly)),
        c_char_p(format.encode()),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccImportShapes returned non-zero error code: ",
            ierr.value)
    # Decode the C output vector into a Python list of (dim, tag) tuples.
    return _ovectorpair(api_outDimTags_, api_outDimTags_n_.value)
@staticmethod
def setMeshSize(dimTags, size):
    """
    Set a mesh size constraint on the model entities `dimTags'. Currently only
    entities of dimension 0 (points) are handled.
    """
    # Marshal the (dim, tag) pairs into a flat C integer array.
    api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccSetMeshSize(
        api_dimTags_, api_dimTags_n_,
        c_double(size),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccSetMeshSize returned non-zero error code: ",
            ierr.value)
@staticmethod
def getMass(dim, tag):
    """
    Get the mass of the model entity of dimension `dim' and tag `tag'.
    Return `mass'.
    """
    out_mass = c_double()  # output parameter filled by the C call
    status = c_int()       # receives the C-side error code
    lib.gmshModelOccGetMass(
        c_int(dim),
        c_int(tag),
        byref(out_mass),
        byref(status))
    if status.value:
        raise ValueError(
            "gmshModelOccGetMass returned non-zero error code: ",
            status.value)
    return out_mass.value
@staticmethod
def getCenterOfMass(dim, tag):
    """
    Get the center of mass of the model entity of dimension `dim' and tag
    `tag'.
    Return `x', `y', `z'.
    """
    # Output scalars filled in by the C call.
    api_x_ = c_double()
    api_y_ = c_double()
    api_z_ = c_double()
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccGetCenterOfMass(
        c_int(dim),
        c_int(tag),
        byref(api_x_),
        byref(api_y_),
        byref(api_z_),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccGetCenterOfMass returned non-zero error code: ",
            ierr.value)
    # Return the three coordinates as plain Python floats.
    return (
        api_x_.value,
        api_y_.value,
        api_z_.value)
@staticmethod
def getMatrixOfInertia(dim, tag):
    """
    Get the matrix of inertia (by row) of the model entity of dimension `dim'
    and tag `tag'.
    Return `mat'.
    """
    # Output double vector allocated and filled by the C library.
    api_mat_, api_mat_n_ = POINTER(c_double)(), c_size_t()
    ierr = c_int()  # C-side error status; 0 means success
    lib.gmshModelOccGetMatrixOfInertia(
        c_int(dim),
        c_int(tag),
        byref(api_mat_), byref(api_mat_n_),
        byref(ierr))
    if ierr.value != 0:
        raise ValueError(
            "gmshModelOccGetMatrixOfInertia returned non-zero error code: ",
            ierr.value)
    # Decode the C double array into a Python list (row-major entries).
    return _ovectordouble(api_mat_, api_mat_n_.value)
@staticmethod
def synchronize():
    """
    Synchronize the OpenCASCADE CAD representation with the current Gmsh model.
    This can be called at any time, but since it involves a non trivial amount
    of processing, the number of synchronization points should normally be
    minimized.
    """
    status = c_int()  # receives the C-side error code
    lib.gmshModelOccSynchronize(byref(status))
    if status.value:
        raise ValueError(
            "gmshModelOccSynchronize returned non-zero error code: ",
            status.value)
class view:
    """
    Post-processing view functions
    """
    # Every method is a thin ctypes wrapper over a gmshView* C function:
    # it marshals Python arguments, checks the C error flag and raises
    # ValueError on failure.
    @staticmethod
    def add(name, tag=-1):
        """
        Add a new post-processing view, with name `name'. If `tag' is positive use
        it (and remove the view with that tag if it already exists), otherwise
        associate a new tag. Return the view tag.
        Return an integer value.
        """
        ierr = c_int()  # C-side error status; 0 means success
        api__result__ = lib.gmshViewAdd(
            c_char_p(name.encode()),
            c_int(tag),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewAdd returned non-zero error code: ",
                ierr.value)
        return api__result__
    @staticmethod
    def remove(tag):
        """
        Remove the view with tag `tag'.
        """
        ierr = c_int()
        lib.gmshViewRemove(
            c_int(tag),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewRemove returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def getIndex(tag):
        """
        Get the index of the view with tag `tag' in the list of currently loaded
        views. This dynamic index (it can change when views are removed) is used to
        access view options.
        Return an integer value.
        """
        ierr = c_int()
        api__result__ = lib.gmshViewGetIndex(
            c_int(tag),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewGetIndex returned non-zero error code: ",
                ierr.value)
        return api__result__
    @staticmethod
    def getTags():
        """
        Get the tags of all views.
        Return `tags'.
        """
        # Output int vector allocated and filled by the C library.
        api_tags_, api_tags_n_ = POINTER(c_int)(), c_size_t()
        ierr = c_int()
        lib.gmshViewGetTags(
            byref(api_tags_), byref(api_tags_n_),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewGetTags returned non-zero error code: ",
                ierr.value)
        return _ovectorint(api_tags_, api_tags_n_.value)
    @staticmethod
    def addModelData(tag, step, modelName, dataType, tags, data, time=0., numComponents=-1, partition=0):
        """
        Add model-based post-processing data to the view with tag `tag'.
        `modelName' identifies the model the data is attached to. `dataType'
        specifies the type of data, currently either "NodeData", "ElementData" or
        "ElementNodeData". `step' specifies the identifier (>= 0) of the data in a
        sequence. `tags' gives the tags of the nodes or elements in the mesh to
        which the data is associated. `data' is a vector of the same length as
        `tags': each entry is the vector of double precision numbers representing
        the data associated with the corresponding tag. The optional `time'
        argument associate a time value with the data. `numComponents' gives the
        number of data components (1 for scalar data, 3 for vector data, etc.) per
        entity; if negative, it is automatically inferred (when possible) from the
        input data. `partition' allows to specify data in several sub-sets.
        """
        # Marshal the tag list and the nested data vectors.
        api_tags_, api_tags_n_ = _ivectorsize(tags)
        api_data_, api_data_n_, api_data_nn_ = _ivectorvectordouble(data)
        ierr = c_int()
        lib.gmshViewAddModelData(
            c_int(tag),
            c_int(step),
            c_char_p(modelName.encode()),
            c_char_p(dataType.encode()),
            api_tags_, api_tags_n_,
            api_data_, api_data_n_, api_data_nn_,
            c_double(time),
            c_int(numComponents),
            c_int(partition),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewAddModelData returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def getModelData(tag, step):
        """
        Get model-based post-processing data from the view with tag `tag' at step
        `step'. Return the `data' associated to the nodes or the elements with tags
        `tags', as well as the `dataType' and the number of components
        `numComponents'.
        Return `dataType', `tags', `data', `time', `numComponents'.
        """
        # Output buffers allocated and filled by the C library.
        api_dataType_ = c_char_p()
        api_tags_, api_tags_n_ = POINTER(c_size_t)(), c_size_t()
        api_data_, api_data_n_, api_data_nn_ = POINTER(POINTER(c_double))(), POINTER(c_size_t)(), c_size_t()
        api_time_ = c_double()
        api_numComponents_ = c_int()
        ierr = c_int()
        lib.gmshViewGetModelData(
            c_int(tag),
            c_int(step),
            byref(api_dataType_),
            byref(api_tags_), byref(api_tags_n_),
            byref(api_data_), byref(api_data_n_), byref(api_data_nn_),
            byref(api_time_),
            byref(api_numComponents_),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewGetModelData returned non-zero error code: ",
                ierr.value)
        # Decode all outputs back into Python values.
        return (
            _ostring(api_dataType_),
            _ovectorsize(api_tags_, api_tags_n_.value),
            _ovectorvectordouble(api_data_, api_data_n_, api_data_nn_),
            api_time_.value,
            api_numComponents_.value)
    @staticmethod
    def addListData(tag, dataType, numEle, data):
        """
        Add list-based post-processing data to the view with tag `tag'. `dataType'
        identifies the data: "SP" for scalar points, "VP", for vector points, etc.
        `numEle' gives the number of elements in the data. `data' contains the data
        for the `numEle' elements.
        """
        api_data_, api_data_n_ = _ivectordouble(data)
        ierr = c_int()
        lib.gmshViewAddListData(
            c_int(tag),
            c_char_p(dataType.encode()),
            c_int(numEle),
            api_data_, api_data_n_,
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewAddListData returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def getListData(tag):
        """
        Get list-based post-processing data from the view with tag `tag'. Return
        the types `dataTypes', the number of elements `numElements' for each data
        type and the `data' for each data type.
        Return `dataType', `numElements', `data'.
        """
        # Output buffers allocated and filled by the C library.
        api_dataType_, api_dataType_n_ = POINTER(POINTER(c_char))(), c_size_t()
        api_numElements_, api_numElements_n_ = POINTER(c_int)(), c_size_t()
        api_data_, api_data_n_, api_data_nn_ = POINTER(POINTER(c_double))(), POINTER(c_size_t)(), c_size_t()
        ierr = c_int()
        lib.gmshViewGetListData(
            c_int(tag),
            byref(api_dataType_), byref(api_dataType_n_),
            byref(api_numElements_), byref(api_numElements_n_),
            byref(api_data_), byref(api_data_n_), byref(api_data_nn_),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewGetListData returned non-zero error code: ",
                ierr.value)
        return (
            _ovectorstring(api_dataType_, api_dataType_n_.value),
            _ovectorint(api_numElements_, api_numElements_n_.value),
            _ovectorvectordouble(api_data_, api_data_n_, api_data_nn_))
    @staticmethod
    def addAlias(refTag, copyOptions=False, tag=-1):
        """
        Add a post-processing view as an `alias' of the reference view with tag
        `refTag'. If `copyOptions' is set, copy the options of the reference view.
        If `tag' is positive use it (and remove the view with that tag if it
        already exists), otherwise associate a new tag. Return the view tag.
        Return an integer value.
        """
        ierr = c_int()
        api__result__ = lib.gmshViewAddAlias(
            c_int(refTag),
            c_int(bool(copyOptions)),
            c_int(tag),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewAddAlias returned non-zero error code: ",
                ierr.value)
        return api__result__
    @staticmethod
    def copyOptions(refTag, tag):
        """
        Copy the options from the view with tag `refTag' to the view with tag
        `tag'.
        """
        ierr = c_int()
        lib.gmshViewCopyOptions(
            c_int(refTag),
            c_int(tag),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewCopyOptions returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def combine(what, how, remove=False):
        """
        Combine elements (if `what' == "elements") or steps (if `what' == "steps")
        of all views (`how' == "all"), all visible views (`how' == "visible") or
        all views having the same name (`how' == "name"). Remove original views if
        `remove' is set.
        """
        ierr = c_int()
        lib.gmshViewCombine(
            c_char_p(what.encode()),
            c_char_p(how.encode()),
            c_int(bool(remove)),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewCombine returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def probe(tag, x, y, z, step=-1, numComp=-1, gradient=False, tolerance=0., xElemCoord=[], yElemCoord=[], zElemCoord=[]):
        """
        Probe the view `tag' for its `value' at point (`x', `y', `z'). Return only
        the value at step `step' is `step' is positive. Return only values with
        `numComp' if `numComp' is positive. Return the gradient of the `value' if
        `gradient' is set. Probes with a geometrical tolerance (in the reference
        unit cube) of `tolerance' if `tolerance' is not zero. Return the result
        from the element described by its coordinates if `xElementCoord',
        `yElementCoord' and `zElementCoord' are provided.
        Return `value'.
        """
        # NOTE: the mutable list defaults are only read (marshaled), never mutated.
        api_value_, api_value_n_ = POINTER(c_double)(), c_size_t()
        api_xElemCoord_, api_xElemCoord_n_ = _ivectordouble(xElemCoord)
        api_yElemCoord_, api_yElemCoord_n_ = _ivectordouble(yElemCoord)
        api_zElemCoord_, api_zElemCoord_n_ = _ivectordouble(zElemCoord)
        ierr = c_int()
        lib.gmshViewProbe(
            c_int(tag),
            c_double(x),
            c_double(y),
            c_double(z),
            byref(api_value_), byref(api_value_n_),
            c_int(step),
            c_int(numComp),
            c_int(bool(gradient)),
            c_double(tolerance),
            api_xElemCoord_, api_xElemCoord_n_,
            api_yElemCoord_, api_yElemCoord_n_,
            api_zElemCoord_, api_zElemCoord_n_,
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewProbe returned non-zero error code: ",
                ierr.value)
        return _ovectordouble(api_value_, api_value_n_.value)
    @staticmethod
    def write(tag, fileName, append=False):
        """
        Write the view to a file `fileName'. The export format is determined by the
        file extension. Append to the file if `append' is set.
        """
        ierr = c_int()
        lib.gmshViewWrite(
            c_int(tag),
            c_char_p(fileName.encode()),
            c_int(bool(append)),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshViewWrite returned non-zero error code: ",
                ierr.value)
class plugin:
    """
    Plugin functions
    """
    # Thin ctypes wrappers over the gmshPlugin* C API; each checks the
    # C error flag and raises ValueError on failure.
    @staticmethod
    def setNumber(name, option, value):
        """
        Set the numerical option `option' to the value `value' for plugin `name'.
        """
        ierr = c_int()  # C-side error status; 0 means success
        lib.gmshPluginSetNumber(
            c_char_p(name.encode()),
            c_char_p(option.encode()),
            c_double(value),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshPluginSetNumber returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def setString(name, option, value):
        """
        Set the string option `option' to the value `value' for plugin `name'.
        """
        ierr = c_int()
        lib.gmshPluginSetString(
            c_char_p(name.encode()),
            c_char_p(option.encode()),
            c_char_p(value.encode()),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshPluginSetString returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def run(name):
        """
        Run the plugin `name'.
        """
        ierr = c_int()
        lib.gmshPluginRun(
            c_char_p(name.encode()),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshPluginRun returned non-zero error code: ",
                ierr.value)
class graphics:
    """
    Graphics functions
    """
    @staticmethod
    def draw():
        """
        Draw all the OpenGL scenes.
        """
        status = c_int()  # receives the C-side error code
        lib.gmshGraphicsDraw(byref(status))
        if status.value:
            raise ValueError(
                "gmshGraphicsDraw returned non-zero error code: ",
                status.value)
class fltk:
    """
    FLTK graphical user interface functions
    """
    # Thin ctypes wrappers over the gmshFltk* C API; each checks the
    # C error flag and raises ValueError on failure.
    @staticmethod
    def initialize():
        """
        Create the FLTK graphical user interface. Can only be called in the main
        thread.
        """
        ierr = c_int()  # C-side error status; 0 means success
        lib.gmshFltkInitialize(
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkInitialize returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def wait(time=-1.):
        """
        Wait at most `time' seconds for user interface events and return. If `time'
        < 0, wait indefinitely. First automatically create the user interface if it
        has not yet been initialized. Can only be called in the main thread.
        """
        ierr = c_int()
        lib.gmshFltkWait(
            c_double(time),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkWait returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def update():
        """
        Update the user interface (potentially creating new widgets and windows).
        First automatically create the user interface if it has not yet been
        initialized. Can only be called in the main thread: use `awake("update")'
        to trigger an update of the user interface from another thread.
        """
        ierr = c_int()
        lib.gmshFltkUpdate(
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkUpdate returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def awake(action=""):
        """
        Awake the main user interface thread and process pending events, and
        optionally perform an action (currently the only `action' allowed is
        "update").
        """
        ierr = c_int()
        lib.gmshFltkAwake(
            c_char_p(action.encode()),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkAwake returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def lock():
        """
        Block the current thread until it can safely modify the user interface.
        """
        ierr = c_int()
        lib.gmshFltkLock(
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkLock returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def unlock():
        """
        Release the lock that was set using lock.
        """
        ierr = c_int()
        lib.gmshFltkUnlock(
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkUnlock returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def run():
        """
        Run the event loop of the graphical user interface, i.e. repeatedly calls
        `wait()'. First automatically create the user interface if it has not yet
        been initialized. Can only be called in the main thread.
        """
        ierr = c_int()
        lib.gmshFltkRun(
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkRun returned non-zero error code: ",
                ierr.value)
    @staticmethod
    def selectEntities(dim=-1):
        """
        Select entities in the user interface. If `dim' is >= 0, return only the
        entities of the specified dimension (e.g. points if `dim' == 0).
        Return an integer value, `dimTags'.
        """
        # Output (dim, tag) vector allocated and filled by the C library.
        api_dimTags_, api_dimTags_n_ = POINTER(c_int)(), c_size_t()
        ierr = c_int()
        api__result__ = lib.gmshFltkSelectEntities(
            byref(api_dimTags_), byref(api_dimTags_n_),
            c_int(dim),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkSelectEntities returned non-zero error code: ",
                ierr.value)
        return (
            api__result__,
            _ovectorpair(api_dimTags_, api_dimTags_n_.value))
    @staticmethod
    def selectElements():
        """
        Select elements in the user interface.
        Return an integer value, `elementTags'.
        """
        api_elementTags_, api_elementTags_n_ = POINTER(c_size_t)(), c_size_t()
        ierr = c_int()
        api__result__ = lib.gmshFltkSelectElements(
            byref(api_elementTags_), byref(api_elementTags_n_),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkSelectElements returned non-zero error code: ",
                ierr.value)
        return (
            api__result__,
            _ovectorsize(api_elementTags_, api_elementTags_n_.value))
    @staticmethod
    def selectViews():
        """
        Select views in the user interface.
        Return an integer value, `viewTags'.
        """
        api_viewTags_, api_viewTags_n_ = POINTER(c_int)(), c_size_t()
        ierr = c_int()
        api__result__ = lib.gmshFltkSelectViews(
            byref(api_viewTags_), byref(api_viewTags_n_),
            byref(ierr))
        if ierr.value != 0:
            raise ValueError(
                "gmshFltkSelectViews returned non-zero error code: ",
                ierr.value)
        return (
            api__result__,
            _ovectorint(api_viewTags_, api_viewTags_n_.value))
class onelab:
    """
    ONELAB server functions
    """

    @staticmethod
    def set(data, format="json"):
        """
        Store one or more parameters in the ONELAB database; `data' is encoded
        in `format'.
        """
        err = c_int()
        lib.gmshOnelabSet(
            c_char_p(data.encode()), c_char_p(format.encode()), byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshOnelabSet returned non-zero error code: ",
                err.value)

    @staticmethod
    def get(name="", format="json"):
        """
        Fetch all parameters (or only `name' when specified) from the ONELAB
        database, encoded in `format'.
        Return `data'.
        """
        data_out = c_char_p()
        err = c_int()
        lib.gmshOnelabGet(
            byref(data_out), c_char_p(name.encode()),
            c_char_p(format.encode()), byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshOnelabGet returned non-zero error code: ",
                err.value)
        return _ostring(data_out)

    @staticmethod
    def setNumber(name, value):
        """
        Create or update the number parameter `name' in the ONELAB database
        with `value'.
        """
        val, val_n = _ivectordouble(value)
        err = c_int()
        lib.gmshOnelabSetNumber(
            c_char_p(name.encode()), val, val_n, byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshOnelabSetNumber returned non-zero error code: ",
                err.value)

    @staticmethod
    def setString(name, value):
        """
        Create or update the string parameter `name' in the ONELAB database
        with `value'.
        """
        val, val_n = _ivectorstring(value)
        err = c_int()
        lib.gmshOnelabSetString(
            c_char_p(name.encode()), val, val_n, byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshOnelabSetString returned non-zero error code: ",
                err.value)

    @staticmethod
    def getNumber(name):
        """
        Read the number parameter `name' from the ONELAB database; an empty
        vector is returned when the parameter does not exist.
        Return `value'.
        """
        val, val_n = POINTER(c_double)(), c_size_t()
        err = c_int()
        lib.gmshOnelabGetNumber(
            c_char_p(name.encode()), byref(val), byref(val_n), byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshOnelabGetNumber returned non-zero error code: ",
                err.value)
        return _ovectordouble(val, val_n.value)

    @staticmethod
    def getString(name):
        """
        Read the string parameter `name' from the ONELAB database; an empty
        vector is returned when the parameter does not exist.
        Return `value'.
        """
        val, val_n = POINTER(POINTER(c_char))(), c_size_t()
        err = c_int()
        lib.gmshOnelabGetString(
            c_char_p(name.encode()), byref(val), byref(val_n), byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshOnelabGetString returned non-zero error code: ",
                err.value)
        return _ovectorstring(val, val_n.value)

    @staticmethod
    def clear(name=""):
        """
        Empty the ONELAB database, or remove only the parameter `name' when
        given.
        """
        err = c_int()
        lib.gmshOnelabClear(c_char_p(name.encode()), byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshOnelabClear returned non-zero error code: ",
                err.value)

    @staticmethod
    def run(name="", command=""):
        """
        Run a ONELAB client. When `name' is given, a new client with that name
        executing `command' is created; otherwise a client linked to the
        processed input files is run if possible.
        """
        err = c_int()
        lib.gmshOnelabRun(
            c_char_p(name.encode()), c_char_p(command.encode()), byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshOnelabRun returned non-zero error code: ",
                err.value)
class logger:
    """
    Information logging functions
    """

    @staticmethod
    def write(message, level="info"):
        """
        Log `message' at severity `level' ("info", "warning" or "error").
        """
        err = c_int()
        lib.gmshLoggerWrite(
            c_char_p(message.encode()), c_char_p(level.encode()), byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshLoggerWrite returned non-zero error code: ",
                err.value)

    @staticmethod
    def start():
        """
        Begin recording log messages.
        """
        err = c_int()
        lib.gmshLoggerStart(byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshLoggerStart returned non-zero error code: ",
                err.value)

    @staticmethod
    def get():
        """
        Retrieve the recorded log messages.
        Return `log'.
        """
        log, log_n = POINTER(POINTER(c_char))(), c_size_t()
        err = c_int()
        lib.gmshLoggerGet(byref(log), byref(log_n), byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshLoggerGet returned non-zero error code: ",
                err.value)
        return _ovectorstring(log, log_n.value)

    @staticmethod
    def stop():
        """
        Stop recording log messages.
        """
        err = c_int()
        lib.gmshLoggerStop(byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshLoggerStop returned non-zero error code: ",
                err.value)

    @staticmethod
    def time():
        """
        Return the wall clock time as a floating point value.
        """
        err = c_int()
        result = lib.gmshLoggerTime(byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshLoggerTime returned non-zero error code: ",
                err.value)
        return result

    @staticmethod
    def cputime():
        """
        Return the CPU time as a floating point value.
        """
        err = c_int()
        result = lib.gmshLoggerCputime(byref(err))
        if err.value != 0:
            raise ValueError(
                "gmshLoggerCputime returned non-zero error code: ",
                err.value)
        return result
| [
"ctypes.util.find_library",
"os.path.realpath",
"numpy.ascontiguousarray",
"os.path.exists",
"numpy.ctypeslib.as_array",
"platform.system",
"signal.signal",
"backports.weakref.finalize",
"os.path.join"
] | [((708, 752), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (721, 752), False, 'import signal\n'), ((778, 804), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (794, 804), False, 'import os\n'), ((809, 826), 'platform.system', 'platform.system', ([], {}), '()\n', (824, 826), False, 'import platform\n'), ((855, 891), 'os.path.join', 'os.path.join', (['libdir', '"""gmsh-4.4.dll"""'], {}), "(libdir, 'gmsh-4.4.dll')\n", (867, 891), False, 'import os\n'), ((1043, 1066), 'os.path.exists', 'os.path.exists', (['libpath'], {}), '(libpath)\n', (1057, 1066), False, 'import os\n'), ((1082, 1102), 'ctypes.util.find_library', 'find_library', (['"""gmsh"""'], {}), "('gmsh')\n", (1094, 1102), False, 'from ctypes.util import find_library\n'), ((897, 914), 'platform.system', 'platform.system', ([], {}), '()\n', (912, 914), False, 'import platform\n'), ((942, 979), 'os.path.join', 'os.path.join', (['libdir', '"""libgmsh.dylib"""'], {}), "(libdir, 'libgmsh.dylib')\n", (954, 979), False, 'import os\n'), ((1000, 1034), 'os.path.join', 'os.path.join', (['libdir', '"""libgmsh.so"""'], {}), "(libdir, 'libgmsh.so')\n", (1012, 1034), False, 'import os\n'), ((1795, 1833), 'numpy.ctypeslib.as_array', 'numpy.ctypeslib.as_array', (['ptr', '(size,)'], {}), '(ptr, (size,))\n', (1819, 1833), False, 'import numpy\n'), ((1843, 1880), 'backports.weakref.finalize', 'weakreffinalize', (['v', 'lib.gmshFree', 'ptr'], {}), '(v, lib.gmshFree, ptr)\n', (1858, 1880), True, 'from backports.weakref import finalize as weakreffinalize\n'), ((2036, 2074), 'numpy.ctypeslib.as_array', 'numpy.ctypeslib.as_array', (['ptr', '(size,)'], {}), '(ptr, (size,))\n', (2060, 2074), False, 'import numpy\n'), ((2084, 2121), 'backports.weakref.finalize', 'weakreffinalize', (['v', 'lib.gmshFree', 'ptr'], {}), '(v, lib.gmshFree, ptr)\n', (2099, 2121), True, 'from backports.weakref import finalize as weakreffinalize\n'), ((2279, 
2317), 'numpy.ctypeslib.as_array', 'numpy.ctypeslib.as_array', (['ptr', '(size,)'], {}), '(ptr, (size,))\n', (2303, 2317), False, 'import numpy\n'), ((2327, 2364), 'backports.weakref.finalize', 'weakreffinalize', (['v', 'lib.gmshFree', 'ptr'], {}), '(v, lib.gmshFree, ptr)\n', (2342, 2364), True, 'from backports.weakref import finalize as weakreffinalize\n'), ((3741, 3782), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['o', 'numpy.float64'], {}), '(o, numpy.float64)\n', (3764, 3782), False, 'import numpy\n'), ((3994, 4033), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['o', 'numpy.int32'], {}), '(o, numpy.int32)\n', (4017, 4033), False, 'import numpy\n'), ((3367, 3406), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['o', 'numpy.int32'], {}), '(o, numpy.int32)\n', (3390, 3406), False, 'import numpy\n'), ((3551, 3590), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['o', 'numpy.uintp'], {}), '(o, numpy.uintp)\n', (3574, 3590), False, 'import numpy\n')] |
# Extended Hamming(8,4) encode/decode demo; code matrices are derived from
# Hadamard matrices built by Kronecker products.
import numpy as np
from scipy.linalg import solve
import sympy
import random
import binascii
import time
# Creating Hadamard matrices: H2 is the base, larger ones via Kronecker products.
H2 = np.array([[1,1],
               [1,-1]])
H4 = np.kron(H2,H2)
H8 = np.kron(H4,H2)
# H16 = np.kron(H4,H4)
# Creating codeword matrix: rows of H8 and -H8, with -1 remapped to 0.
C8 = np.concatenate((H8,-H8))
C8[C8 == -1] = 0
# All possible messages:
# messages = np.array([[0,0,0,0],[0,0,0,1],[0,0,1,0],[0,1,0,0],
#                      [0,0,1,1],[0,1,0,1],[0,1,1,0],[0,1,1,1],
#                      [1,0,0,0],[1,0,0,1],[1,0,1,0],[1,1,0,0],
#                      [1,0,1,1],[1,1,0,1],[1,1,1,0],[1,1,1,1]])
# Creating Generator Matrix and row reduced version
G = np.array([[1,1,1,1,1,1,1,1],
              [0,0,0,0,1,1,1,1],
              [0,0,1,1,0,0,1,1],
              [0,1,0,1,0,1,0,1]])
# NOTE: `reduced` is computed but not used below — Greduced is hard-coded
# with the (mod 2) row-reduced form of G instead.
reduced, _ = sympy.Matrix(G).rref()
# reduced = np.array(sympy.Matrix.tolist(reduced)).astype(np.float64)%2
Greduced = np.array([[1,0,0,1,0,1,1,0],
                     [0,1,0,1,0,1,0,1],
                     [0,0,1,1,0,0,1,1],
                     [0,0,0,0,1,1,1,1]])
# Parity check matrix (each row checks one parity equation of the code).
P = np.array([[1,1,1,1,0,0,0,0],
              [1,1,0,0,1,1,0,0],
              [1,0,1,0,1,0,1,0],
              [0,1,1,0,1,0,0,1]])
# Transposed parity matrix: syndrome of codeword c is (c @ Pt) mod 2.
Pt = np.transpose(P)
# Check that all codewords are valid with parity check
# for message in messages:
#     codeword = np.dot(message,Greduced) % 2
#     print(codeword)
#     print(np.dot(codeword, Pt) % 2)
def getCodewords(inputchar):
    """Encode a single character as two Hamming(8,4) codewords.

    The character's 8-bit binary form is split into two 4-bit halves, each of
    which is multiplied by the generator matrix `Greduced` (mod 2).
    Returns a list of the two codeword arrays.
    """
    # Zero-pad the binary representation to exactly 8 bits.
    binary_m = format(ord(inputchar), 'b')
    if len(binary_m) < 8:
        binary_m = '0' * (8 - len(binary_m)) + binary_m
    print('\n{}'.format(binary_m))
    # Turn the bit string into a (2, 4) integer array of message halves.
    bits = np.array([int(b) for b in binary_m])
    halves = bits.reshape(2, 4)
    # Encode each half with the generator matrix, arithmetic mod 2.
    C1 = np.dot(halves[0], Greduced) % 2
    C2 = np.dot(halves[1], Greduced) % 2
    codes = [C1, C2]
    print('Original Codewords: {}'.format(codes))
    return codes
def noise(codeword, probability):
    """Flip each of the first 8 symbols of `codeword` independently with the
    given probability (in place), printing which positions were flipped.
    Returns the (mutated) codeword.
    """
    corrupted = codeword
    flipped = []
    for pos in range(8):
        if random.random() < probability:
            flipped.append(pos)
            corrupted[pos] = (corrupted[pos] + 1) % 2
    print('Noisy Message: {} Error: Positions {}'.format(corrupted, flipped))
    return corrupted
def mapToMessage(codeword):
    """Extract the 4 data bits from an 8-bit codeword.

    For this generator matrix the data bits live at codeword positions
    0, 1, 2 and 4 (i.e. indices 1, 2, 3 and 5 when counting from 1).
    """
    bits = codeword.astype(np.int64)
    # First three data bits are codeword[0:3]; the fourth is codeword[4].
    message = bits[:4].copy()
    message[3] = bits[4]
    return message
def decode(received):
    """Syndrome-decode an 8-bit received word.

    Computes the syndrome r @ Pt (mod 2); a zero syndrome means no detected
    error, a syndrome matching a single-bit error pattern is corrected, and
    anything else is reported as an uncorrectable double error.
    Returns (message_bits, status) where status is "Corrected" or
    "Not Corrected".
    """
    R = received
    syndrome = np.dot(R, Pt) % 2
    # Single-bit error patterns are the rows of the identity; their syndromes
    # are therefore just the rows of Pt.
    identity = np.eye(8)
    syndromes = np.dot(identity, Pt)
    if np.all(syndrome == 0):
        print('No errors detected')
        return (mapToMessage(R), "Not Corrected")
    for i in range(8):
        if np.array_equal(syndromes[i].flatten(), syndrome.flatten()):
            print('Error detected in position {}'.format(i))
            return (mapToMessage((R - identity[i]) % 2), "Corrected")
    print("Detected 2 errors, could not correct")
    return (mapToMessage(R), "Not Corrected")
def translate(decodemsgs):
    """Join two 4-bit halves into one 8-bit string and return its character."""
    bits = ''.join(str(b) for half in decodemsgs[:2] for b in half)
    return chr(int(bits, 2))
def processMessage(message, probability):
    """Encode each character of `message`, corrupt it with channel noise,
    decode both halves, and rebuild the string.

    Returns (decoded_string, indices_of_characters_where_a_bit_was_corrected).
    """
    print('Encoding and Decoding "{}"'.format(message))
    decodemessage = ''
    correctedIndices = []
    for idx, ch in enumerate(message):
        halves = getCodewords(ch)
        noisy_a = noise(halves[0], probability)
        noisy_b = noise(halves[1], probability)
        print(noisy_a, noisy_b)
        part_a, status_a = decode(noisy_a)
        part_b, status_b = decode(noisy_b)
        # Remember characters where the decoder had to fix a bit flip.
        if status_a == "Corrected" or status_b == "Corrected":
            correctedIndices.append(idx)
        decodemessage += translate([part_a, part_b])
    print('\nYour message was {0}'.format(decodemessage))
    return (decodemessage, correctedIndices)
def hadamardDecoding(message, probability):
    """Run the full encode/noise/decode pipeline on `message`.

    Returns [decoded_string, positions_that_decoded_wrong,
    positions_where_an_error_was_corrected].
    """
    decoded, corrected = processMessage(message, probability)
    mismatches = [i for i in range(len(message)) if message[i] != decoded[i]]
    return [decoded, mismatches, corrected]
# print(hadamardDecoding("01234567", .05))
| [
"numpy.zeros",
"numpy.transpose",
"numpy.all",
"sympy.Matrix",
"random.random",
"numpy.array",
"numpy.kron",
"numpy.dot",
"numpy.concatenate"
] | [((140, 167), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (148, 167), True, 'import numpy as np\n'), ((185, 200), 'numpy.kron', 'np.kron', (['H2', 'H2'], {}), '(H2, H2)\n', (192, 200), True, 'import numpy as np\n'), ((205, 220), 'numpy.kron', 'np.kron', (['H4', 'H2'], {}), '(H4, H2)\n', (212, 220), True, 'import numpy as np\n'), ((275, 300), 'numpy.concatenate', 'np.concatenate', (['(H8, -H8)'], {}), '((H8, -H8))\n', (289, 300), True, 'import numpy as np\n'), ((660, 779), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 0, 0, 1, \n 1], [0, 1, 0, 1, 0, 1, 0, 1]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 1, 1, \n 0, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 1]])\n', (668, 779), True, 'import numpy as np\n'), ((902, 1021), 'numpy.array', 'np.array', (['[[1, 0, 0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 1, 1, 0, 0, 1, \n 1], [0, 0, 0, 0, 1, 1, 1, 1]]'], {}), '([[1, 0, 0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 1, 1, \n 0, 0, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1]])\n', (910, 1021), True, 'import numpy as np\n'), ((1082, 1201), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0], [1, 0, 1, 0, 1, 0, 1, \n 0], [0, 1, 1, 0, 1, 0, 0, 1]]'], {}), '([[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0], [1, 0, 1, 0, \n 1, 0, 1, 0], [0, 1, 1, 0, 1, 0, 0, 1]])\n', (1090, 1201), True, 'import numpy as np\n'), ((1216, 1231), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (1228, 1231), True, 'import numpy as np\n'), ((1654, 1690), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0]])\n', (1662, 1690), True, 'import numpy as np\n'), ((1783, 1821), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 0, 0, 0]])\n', (1791, 1821), True, 'import numpy as np\n'), ((1826, 1845), 'numpy.array', 'np.array', (['m[0][0:4]'], {}), '(m[0][0:4])\n', (1834, 1845), 
True, 'import numpy as np\n'), ((1857, 1876), 'numpy.array', 'np.array', (['m[0][4:8]'], {}), '(m[0][4:8])\n', (1865, 1876), True, 'import numpy as np\n'), ((2555, 2577), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (2563, 2577), True, 'import numpy as np\n'), ((2958, 2974), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (2966, 2974), True, 'import numpy as np\n'), ((2994, 3010), 'numpy.zeros', 'np.zeros', (['(8, 4)'], {}), '((8, 4))\n', (3002, 3010), True, 'import numpy as np\n'), ((3146, 3161), 'numpy.all', 'np.all', (['(Sv == 0)'], {}), '(Sv == 0)\n', (3152, 3161), True, 'import numpy as np\n'), ((796, 811), 'sympy.Matrix', 'sympy.Matrix', (['G'], {}), '(G)\n', (808, 811), False, 'import sympy\n'), ((1901, 1923), 'numpy.dot', 'np.dot', (['M[0]', 'Greduced'], {}), '(M[0], Greduced)\n', (1907, 1923), True, 'import numpy as np\n'), ((1936, 1958), 'numpy.dot', 'np.dot', (['M[1]', 'Greduced'], {}), '(M[1], Greduced)\n', (1942, 1958), True, 'import numpy as np\n'), ((2874, 2887), 'numpy.dot', 'np.dot', (['R', 'Pt'], {}), '(R, Pt)\n', (2880, 2887), True, 'import numpy as np\n'), ((3089, 3114), 'numpy.dot', 'np.dot', (['errorArray[i]', 'Pt'], {}), '(errorArray[i], Pt)\n', (3095, 3114), True, 'import numpy as np\n'), ((2218, 2233), 'random.random', 'random.random', ([], {}), '()\n', (2231, 2233), False, 'import random\n')] |
import argparse
from typing import Sequence, Union
import torch
from torch.nn import functional as F
import numpy as np
import wandb
import albumentations as A
import cv2
import pytorch_lightning as pl
from . import data
from .dep.unet import ResNetUNet
from .dep.siren import Siren
from .data.obj import Obj
from .data.tfms import denormalize
from . import utils
# Registry of supported key-model MLP architectures; add entries here to
# support other architectures.
mlp_class_dict = {'siren': Siren}
class SurfaceEmbeddingModel(pl.LightningModule):
    """Lightning module for learning per-object surface embeddings.

    A CNN (query model) maps an RGB crop to a per-pixel embedding image plus a
    mask logit, while one MLP per object (key model) maps normalized 3D surface
    coordinates into the same embedding space. Training pulls matching
    (pixel, surface-point) pairs together with an InfoNCE loss and supervises
    the mask with binary cross-entropy.
    """

    def __init__(self, n_objs: int, emb_dim=12, n_pos=1024, n_neg=1024, lr_cnn=3e-4, lr_mlp=3e-5,
                 mlp_name='siren', mlp_hidden_features=256, mlp_hidden_layers=2,
                 key_noise=1e-3, warmup_steps=2000, separate_decoders=True,
                 **kwargs):
        """
        :param emb_dim: number of embedding dimensions
        :param n_pos: number of positive (q, k) pairs from the object mask
        :param n_neg: number of negative keys, k-, from the object surface
        """
        super().__init__()
        self.save_hyperparameters()
        self.n_objs, self.emb_dim = n_objs, emb_dim
        self.n_pos, self.n_neg = n_pos, n_neg
        self.lr_cnn, self.lr_mlp = lr_cnn, lr_mlp
        self.warmup_steps = warmup_steps
        self.key_noise = key_noise
        self.separate_decoders = separate_decoders

        # query model: one decoder per object, or one shared decoder emitting
        # all objects' channels, depending on `separate_decoders`.
        self.cnn = ResNetUNet(
            n_class=(emb_dim + 1) if separate_decoders else n_objs * (emb_dim + 1),
            n_decoders=n_objs if separate_decoders else 1,
        )
        # key models: one coordinate-to-embedding MLP per object.
        mlp_class = mlp_class_dict[mlp_name]
        mlp_args = dict(in_features=3, out_features=emb_dim,
                        hidden_features=mlp_hidden_features, hidden_layers=mlp_hidden_layers)
        self.mlps = torch.nn.Sequential(*[mlp_class(**mlp_args) for _ in range(n_objs)])

    @staticmethod
    def model_specific_args(parent_parser: argparse.ArgumentParser):
        # Register this model's hyperparameters on the given CLI parser.
        parser = parent_parser.add_argument_group(SurfaceEmbeddingModel.__name__)
        parser.add_argument('--emb-dim', type=int, default=12)
        parser.add_argument('--single-decoder', dest='separate_decoders', action='store_false')
        return parent_parser

    def get_auxs(self, objs: Sequence[Obj], crop_res: int):
        # Build the training-time data-loading/augmentation pipeline.
        random_crop_aux = data.std_auxs.RandomRotatedMaskCrop(crop_res)
        return (
            data.std_auxs.RgbLoader(),
            data.std_auxs.MaskLoader(),
            random_crop_aux.definition_aux,
            # Some image augmentations probably make most sense in the original image, before rotation / rescaling
            # by cropping. 'definition_aux' registers 'AABB_crop' such that the "expensive" image augmentation is only
            # performed where the crop is going to be taken from.
            data.std_auxs.TransformsAux(key='rgb', crop_key='AABB_crop', tfms=A.Compose([
                A.GaussianBlur(blur_limit=(1, 3)),
                A.ISONoise(),
                A.GaussNoise(),
                data.tfms.DebayerArtefacts(),
                data.tfms.Unsharpen(),
                A.CLAHE(),  # could probably be moved to the post-crop augmentations
                A.GaussianBlur(blur_limit=(1, 3)),
            ])),
            random_crop_aux.apply_aux,
            data.pose_auxs.ObjCoordAux(objs, crop_res, replace_mask=True),
            data.pose_auxs.SurfaceSampleAux(objs, self.n_neg),
            data.pose_auxs.MaskSamplesAux(self.n_pos),
            data.std_auxs.TransformsAux(tfms=A.Compose([
                A.CoarseDropout(max_height=16, max_width=16, min_width=8, min_height=8),
                A.ColorJitter(hue=0.1),
            ])),
            data.std_auxs.NormalizeAux(),
            data.std_auxs.KeyFilterAux({'rgb_crop', 'obj_coord', 'obj_idx', 'surface_samples', 'mask_samples'})
        )

    def get_infer_auxs(self, objs: Sequence[Obj], crop_res: int, from_detections=True):
        # Build the inference-time pipeline; augmentation-free, and driven by
        # detection bboxes when `from_detections` is True.
        auxs = [data.std_auxs.RgbLoader()]
        if not from_detections:
            auxs.append(data.std_auxs.MaskLoader())
        auxs.append(data.std_auxs.RandomRotatedMaskCrop(
            crop_res, max_angle=0,
            offset_scale=0 if from_detections else 1,
            use_bbox=from_detections,
            rgb_interpolation=(cv2.INTER_LINEAR,),
        ))
        if not from_detections:
            # Ground-truth-based targets are only available without detections.
            auxs += [
                data.pose_auxs.ObjCoordAux(objs, crop_res, replace_mask=True),
                data.pose_auxs.SurfaceSampleAux(objs, self.n_neg),
                data.pose_auxs.MaskSamplesAux(self.n_pos),
            ]
        return auxs

    def configure_optimizers(self):
        # NOTE(review): the learning rates are hard-coded here (1e-4 / 3e-5)
        # and do not use self.lr_cnn / self.lr_mlp stored in __init__ —
        # confirm this is intended.
        opt = torch.optim.Adam([
            dict(params=self.cnn.parameters(), lr=1e-4),
            dict(params=self.mlps.parameters(), lr=3e-5),
        ])
        # Linear warmup of the learning rate over the first warmup_steps steps.
        sched = dict(
            scheduler=torch.optim.lr_scheduler.LambdaLR(opt, lambda i: min(1., i / self.warmup_steps)),
            interval='step'
        )
        return [opt], [sched]

    def step(self, batch, log_prefix):
        """Shared train/valid step: mask BCE loss + InfoNCE contrastive loss."""
        img = batch['rgb_crop']  # (B, 3, H, W)
        coord_img = batch['obj_coord']  # (B, H, W, 4) [-1, 1]
        obj_idx = batch['obj_idx']  # (B,)
        coords_neg = batch['surface_samples']  # (B, n_neg, 3) [-1, 1]
        mask_samples = batch['mask_samples']  # (B, n_pos, 2)
        device = img.device
        B, _, H, W = img.shape
        assert coords_neg.shape[1] == self.n_neg
        mask = coord_img[..., 3] == 1.  # (B, H, W)
        y, x = mask_samples.permute(2, 0, 1)  # 2 x (B, n_pos)

        if self.separate_decoders:
            cnn_out = self.cnn(img, obj_idx)  # (B, 1 + emb_dim, H, W)
            mask_lgts = cnn_out[:, 0]  # (B, H, W)
            queries = cnn_out[:, 1:]  # (B, emb_dim, H, W)
        else:
            # Single decoder: pick this object's mask and embedding channels.
            cnn_out = self.cnn(img)  # (B, n_objs + n_objs * emb_dim, H, W)
            mask_lgts = cnn_out[torch.arange(B), obj_idx]  # (B, H, W)
            queries = cnn_out[:, self.n_objs:].view(B, self.n_objs, self.emb_dim, H, W)
            queries = queries[torch.arange(B), obj_idx]  # (B, emb_dim, H, W)

        mask_prob = torch.sigmoid(mask_lgts)  # (B, H, W)
        mask_loss = F.binary_cross_entropy(mask_prob, mask.type_as(mask_prob))
        # Gather the query embeddings at the sampled mask pixels.
        queries = queries[torch.arange(B).view(B, 1), :, y, x]  # (B, n_pos, emb_dim)

        # compute similarities for positive pairs
        coords_pos = coord_img[torch.arange(B).view(B, 1), y, x, :3]  # (B, n_pos, 3) [-1, 1]
        # Jitter key coordinates slightly so keys don't overfit exact points.
        coords_pos += torch.randn_like(coords_pos) * self.key_noise
        keys_pos = torch.stack([self.mlps[i](c) for i, c in zip(obj_idx, coords_pos)])  # (B, n_pos, emb_dim)
        sim_pos = (queries * keys_pos).sum(dim=-1, keepdim=True)  # (B, n_pos, 1)

        # compute similarities for negative pairs
        coords_neg += torch.randn_like(coords_neg) * self.key_noise
        keys_neg = torch.stack([self.mlps[i](v) for i, v in zip(obj_idx, coords_neg)])  # (B, n_neg, n_dim)
        sim_neg = queries @ keys_neg.permute(0, 2, 1)  # (B, n_pos, n_neg)

        # loss: InfoNCE with the positive similarity at class index 0.
        lgts = torch.cat((sim_pos, sim_neg), dim=-1).permute(0, 2, 1)  # (B, 1 + n_neg, n_pos)
        target = torch.zeros(B, self.n_pos, device=device, dtype=torch.long)
        nce_loss = F.cross_entropy(lgts, target)

        loss = mask_loss + nce_loss
        self.log(f'{log_prefix}/loss', loss)
        self.log(f'{log_prefix}/mask_loss', mask_loss)
        self.log(f'{log_prefix}/nce_loss', nce_loss)
        return loss

    def training_step(self, batch, _):
        return self.step(batch, 'train')

    def validation_step(self, batch, _):
        # Also log a qualitative sample of the embeddings during validation.
        self.log_image_sample(batch)
        return self.step(batch, 'valid')

    def get_emb_vis(self, emb_img: torch.Tensor, mask: torch.Tensor = None, demean: torch.tensor = False):
        """Reduce an embedding image to 3 channels in [0, 1] for visualization.

        `demean` may be False (no centering), True (subtract the mean over
        `mask`), or a precomputed mean tensor.
        """
        if demean is True:
            demean = emb_img[mask].view(-1, self.emb_dim).mean(dim=0)
        if demean is not False:
            emb_img = emb_img - demean
        # Average groups of embedding dims down to 3 pseudo-RGB channels.
        shape = emb_img.shape[:-1]
        emb_img = emb_img.view(*shape, 3, -1).mean(dim=-1)
        if mask is not None:
            emb_img[~mask] = 0.
        # Normalize to [0, 1]; epsilon guards against an all-zero image.
        emb_img /= torch.abs(emb_img).max() + 1e-9
        emb_img.mul_(0.5).add_(0.5)
        return emb_img

    def log_image_sample(self, batch, i=0):
        """Log one sample's input, mask estimate, query and key images to W&B."""
        img = batch['rgb_crop'][i]
        obj_idx = batch['obj_idx'][i]
        coord_img = batch['obj_coord'][i]
        coord_mask = coord_img[..., 3] != 0

        mask_lgts, query_img = self.infer_cnn(img, obj_idx)
        query_img = self.get_emb_vis(query_img)
        mask_est = torch.tile(torch.sigmoid(mask_lgts)[..., None], (1, 1, 3))

        key_img = self.infer_mlp(coord_img[..., :3], obj_idx)
        key_img = self.get_emb_vis(key_img, mask=coord_mask, demean=True)

        # Concatenate the four views side by side into a single image.
        log_img = torch.cat((
            denormalize(img).permute(1, 2, 0), mask_est, query_img, key_img,
        ), dim=1).cpu().numpy()
        self.trainer.logger.experiment.log(dict(
            embeddings=wandb.Image(log_img),
            global_step=self.trainer.global_step
        ))

    @torch.no_grad()
    def infer_cnn(self, img: Union[np.ndarray, torch.Tensor], obj_idx, rotation_ensemble=True):
        """Run the query CNN on one image; optionally average over 4 rotations.

        Returns (mask_lgts (h, w), query_img (h, w, emb_dim)).
        """
        assert not self.training
        if isinstance(img, np.ndarray):
            if img.dtype == np.uint8:
                img = data.tfms.normalize(img)
            img = torch.from_numpy(img).to(self.device)
        _, h, w = img.shape
        if rotation_ensemble:
            img = utils.rotate_batch(img)  # (4, 3, h, h)
        else:
            img = img[None]  # (1, 3, h, w)
        cnn_out = self.cnn(img, [obj_idx] * len(img) if self.separate_decoders else None)
        if not self.separate_decoders:
            # Select this object's mask channel and embedding channels.
            channel_idxs = [obj_idx] + list(self.n_objs + obj_idx * self.emb_dim + np.arange(self.emb_dim))
            cnn_out = cnn_out[:, channel_idxs]
        # cnn_out: (B, 1+emb_dim, h, w)
        if rotation_ensemble:
            # Rotate the four outputs back and average them.
            cnn_out = utils.rotate_batch_back(cnn_out).mean(dim=0)
        else:
            cnn_out = cnn_out[0]
        mask_lgts, query_img = cnn_out[0], cnn_out[1:]
        query_img = query_img.permute(1, 2, 0)  # (h, w, emb_dim)
        return mask_lgts, query_img

    @torch.no_grad()
    def infer_mlp(self, pts_norm: Union[np.ndarray, torch.Tensor], obj_idx):
        """Evaluate the object's key MLP at normalized ([-1, 1]) 3D points."""
        assert not self.training
        if isinstance(pts_norm, np.ndarray):
            pts_norm = torch.from_numpy(pts_norm).to(self.device).float()
        return self.mlps[obj_idx](pts_norm)  # (..., emb_dim)
| [
"albumentations.ColorJitter",
"albumentations.ISONoise",
"torch.randn_like",
"albumentations.GaussNoise",
"albumentations.CoarseDropout",
"torch.nn.functional.cross_entropy",
"torch.cat",
"torch.sigmoid",
"albumentations.GaussianBlur",
"numpy.arange",
"torch.arange",
"wandb.Image",
"torch.ze... | [((9040, 9055), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9053, 9055), False, 'import torch\n'), ((10172, 10187), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10185, 10187), False, 'import torch\n'), ((6112, 6136), 'torch.sigmoid', 'torch.sigmoid', (['mask_lgts'], {}), '(mask_lgts)\n', (6125, 6136), False, 'import torch\n'), ((7151, 7210), 'torch.zeros', 'torch.zeros', (['B', 'self.n_pos'], {'device': 'device', 'dtype': 'torch.long'}), '(B, self.n_pos, device=device, dtype=torch.long)\n', (7162, 7210), False, 'import torch\n'), ((7230, 7259), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['lgts', 'target'], {}), '(lgts, target)\n', (7245, 7259), True, 'from torch.nn import functional as F\n'), ((6483, 6511), 'torch.randn_like', 'torch.randn_like', (['coords_pos'], {}), '(coords_pos)\n', (6499, 6511), False, 'import torch\n'), ((6794, 6822), 'torch.randn_like', 'torch.randn_like', (['coords_neg'], {}), '(coords_neg)\n', (6810, 6822), False, 'import torch\n'), ((7054, 7091), 'torch.cat', 'torch.cat', (['(sim_pos, sim_neg)'], {'dim': '(-1)'}), '((sim_pos, sim_neg), dim=-1)\n', (7063, 7091), False, 'import torch\n'), ((8555, 8579), 'torch.sigmoid', 'torch.sigmoid', (['mask_lgts'], {}), '(mask_lgts)\n', (8568, 8579), False, 'import torch\n'), ((5886, 5901), 'torch.arange', 'torch.arange', (['B'], {}), '(B)\n', (5898, 5901), False, 'import torch\n'), ((6043, 6058), 'torch.arange', 'torch.arange', (['B'], {}), '(B)\n', (6055, 6058), False, 'import torch\n'), ((8121, 8139), 'torch.abs', 'torch.abs', (['emb_img'], {}), '(emb_img)\n', (8130, 8139), False, 'import torch\n'), ((8952, 8972), 'wandb.Image', 'wandb.Image', (['log_img'], {}), '(log_img)\n', (8963, 8972), False, 'import wandb\n'), ((9328, 9349), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (9344, 9349), False, 'import torch\n'), ((6256, 6271), 'torch.arange', 'torch.arange', (['B'], {}), '(B)\n', (6268, 6271), False, 'import torch\n'), ((6398, 6413), 
'torch.arange', 'torch.arange', (['B'], {}), '(B)\n', (6410, 6413), False, 'import torch\n'), ((9753, 9776), 'numpy.arange', 'np.arange', (['self.emb_dim'], {}), '(self.emb_dim)\n', (9762, 9776), True, 'import numpy as np\n'), ((2917, 2950), 'albumentations.GaussianBlur', 'A.GaussianBlur', ([], {'blur_limit': '(1, 3)'}), '(blur_limit=(1, 3))\n', (2931, 2950), True, 'import albumentations as A\n'), ((2968, 2980), 'albumentations.ISONoise', 'A.ISONoise', ([], {}), '()\n', (2978, 2980), True, 'import albumentations as A\n'), ((2998, 3012), 'albumentations.GaussNoise', 'A.GaussNoise', ([], {}), '()\n', (3010, 3012), True, 'import albumentations as A\n'), ((3115, 3124), 'albumentations.CLAHE', 'A.CLAHE', ([], {}), '()\n', (3122, 3124), True, 'import albumentations as A\n'), ((3200, 3233), 'albumentations.GaussianBlur', 'A.GaussianBlur', ([], {'blur_limit': '(1, 3)'}), '(blur_limit=(1, 3))\n', (3214, 3233), True, 'import albumentations as A\n'), ((3557, 3628), 'albumentations.CoarseDropout', 'A.CoarseDropout', ([], {'max_height': '(16)', 'max_width': '(16)', 'min_width': '(8)', 'min_height': '(8)'}), '(max_height=16, max_width=16, min_width=8, min_height=8)\n', (3572, 3628), True, 'import albumentations as A\n'), ((3646, 3668), 'albumentations.ColorJitter', 'A.ColorJitter', ([], {'hue': '(0.1)'}), '(hue=0.1)\n', (3659, 3668), True, 'import albumentations as A\n'), ((10366, 10392), 'torch.from_numpy', 'torch.from_numpy', (['pts_norm'], {}), '(pts_norm)\n', (10382, 10392), False, 'import torch\n')] |
import numpy as np
import math
import torch
import torch.nn as nn
import time
from torch.autograd import Variable
import pandas as pd
import argparse
import os

# Environment variable values must be strings: assigning the bool True raises
# TypeError at runtime. Use the string 'True' instead. (For MAC MKL
# optimization — allows duplicate OpenMP runtimes to coexist.)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Fix RNG seeds for reproducible training runs.
np.random.seed(0)
torch.manual_seed(0)

# Train on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

"""Modified from https://github.com/burklight/nonlinear-IB-PyTorch"""
def compute_distances(x):
    """Pairwise squared Euclidean distances between the rows of x.

    Uses the expansion ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b and clamps tiny
    negative values (numerical noise) to zero. Returns an (n, n) tensor.
    """
    sq_norms = x.pow(2).sum(dim=1, keepdim=True)  # (n, 1)
    cross = torch.mm(x, x.t())                    # (n, n)
    dist = sq_norms + sq_norms.t() - 2.0 * cross
    return torch.clamp(dist, 0, np.inf)
def KDE_IXT_estimation(logvar_t, mean_t):
    """KDE-based estimate (in nats) of the mutual information I(X;T).

    Treats each row of mean_t as the center of a Gaussian kernel with variance
    exp(logvar_t) and evaluates the batch-averaged log kernel-density.
    """
    n_batch, _ = mean_t.shape
    var = torch.exp(logvar_t) + 1e-10  # guard against log(0)
    # Pairwise squared distances between kernel centers (clamped to >= 0).
    sq_norms = (mean_t ** 2).sum(1).view(-1, 1)
    dist = torch.clamp(
        sq_norms + sq_norms.view(1, -1) - 2.0 * torch.mm(mean_t, mean_t.t()),
        0, np.inf)
    avg_log_kde = torch.mean(torch.logsumexp(-0.5 * dist / var, dim=1))
    return math.log(n_batch) - avg_log_kde
def get_IXT(mean_t, logvar_t):
    """Upper bound on I(X;T), converted from nats to bits."""
    ixt_nats = KDE_IXT_estimation(logvar_t, mean_t)
    return ixt_nats / np.log(2)
def get_ITY(logits_y, y):
    """Lower bound on I(T;Y) in bits, derived from the BCE loss H(Y|T).

    Assumes H(Y) = 1 bit (balanced binary labels): I(T;Y) >= 1 - H(Y|T).
    """
    h_y_given_t = ce(logits_y, y)
    bits = (np.log(2) - h_y_given_t) / np.log(2)
    return bits
def get_loss(IXT_upper, ITY_lower):
    """Negated IB objective: minimizing this maximizes ITY - beta * IXT."""
    objective = ITY_lower - beta * IXT_upper
    return -1.0 * objective
# Command-line interface for the MSTREAM-IB trainer.
parser = argparse.ArgumentParser(description="Training for MSTREAM-IB")
parser.add_argument(
    "--outputdim", type=int, help="number of output dimensions", default=12
)
parser.add_argument(
    "--inputdim", type=int, help="number of input dimensions", required=True
)
parser.add_argument("--input", help="input file", required=True)
parser.add_argument("--label", help="labels file", required=True)
parser.add_argument("--output", help="output file", default="ib.txt")
parser.add_argument(
    "--numRecords", type=int, help="number of records for training", default=256
)
parser.add_argument("--beta", type=float, help="beta value of IB", default=0.5)
parser.add_argument("--lr", type=float, help="learning rate", required=True)
parser.add_argument("--numEpochs", type=int, help="number of epochs", required=True)
args = parser.parse_args()
# IB trade-off coefficient, read as a module-level global by get_loss().
beta = args.beta
class AutoEncoder(nn.Module):
    """Stochastic linear encoder with a linear decoder head.

    The encoder `e1` maps inputs to a bottleneck of `args.outputdim`
    dimensions; unit-variance Gaussian noise is added to the bottleneck
    before the scalar decoder `output_layer`.
    """

    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.e1 = nn.Linear(args.inputdim, args.outputdim)
        self.output_layer = nn.Linear(args.outputdim, 1)

    def forward(self, x):
        """Return (decoder logits, noise-free bottleneck mean)."""
        mu = self.e1(x)
        # Reparameterization-style injection of unit-variance noise.
        noisy = mu + torch.randn_like(mu) * 1
        logits = self.output_layer(noisy)
        return logits, mu
# Binary cross-entropy on decoder logits; used by get_ITY() to bound I(T;Y).
ce = torch.nn.BCEWithLogitsLoss()

# Load the feature matrix and labels; labels are truncated to the training
# subset of numRecords rows.
data = torch.Tensor(np.loadtxt(args.input, delimiter=","))
label = pd.read_csv(args.label, names=["label"])[: args.numRecords]
t = time.time()

# Standardize features column-wise; zero-variance columns are set to 0 to
# avoid division-by-zero NaNs.
mean, std = data.mean(0), data.std(0)
new = (data - mean) / std
new[:, std == 0] = 0
label = torch.Tensor(np.array(label.label).reshape(-1, 1))

ae = AutoEncoder().to(device)
optimizer = torch.optim.Adam(ae.parameters(), lr=args.lr)

# Full-batch IB training: minimize -(ITY - beta * IXT) each epoch.
for epoch in range(args.numEpochs):
    train_x = Variable(new[: args.numRecords]).to(device)
    train_y = Variable(label).to(device)
    optimizer.zero_grad()
    train_logits_y, train_mean_t = ae(train_x)
    train_ITY = get_ITY(train_logits_y, train_y)
    # Fixed log-variance of 0 (unit variance) for the KDE I(X;T) estimate,
    # matching the unit noise added in AutoEncoder.forward.
    logvar_t = torch.Tensor([0]).to(device)
    train_IXT = get_IXT(train_mean_t, logvar_t)
    loss = get_loss(train_IXT, train_ITY)
    loss.backward()
    optimizer.step()

# Export the learned (noise-free) bottleneck embeddings for all records.
recon = ae.e1(torch.autograd.Variable(new).to(device)).detach().cpu()
print("Time for Training IB is ", time.time() - t)
np.savetxt(args.output, recon.numpy(), delimiter=",", fmt="%.2f")
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"pandas.read_csv",
"torch.mm",
"torch.exp",
"torch.Tensor",
"numpy.loadtxt",
"torch.nn.Linear",
"math.log",
"torch.logsumexp",
"torch.nn.BCEWithLogitsLoss",
"torch.randn_like",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.clamp",
... | [((227, 244), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (241, 244), True, 'import numpy as np\n'), ((245, 265), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (262, 265), False, 'import torch\n'), ((1436, 1498), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training for MSTREAM-IB"""'}), "(description='Training for MSTREAM-IB')\n", (1459, 1498), False, 'import argparse\n'), ((2673, 2701), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (2699, 2701), False, 'import torch\n'), ((2833, 2844), 'time.time', 'time.time', ([], {}), '()\n', (2842, 2844), False, 'import time\n'), ((487, 511), 'torch.transpose', 'torch.transpose', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (502, 511), False, 'import torch\n'), ((611, 639), 'torch.clamp', 'torch.clamp', (['dist', '(0)', 'np.inf'], {}), '(dist, 0, np.inf)\n', (622, 639), False, 'import torch\n'), ((825, 842), 'math.log', 'math.log', (['n_batch'], {}), '(n_batch)\n', (833, 842), False, 'import math\n'), ((2722, 2759), 'numpy.loadtxt', 'np.loadtxt', (['args.input'], {'delimiter': '""","""'}), "(args.input, delimiter=',')\n", (2732, 2759), True, 'import numpy as np\n'), ((2769, 2809), 'pandas.read_csv', 'pd.read_csv', (['args.label'], {'names': "['label']"}), "(args.label, names=['label'])\n", (2780, 2809), True, 'import pandas as pd\n'), ((300, 325), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (323, 325), False, 'import torch\n'), ((741, 760), 'torch.exp', 'torch.exp', (['logvar_t'], {}), '(logvar_t)\n', (750, 760), False, 'import torch\n'), ((1152, 1161), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1158, 1161), True, 'import numpy as np\n'), ((1286, 1295), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1292, 1295), True, 'import numpy as np\n'), ((2407, 2447), 'torch.nn.Linear', 'nn.Linear', (['args.inputdim', 'args.outputdim'], {}), '(args.inputdim, args.outputdim)\n', (2416, 2447), True, 
'import torch.nn as nn\n'), ((2476, 2504), 'torch.nn.Linear', 'nn.Linear', (['args.outputdim', '(1)'], {}), '(args.outputdim, 1)\n', (2485, 2504), True, 'import torch.nn as nn\n'), ((3616, 3627), 'time.time', 'time.time', ([], {}), '()\n', (3625, 3627), False, 'import time\n'), ((583, 599), 'torch.mm', 'torch.mm', (['x', 'x_t'], {}), '(x, x_t)\n', (591, 599), False, 'import torch\n'), ((920, 967), 'torch.logsumexp', 'torch.logsumexp', ([], {'input': '(-0.5 * dist / var)', 'dim': '(1)'}), '(input=-0.5 * dist / var, dim=1)\n', (935, 967), False, 'import torch\n'), ((1260, 1269), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1266, 1269), True, 'import numpy as np\n'), ((2951, 2972), 'numpy.array', 'np.array', (['label.label'], {}), '(label.label)\n', (2959, 2972), True, 'import numpy as np\n'), ((3129, 3160), 'torch.autograd.Variable', 'Variable', (['new[:args.numRecords]'], {}), '(new[:args.numRecords])\n', (3137, 3160), False, 'from torch.autograd import Variable\n'), ((3187, 3202), 'torch.autograd.Variable', 'Variable', (['label'], {}), '(label)\n', (3195, 3202), False, 'from torch.autograd import Variable\n'), ((3351, 3368), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (3363, 3368), False, 'import torch\n'), ((2580, 2600), 'torch.randn_like', 'torch.randn_like', (['mu'], {}), '(mu)\n', (2596, 2600), False, 'import torch\n'), ((3526, 3554), 'torch.autograd.Variable', 'torch.autograd.Variable', (['new'], {}), '(new)\n', (3549, 3554), False, 'import torch\n')] |
import image_emotion_gender_demo
import sys
import os, time
import cv2
import time
import numpy as np
from utils.inference import load_image
import matplotlib.pyplot as plt
# Resolve paths relative to this script so it works from any working directory.
dir_path = os.path.dirname(os.path.realpath(__file__))
pipe_path = dir_path + "/../term_sig/end"
print(dir_path)
# Named pipe through which an external process signals termination.
# NOTE(review): os.mkfifo is POSIX-only — confirm this never runs on Windows.
if not os.path.exists(pipe_path):
    os.mkfifo(pipe_path)
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)  # default webcam
# Per-emotion history: emotion index (0-6) -> one count per processed sample.
total_results = {0:[],1:[],2:[],3:[],4:[],5:[],6:[]}
i = 0  # number of samples for which generateResults returned detections
maxScale = 2  # y-axis upper bound for the final plot, grown as needed below
# Open the read end non-blocking so pipe.read() returns '' instead of blocking
# while no termination message has been written yet.
pipe_fd = os.open(pipe_path, os.O_RDONLY | os.O_NONBLOCK)
with os.fdopen(pipe_fd) as pipe:
    while True:
        print("==========================================")
        bgr_image = video_capture.read()[1]
        # bgr_image = load_image('/Users/mirekrousal/workspace/CrowMoodRecognition/pics/test.jpg', grayscale=False)
        results = image_emotion_gender_demo.generateResults(bgr_image, i)
        print(results)
        if results:
            i = i + 1
            maxScale = len(results) if len(results) > maxScale else maxScale
            # Count how many detected faces carry each emotion label (r[1])
            # in this sample.
            x = {}
            for r in results:
                cnt = x.get(r[1], 0) + 1
                x[r[1]] = cnt
            # Append this sample's count for every emotion (0 when absent).
            for r in range(0,7):
                total_results.get(r).append(x.get(r,0))
            print(total_results)
        # Empty string means "no message yet" (non-blocking pipe).
        message = pipe.read()
        if message:
            print("Received: '%s'" % message)
            break
        time.sleep(.5)
print("We have attempted this many samples " + str(i))
ind = np.arange(i)  # the x locations for the groups
width = 0.35  # the width of the bars: can also be len(x) sequence

# One (label, colour) pair per emotion index 0-6.  Plotting them in a loop
# replaces the seven near-identical stacked-bar statements and keeps the
# bottom-of-stack bookkeeping in exactly one place.
emotions = [
    ('Angry', '#00FFFF'),
    ('Disgust', '#f4330a'),
    ('Fear', '#c037d8'),
    ('Happy', '#54E730'),
    ('Sad', '#405cee'),
    ('Surprise', '#ea84db'),
    ('Neutral', '#89897f'),
]
bars = []                 # one BarContainer per emotion, in plotting order
bottom_total = [0] * i    # running top of the stack for each sample
for emotion_idx, (label, color) in enumerate(emotions):
    bars.append(plt.bar(ind, total_results[emotion_idx], width,
                        bottom=bottom_total, color=color))
    bottom_total = [x + y for x, y in
                    zip(bottom_total, total_results[emotion_idx])]
plt.ylabel('Count')
plt.xlabel('Sample Number')
plt.title('Emotional Spread Over Time')
plt.xticks(ind)
plt.yticks(np.arange(0, maxScale, 1))
plt.legend([b[0] for b in bars], [label for label, _ in emotions])
plt.show() | [
"matplotlib.pyplot.title",
"os.open",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.legend",
"os.path.realpath",
"os.path.exists",
"time.sleep",
"cv2.VideoCapture",
"numpy.arange",
"matplotlib.pyplot.xticks",
"os.mkfifo",
"os.fdopen",
"matplotlib.pyplot.ylabel",
"... | [((348, 379), 'cv2.namedWindow', 'cv2.namedWindow', (['"""window_frame"""'], {}), "('window_frame')\n", (363, 379), False, 'import cv2\n'), ((396, 415), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (412, 415), False, 'import cv2\n'), ((500, 547), 'os.open', 'os.open', (['pipe_path', '(os.O_RDONLY | os.O_NONBLOCK)'], {}), '(pipe_path, os.O_RDONLY | os.O_NONBLOCK)\n', (507, 547), False, 'import os, time\n'), ((1316, 1328), 'numpy.arange', 'np.arange', (['i'], {}), '(i)\n', (1325, 1328), True, 'import numpy as np\n'), ((1441, 1495), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'total_results[0]', 'width'], {'color': '"""#00FFFF"""'}), "(ind, total_results[0], width, color='#00FFFF')\n", (1448, 1495), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1612), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'total_results[1]', 'width'], {'bottom': 'bottom_total', 'color': '"""#f4330a"""'}), "(ind, total_results[1], width, bottom=bottom_total, color='#f4330a')\n", (1544, 1612), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1768), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'total_results[2]', 'width'], {'bottom': 'bottom_total', 'color': '"""#c037d8"""'}), "(ind, total_results[2], width, bottom=bottom_total, color='#c037d8')\n", (1700, 1768), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1921), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'total_results[3]', 'width'], {'bottom': 'bottom_total', 'color': '"""#54E730"""'}), "(ind, total_results[3], width, bottom=bottom_total, color='#54E730')\n", (1853, 1921), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2075), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'total_results[4]', 'width'], {'bottom': 'bottom_total', 'color': '"""#405cee"""'}), "(ind, total_results[4], width, bottom=bottom_total, color='#405cee')\n", (2007, 2075), True, 'import matplotlib.pyplot as plt\n'), ((2152, 2227), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'total_results[5]', 'width'], {'bottom': 
'bottom_total', 'color': '"""#ea84db"""'}), "(ind, total_results[5], width, bottom=bottom_total, color='#ea84db')\n", (2159, 2227), True, 'import matplotlib.pyplot as plt\n'), ((2309, 2384), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'total_results[6]', 'width'], {'bottom': 'bottom_total', 'color': '"""#89897f"""'}), "(ind, total_results[6], width, bottom=bottom_total, color='#89897f')\n", (2316, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2395, 2414), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (2405, 2414), True, 'import matplotlib.pyplot as plt\n'), ((2415, 2442), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample Number"""'], {}), "('Sample Number')\n", (2425, 2442), True, 'import matplotlib.pyplot as plt\n'), ((2443, 2482), 'matplotlib.pyplot.title', 'plt.title', (['"""Emotional Spread Over Time"""'], {}), "('Emotional Spread Over Time')\n", (2452, 2482), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2498), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ind'], {}), '(ind)\n', (2493, 2498), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2671), 'matplotlib.pyplot.legend', 'plt.legend', (['(p0[0], p1[0], p2[0], p3[0], p4[0], p5[0], p6[0])', "('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral')"], {}), "((p0[0], p1[0], p2[0], p3[0], p4[0], p5[0], p6[0]), ('Angry',\n 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'))\n", (2547, 2671), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2667, 2669), True, 'import matplotlib.pyplot as plt\n'), ((202, 228), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (218, 228), False, 'import os, time\n'), ((295, 320), 'os.path.exists', 'os.path.exists', (['pipe_path'], {}), '(pipe_path)\n', (309, 320), False, 'import os, time\n'), ((326, 346), 'os.mkfifo', 'os.mkfifo', (['pipe_path'], {}), '(pipe_path)\n', (335, 346), False, 'import os, time\n'), ((553, 
571), 'os.fdopen', 'os.fdopen', (['pipe_fd'], {}), '(pipe_fd)\n', (562, 571), False, 'import os, time\n'), ((2510, 2535), 'numpy.arange', 'np.arange', (['(0)', 'maxScale', '(1)'], {}), '(0, maxScale, 1)\n', (2519, 2535), True, 'import numpy as np\n'), ((808, 863), 'image_emotion_gender_demo.generateResults', 'image_emotion_gender_demo.generateResults', (['bgr_image', 'i'], {}), '(bgr_image, i)\n', (849, 863), False, 'import image_emotion_gender_demo\n'), ((1236, 1251), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1246, 1251), False, 'import time\n')] |
import numpy as np
import os
import util.knn as knn
def make_train(train_path, paths):
    """Return the row indices in *paths* of every training image found on disk.

    Walks the digit sub-directories 0-9 of *train_path*, builds the
    extension-free relative name of each file (e.g. '3/img042'), and looks
    each one up in the *paths* array.
    """
    chosen = []
    for digit in range(10):
        digit_dir = os.path.join(train_path, str(digit))
        for fname in os.listdir(digit_dir):
            chosen.append(os.path.join(str(digit), fname.split('.')[0]))
    return [np.where(paths == entry)[0][0] for entry in chosen]
def main(name, train_path, dist_path):
    """Score a kNN classifier on pre-computed HEDs and print 'name,accuracy'."""
    archive = np.load(dist_path)
    classifier = knn.KNearestNeighborsTrainTest(
        archive['distances'],
        archive['train_labels'],
        archive['test_labels'],
    )
    # Map the training images found on disk onto rows of the distance matrix.
    train_indices = make_train(train_path, archive['train_paths'])
    accuracy, _ = classifier.score(train_indices)
    print('{},{}'.format(name, accuracy))
if __name__ == '__main__':
    import argparse
    # Command-line front end: evaluate character-recognition accuracy of a kNN
    # classifier over pre-computed Hausdorff edit distances (HEDs).
    parser = argparse.ArgumentParser(description='Evaluate character '
                            'recognition performance of a kNN based '
                            'approach using the pre-computed HEDs.')
    parser.add_argument('name', help='Run name.')
    parser.add_argument('train_path', help='Path to folder structure '
                        'containing the training images.')
    parser.add_argument('dist_path', help='Path to the NPZ file containing '
                        'the pre-computed HEDs between the graphs of the '
                        'training and the test set.')
    # vars() turns the Namespace into a dict so it can be splatted into main().
    args = vars(parser.parse_args())
    main(**args)
| [
"numpy.load",
"util.knn.KNearestNeighborsTrainTest",
"argparse.ArgumentParser",
"numpy.where"
] | [((415, 433), 'numpy.load', 'np.load', (['dist_path'], {}), '(dist_path)\n', (422, 433), True, 'import numpy as np\n'), ((602, 670), 'util.knn.KNearestNeighborsTrainTest', 'knn.KNearestNeighborsTrainTest', (['distances', 'train_labels', 'test_labels'], {}), '(distances, train_labels, test_labels)\n', (632, 670), True, 'import util.knn as knn\n'), ((906, 1050), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate character recognition performance of a kNN based approach using the pre-computed HEDs."""'}), "(description=\n 'Evaluate character recognition performance of a kNN based approach using the pre-computed HEDs.'\n )\n", (929, 1050), False, 'import argparse\n'), ((316, 336), 'numpy.where', 'np.where', (['(paths == i)'], {}), '(paths == i)\n', (324, 336), True, 'import numpy as np\n')] |
""" Modified by <NAME> <<EMAIL>>
based on <https://github.com/riannevdberg/sylvester-flows/blob/master/models/flows.py>.
Collection of flow strategies
"""
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
#from models.layers import MaskedConv2d, MaskedLinear
from .layers import MaskedConv2d, MaskedLinear
import numpy as np
class Planar(nn.Module):
    """
    Planar normalizing flow from "Variational Inference with Normalizing Flows"
    (Rezende & Mohamed).  The flow parameters (u, w, b) are amortized, i.e.
    produced per data point elsewhere and handed to forward().
    """

    def __init__(self):
        super(Planar, self).__init__()
        self.h = nn.Tanh()
        self.softplus = nn.Softplus()

    def der_h(self, x):
        """Derivative of the tanh nonlinearity."""
        return 1 - self.h(x) ** 2

    def forward(self, zk, u, w, b):
        """
        Apply one planar transformation z' = z + u_hat * h(w^T z + b).

        Expected shapes: u (batch, z_size, 1); w (batch, 1, z_size);
        b (batch, 1, 1); zk (batch, z_size).  Returns the transformed sample
        (batch, z_size) and log|det J| (batch,).
        """
        zk = zk.unsqueeze(2)
        # Re-parameterize u as u_hat so that w^T u_hat >= -1, which keeps the
        # flow invertible (see the appendix of the paper).
        wtu = torch.bmm(w, u)
        m_wtu = self.softplus(wtu) - 1.
        w_sq_norm = torch.sum(w ** 2, dim=2, keepdim=True)
        u_hat = u + ((m_wtu - wtu) * w.transpose(2, 1) / w_sq_norm)
        # Transformed sample.
        pre_activation = torch.bmm(w, zk) + b
        z_new = (zk + u_hat * self.h(pre_activation)).squeeze(2)
        # log|det J| = log|1 + psi^T u_hat| with psi = h'(w^T z + b) * w.
        psi = w * self.der_h(pre_activation)
        log_det_jacobian = torch.log(torch.abs(1 + torch.bmm(psi, u_hat)))
        return z_new, log_det_jacobian.squeeze(2).squeeze(1)
class Sylvester(nn.Module):
    """
    Sylvester normalizing flow with amortized parameters (R1, R2, Q, b).
    Generalizes the planar flow: z' = z + Q R1 h(R2 Q^T z + b).
    """
    def __init__(self, num_ortho_vecs):
        super(Sylvester, self).__init__()
        self.num_ortho_vecs = num_ortho_vecs
        self.h = nn.Tanh()
        # Strictly-upper-triangular mask and diagonal indices, registered as
        # buffers so they follow the module across devices.
        triu_mask = torch.triu(torch.ones(num_ortho_vecs, num_ortho_vecs), diagonal=1).unsqueeze(0)
        diag_idx = torch.arange(0, num_ortho_vecs).long()
        self.register_buffer('triu_mask', Variable(triu_mask))
        self.triu_mask.requires_grad = False
        self.register_buffer('diag_idx', diag_idx)
    def der_h(self, x):
        # Derivative of the flow nonlinearity (tanh).
        return self.der_tanh(x)
    def der_tanh(self, x):
        return 1 - self.h(x) ** 2
    def _forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True, eval_jac=False):
        """
        All flow parameters are amortized. Conditions on diagonals of R1 and R2 for invertibility need to be satisfied
        outside of this function. Computes the following transformation:
        z' = z + QR1 h( R2Q^T z + b)
        or actually
        z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
        :param zk: shape: (batch_size, z_size)
        :param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)
        :param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)
        :param q_ortho: shape (batch_size, z_size, num_ortho_vecs)
        :param b: shape: (batch_size, 1, num_ortho_vecs)
        :param sum_ldj: if True, return log|det J| summed over dimensions, else per-dimension
        :param eval_jac: if True, also return the full Jacobian dz'/dz
        :return: z, log_det_j, jac_zk_z (None unless eval_jac)
        """
        # Amortized flow parameters
        zk = zk.unsqueeze(-2)  # (batch_size, 1, z_size)
        # Save diagonals for log_det_j
        diag_r1 = r1[..., self.diag_idx, self.diag_idx]  # (batch_size, num_ortho_vecs)
        diag_r2 = r2[..., self.diag_idx, self.diag_idx]  # (batch_size, num_ortho_vecs)
        r1_hat = r1  # (batch_size, num_ortho_vecs, num_ortho_vecs)
        r2_hat = r2  # (batch_size, num_ortho_vecs, num_ortho_vecs)
        qr2 = q_ortho @ r2_hat.transpose(-2, -1)  # (batch_size, z_size, num_ortho_vecs)
        qr1 = q_ortho @ r1_hat  # (batch_size, z_size, num_ortho_vecs)
        # print(zk.size(), qr2.size(), b.size())
        r2qzb = zk @ qr2 + b  # (batch_size, 1, num_ortho_vecs)
        z = self.h(r2qzb) @ qr1.transpose(-2, -1) + zk  # (batch_size, 1, z_size)
        z = z.squeeze(-2)  # (batch_size, z_size)
        # Compute log|det J|
        # Output log_det_j in shape (batch_size) instead of (batch_size,1)
        # The determinant reduces to a product over the diagonals of R1, R2
        # scaled by h' — hence only diagonal terms are needed here.
        h_deriv = self.der_h(r2qzb)  # (batch_size, 1, num_ortho_vecs)
        diag_j = diag_r1 * diag_r2  # (batch_size, num_ortho_vecs)
        diag_j = h_deriv.squeeze(-2) * diag_j  # (batch_size, num_ortho_vecs)
        diag_j += 1.
        log_diag_j = diag_j.abs().log()  # (batch_size, num_ortho_vecs)
        if sum_ldj:
            log_det_j = log_diag_j.sum(-1)  # (batch_size,)
        else:
            log_det_j = log_diag_j  # (batch_size,)
        if eval_jac:
            # Full Jacobian: I + QR2^T diag(h') R1^T Q^T.
            jac_zk_z = torch.eye(zk.shape[-1], device=zk.device) + (qr2 * h_deriv) @ qr1.transpose(-2, -1)  # (batch_size, z_size, z_size)
        else:
            jac_zk_z = None
        return z, log_det_j, jac_zk_z
    def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True, eval_jac=False):
        # Thin public wrapper around _forward.
        return self._forward(zk, r1, r2, q_ortho, b, sum_ldj, eval_jac)
    def draw(self, zk, r1, r2, q_ortho, b):
        # Apply the transformation only (no log-det) — used for sampling.
        # Amortized flow parameters
        zk = zk.unsqueeze(-2)  # (batch_size, 1, z_size)
        # Save diagonals for log_det_j
        diag_r1 = r1[..., self.diag_idx, self.diag_idx]  # (batch_size, num_ortho_vecs)
        diag_r2 = r2[..., self.diag_idx, self.diag_idx]  # (batch_size, num_ortho_vecs)
        r1_hat = r1
        r2_hat = r2
        qr2 = q_ortho @ r2_hat.transpose(-2, -1)  # (batch_size, z_size, num_ortho_vecs)
        qr1 = q_ortho @ r1_hat  # (batch_size, z_size, num_ortho_vecs)
        r2qzb = zk @ qr2 + b  # (batch_size, 1, num_ortho_vecs)
        z = self.h(r2qzb) @ qr1.transpose(-2, -1) + zk  # (batch_size, 1, z_size)
        z = z.squeeze(-2)  # (batch_size, z_size)
        return z
class TriangularSylvester(nn.Module):
    """
    Sylvester normalizing flow with Q=P or Q=I (a permutation matrix or the
    identity), so the z_size x z_size triangular matrices R1, R2 act directly
    on z without an explicit orthogonal basis.
    """
    def __init__(self, z_size):
        super(TriangularSylvester, self).__init__()
        self.z_size = z_size
        self.h = nn.Tanh()
        # Indices of the main diagonal, registered as a buffer so it follows
        # the module across devices.
        diag_idx = torch.arange(0, z_size).long()
        self.register_buffer('diag_idx', diag_idx)
    def der_h(self, x):
        # Derivative of the flow nonlinearity (tanh).
        return self.der_tanh(x)
    def der_tanh(self, x):
        return 1 - self.h(x) ** 2
    def _forward(self, zk, r1, r2, b, permute_z=None, sum_ldj=True):
        """
        All flow parameters are amortized. conditions on diagonals of R1 and R2 need to be satisfied
        outside of this function.
        Computes the following transformation:
        z' = z + QR1 h( R2Q^T z + b)
        or actually
        z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
        with Q = P a permutation matrix (equal to identity matrix if permute_z=None)
        :param zk: shape: (batch_size, z_size)
        :param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
        :param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
        :param b: shape: (batch_size, 1, self.z_size)
        :param permute_z: index tensor realizing the permutation P, or None
        :return: z, log_det_j
        """
        # Amortized flow parameters
        zk = zk.unsqueeze(1)
        # Save diagonals for log_det_j
        diag_r1 = r1[:, self.diag_idx, self.diag_idx]
        diag_r2 = r2[:, self.diag_idx, self.diag_idx]
        if permute_z is not None:
            # permute order of z
            z_per = zk[:, :, permute_z]
        else:
            z_per = zk
        r2qzb = torch.bmm(z_per, r2.transpose(2, 1)) + b
        z = torch.bmm(self.h(r2qzb), r1.transpose(2, 1))
        if permute_z is not None:
            # permute order of z again back again
            z = z[:, :, permute_z]
        z += zk
        z = z.squeeze(1)
        # Compute log|det J|
        # Output log_det_j in shape (batch_size) instead of (batch_size,1)
        # Determinant reduces to a product over the diagonals of R1, R2
        # scaled by h'.
        diag_j = diag_r1 * diag_r2
        diag_j = self.der_h(r2qzb).squeeze(1) * diag_j
        diag_j += 1.
        log_diag_j = diag_j.abs().log()
        if sum_ldj:
            log_det_j = log_diag_j.sum(-1)
        else:
            log_det_j = log_diag_j
        return z, log_det_j
    def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):
        # NOTE(review): positionally, q_ortho is received by _forward as `b`
        # and b as `permute_z` — callers appear to pass (b, permute_z) in
        # these slots (same layout as the upstream sylvester-flows repo);
        # confirm against the call sites before renaming.
        return self._forward(zk, r1, r2, q_ortho, b, sum_ldj)
class IAF(nn.Module):
    """
    PyTorch implementation of inverse autoregressive flows as presented in
    "Improving Variational Inference with Inverse Autoregressive Flow" by <NAME>, <NAME>,
    <NAME>, <NAME>, <NAME>, <NAME>.
    Inverse Autoregressive Flow with either MADE MLPs or Pixel CNNs. Contains several flows. Each transformation
    takes as an input the previous stochastic z, and a context h. The structure of each flow is then as follows:
    z <- autoregressive_layer(z) + h, allow for diagonal connections
    z <- autoregressive_layer(z), allow for diagonal connections
    :
    z <- autoregressive_layer(z), do not allow for diagonal connections.
    Note that the size of h needs to be the same as h_size, which is the width of the MADE layers.
    """
    def __init__(self, z_size, num_flows=2, num_hidden=0, h_size=50, forget_bias=1., conv2d=False):
        super(IAF, self).__init__()
        self.z_size = z_size
        self.num_flows = num_flows
        self.num_hidden = num_hidden
        self.h_size = h_size
        self.conv2d = conv2d
        # Choose the masked (autoregressive) layer type.
        if not conv2d:
            ar_layer = MaskedLinear
        else:
            ar_layer = MaskedConv2d
        self.activation = torch.nn.ELU
        # self.activation = torch.nn.ReLU
        self.forget_bias = forget_bias
        # NOTE(review): self.flows is a plain Python list, so its Sequentials
        # are NOT registered as submodules; parameters are tracked manually
        # via param_list (and .cuda() is applied manually below) — confirm
        # this is intentional before refactoring.
        self.flows = []
        self.param_list = []
        # For reordering z after each flow
        flip_idx = torch.arange(self.z_size - 1, -1, -1).long()
        self.register_buffer('flip_idx', flip_idx)
        # Each flow k holds: (z features, hidden z/h features, mean head, std/gate head).
        for k in range(num_flows):
            arch_z = [ar_layer(z_size, h_size), self.activation()]
            self.param_list += list(arch_z[0].parameters())
            z_feats = torch.nn.Sequential(*arch_z)
            arch_zh = []
            for j in range(num_hidden):
                arch_zh += [ar_layer(h_size, h_size), self.activation()]
                self.param_list += list(arch_zh[-2].parameters())
            zh_feats = torch.nn.Sequential(*arch_zh)
            linear_mean = ar_layer(h_size, z_size, diagonal_zeros=True)
            linear_std = ar_layer(h_size, z_size, diagonal_zeros=True)
            self.param_list += list(linear_mean.parameters())
            self.param_list += list(linear_std.parameters())
            if torch.cuda.is_available():
                z_feats = z_feats.cuda()
                zh_feats = zh_feats.cuda()
                linear_mean = linear_mean.cuda()
                linear_std = linear_std.cuda()
            self.flows.append((z_feats, zh_feats, linear_mean, linear_std))
        self.param_list = torch.nn.ParameterList(self.param_list)
    def build_mask(self, in_features, out_features, diagonal_zeros=False):
        # Build the binary autoregressive mask used to reconstruct the masked
        # layers' effective weights when tracking the Jacobian in forward().
        n_in, n_out = in_features, out_features
        assert n_in % n_out == 0 or n_out % n_in == 0
        mask = np.ones((n_in, n_out), dtype=np.float32)
        if n_out >= n_in:
            k = n_out // n_in
            for i in range(n_in):
                mask[i + 1:, i * k:(i + 1) * k] = 0
                if diagonal_zeros:
                    mask[i:i + 1, i * k:(i + 1) * k] = 0
        else:
            k = n_in // n_out
            for i in range(n_out):
                mask[(i + 1) * k:, i:i + 1] = 0
                if diagonal_zeros:
                    mask[i * k:(i + 1) * k:, i:i + 1] = 0
        return mask
    def forward(self, z, h_context, eval_jac=False):
        # Applies all flows in sequence, accumulating the sum of per-flow
        # log-determinants and (optionally) the full Jacobian d z_K / d z_0.
        logdets = 0.
        jaceps_z = torch.ones_like(z).to(z.device)
        jaceps_z = jaceps_z.diag_embed()
        for i, flow in enumerate(self.flows):
            if (i + 1) % 2 == 0 and not self.conv2d:
                # reverse ordering to help mixing
                z = z[:, self.flip_idx]
                jaceps_z = jaceps_z[:, :, self.flip_idx]
            # print(flow[0])
            h = flow[0](z)
            # for w in flow[0].state_dict(): print(w)
            grad_h = (h > 0) * 1. + (h < 0) * (h + 1)  # ELU gradient
            # NOTE(review): torch.tensor(weight) copies and detaches the
            # weight from autograd — presumably the Jacobian is only needed
            # for evaluation, not for backprop; confirm.
            jac_h1_pre = torch.tensor(self.build_mask(z.size()[-1], z.size()[-1]), device=h.device) * torch.tensor(flow[0][0].weight)
            jac_h1 = jaceps_z @ jac_h1_pre @ grad_h.diag_embed()
            h = h + h_context
            # for w in flow[1].state_dict(): print(w)
            h = flow[1](h)
            grad_h = (h > 0) * 1. + (h < 0) * (h + 1)
            jac_h2_pre = torch.tensor(self.build_mask(z.size()[-1], z.size()[-1]), device=h.device) * torch.tensor(flow[1][0].weight)
            jac_h2 = jac_h1 @ jac_h2_pre @ grad_h.diag_embed()
            mean, jac_h3 = flow[2](h, eval_jac=True)
            jac_h3 = jac_h2 @ jac_h3
            # Gated update: z <- gate * z + (1 - gate) * mean.
            # NOTE(review): F.sigmoid is deprecated in newer torch releases
            # in favour of torch.sigmoid.
            gate = F.sigmoid(flow[3](h) + self.forget_bias)
            l_gate = 1 - gate
            gate_grad = gate * (1 - gate)
            gate_grad = gate_grad.diag_embed()
            gate_jac = jac_h3 @ gate_grad
            z = gate * z + (1 - gate) * mean
            # Jacobian of the gated update w.r.t. the flow input, chained
            # into the running product below.
            jac_zkp1 = gate_jac * z.diag_embed() + gate.diag_embed() + jac_h3 * l_gate.diag_embed() - gate_jac @ mean.diag_embed()
            logdets += torch.sum(gate.log().view(gate.size(0), -1), 1)
            if eval_jac: jaceps_z = jaceps_z @ jac_zkp1
        # print(jaceps_z.size())
        return z, logdets, jaceps_z
| [
"torch.ones_like",
"torch.ones",
"torch.bmm",
"torch.eye",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.autograd.Variable",
"torch.nn.Softplus",
"numpy.ones",
"torch.cuda.is_available",
"torch.nn.ParameterList",
"torch.arange",
"torch.sum",
"torch.tensor"
] | [((719, 728), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (726, 728), True, 'import torch.nn as nn\n'), ((753, 766), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (764, 766), True, 'import torch.nn as nn\n'), ((1543, 1558), 'torch.bmm', 'torch.bmm', (['w', 'u'], {}), '(w, u)\n', (1552, 1558), False, 'import torch\n'), ((1618, 1656), 'torch.sum', 'torch.sum', (['(w ** 2)'], {'dim': '(2)', 'keepdim': '(True)'}), '(w ** 2, dim=2, keepdim=True)\n', (1627, 1656), False, 'import torch\n'), ((2319, 2328), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2326, 2328), True, 'import torch.nn as nn\n'), ((6301, 6310), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6308, 6310), True, 'import torch.nn as nn\n'), ((11021, 11060), 'torch.nn.ParameterList', 'torch.nn.ParameterList', (['self.param_list'], {}), '(self.param_list)\n', (11043, 11060), False, 'import torch\n'), ((11255, 11295), 'numpy.ones', 'np.ones', (['(n_in, n_out)'], {'dtype': 'np.float32'}), '((n_in, n_out), dtype=np.float32)\n', (11262, 11295), True, 'import numpy as np\n'), ((1772, 1788), 'torch.bmm', 'torch.bmm', (['w', 'zk'], {}), '(w, zk)\n', (1781, 1788), False, 'import torch\n'), ((2531, 2550), 'torch.autograd.Variable', 'Variable', (['triu_mask'], {}), '(triu_mask)\n', (2539, 2550), False, 'from torch.autograd import Variable\n'), ((10143, 10171), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*arch_z'], {}), '(*arch_z)\n', (10162, 10171), False, 'import torch\n'), ((10399, 10428), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*arch_zh'], {}), '(*arch_zh)\n', (10418, 10428), False, 'import torch\n'), ((10711, 10736), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10734, 10736), False, 'import torch\n'), ((2449, 2480), 'torch.arange', 'torch.arange', (['(0)', 'num_ortho_vecs'], {}), '(0, num_ortho_vecs)\n', (2461, 2480), False, 'import torch\n'), ((4955, 4996), 'torch.eye', 'torch.eye', (['zk.shape[-1]'], {'device': 'zk.device'}), '(zk.shape[-1], 
device=zk.device)\n', (4964, 4996), False, 'import torch\n'), ((6331, 6354), 'torch.arange', 'torch.arange', (['(0)', 'z_size'], {}), '(0, z_size)\n', (6343, 6354), False, 'import torch\n'), ((9862, 9899), 'torch.arange', 'torch.arange', (['(self.z_size - 1)', '(-1)', '(-1)'], {}), '(self.z_size - 1, -1, -1)\n', (9874, 9899), False, 'import torch\n'), ((11865, 11883), 'torch.ones_like', 'torch.ones_like', (['z'], {}), '(z)\n', (11880, 11883), False, 'import torch\n'), ((12466, 12497), 'torch.tensor', 'torch.tensor', (['flow[0][0].weight'], {}), '(flow[0][0].weight)\n', (12478, 12497), False, 'import torch\n'), ((12831, 12862), 'torch.tensor', 'torch.tensor', (['flow[1][0].weight'], {}), '(flow[1][0].weight)\n', (12843, 12862), False, 'import torch\n'), ((1967, 1988), 'torch.bmm', 'torch.bmm', (['psi', 'u_hat'], {}), '(psi, u_hat)\n', (1976, 1988), False, 'import torch\n'), ((2361, 2403), 'torch.ones', 'torch.ones', (['num_ortho_vecs', 'num_ortho_vecs'], {}), '(num_ortho_vecs, num_ortho_vecs)\n', (2371, 2403), False, 'import torch\n')] |
import argparse
import gzip
import json
import logging
import os
import statistics
from collections import defaultdict
from time import time
import bcolz as bz
import numpy as np
import pyBigWig
import pysam
from functions import *
def get_chr_len(bam_file, chrom):
    """Return the reference length (LN) of *chrom* from the BAM file header."""
    with pysam.AlignmentFile(bam_file, 'rb') as bam:
        lengths = [sq['LN'] for sq in bam.header['SQ'] if sq['SN'] == chrom]
    # IndexError here means the chromosome is absent from the header.
    return lengths[0]
def count_clipped_read_positions(cpos_cnt):
    '''
    :param cpos_cnt: dictionary of clipped read positions (keys) and counts of clipped reads per position (values) as
    returned by the clipped_read_pos.py script
    :return: None. Logs, for support thresholds 1..5, how many positions have
    at least that many clipped reads.
    '''
    for min_support in range(1, 6):
        n_positions = sum(1 for cnt in cpos_cnt.values() if cnt >= min_support)
        logging.info('Number of positions with at least %d clipped reads: %d' %
                     (min_support, n_positions))
def load_channel(chr_list, outDir, ch):
    """Load pre-computed channel data for the chromosomes in chr_list.

    Whole-genome channels ('split_reads', 'clipped_reads') live in a single
    gzipped JSON file; per-chromosome channels ('coverage', 'snv',
    'clipped_read_distance') live in one gzipped .npy/.json file per
    chromosome. Returns a dict: chrom -> {channel name -> data}.
    """
    channel_names_wg = ['split_reads', 'clipped_reads']
    channel_names = ['coverage', 'clipped_read_distance', 'snv']
    channel_data = defaultdict(dict)
    if ch in channel_names_wg:
        suffix = '.json.gz'
        filename = os.path.join(outDir, ch, ch + suffix)
        with gzip.GzipFile(filename, 'r') as fin:
            logging.info('Reading %s...' % ch)
            # Each whole-genome file bundles several related structures;
            # only some of them are kept below.
            if ch == 'split_reads':
                positions_with_min_support_ls, positions_with_min_support_rs, total_reads_coord_min_support, split_reads, split_read_distance = json.loads(
                    fin.read().decode('utf-8'))
            elif ch == 'clipped_reads':
                clipped_reads, clipped_reads_inversion, clipped_reads_duplication, clipped_reads_translocation = json.loads(
                    fin.read().decode('utf-8'))
        # Keep only the requested chromosomes and free the rest eagerly
        # (these structures can be large).
        for chrom in chr_list:
            if ch == 'split_reads':
                channel_data[chrom]['split_reads'] = split_reads[chrom]
                channel_data[chrom][
                    'split_read_distance'] = split_read_distance[chrom]
                del split_reads, split_read_distance
            elif ch == 'clipped_reads':
                channel_data[chrom]['clipped_reads'] = clipped_reads[chrom]
                channel_data[chrom][
                    'clipped_reads_inversion'] = clipped_reads_inversion[chrom]
                channel_data[chrom][
                    'clipped_reads_duplication'] = clipped_reads_duplication[
                        chrom]
                channel_data[chrom][
                    'clipped_reads_translocation'] = clipped_reads_translocation[
                        chrom]
                del clipped_reads, clipped_reads_inversion, \
                    clipped_reads_duplication, clipped_reads_translocation
    elif ch in channel_names:
        logging.info('Loading data for channel %s' % ch)
        for chrom in chr_list:
            logging.info('Loading data for Chr%s' % chrom)
            # 'snv' and 'coverage' are stored as gzipped numpy arrays,
            # everything else as gzipped JSON.
            suffix = '.npy.gz' if ch in ['snv', 'coverage'] else '.json.gz'
            filename = os.path.join(outDir, ch, '_'.join([chrom, ch + suffix]))
            assert os.path.isfile(filename), filename + " does not exists!"
            logging.info('Reading %s for Chr%s' % (ch, chrom))
            if suffix == '.npy.gz':
                with gzip.GzipFile(filename, 'r') as fin:
                    channel_data[chrom][ch] = np.load(fin)
            else:
                with gzip.GzipFile(filename, 'r') as fin:
                    channel_data[chrom][ch] = json.loads(
                        fin.read().decode('utf-8'))
    logging.info('End of reading')
    return channel_data
def create_carray(ibam, chrom, twobit, outDir, cmd_name):
    """Assemble the per-position channel matrix for one chromosome.

    Builds a (chrlen, 53) float64 matrix by filling consecutive column
    blocks, one channel family at a time (channel_index tracks the next
    free column), appends a 5-column one-hot encoding of the reference
    sequence, and persists the result as a bcolz carray under
    outDir/cmd_name/<chrom>_carray.
    NOTE(review): load_channel is called once per channel name, so the
    whole-genome datasets are re-read from disk several times.
    """
    chrlen = get_chr_len(ibam, chrom)
    channel_index = 0
    n_channels = 53
    chr_array = np.zeros(shape=(chrlen, n_channels), dtype=np.float64)
    # dictionary of key choices
    direction_list = {
        'clipped_reads': [
            'left_F', 'left_R', 'right_F', 'right_R', 'disc_left_F',
            'disc_left_R', 'disc_right_F', 'disc_right_R', 'D_left_F',
            'D_left_R', 'D_right_F', 'D_right_R', 'I_F', 'I_R'
        ],
        'split_reads': ['left_F', 'left_R', 'right_F', 'right_R'],
        'split_read_distance': ['left_F', 'left_R', 'right_F', 'right_R'],
        'clipped_reads_inversion': ['before', 'after', 'before_split', 'after_split'],
        'clipped_reads_duplication': ['before', 'after', 'before_split', 'after_split'],
        'clipped_reads_translocation': ['opposite', 'same', 'opposite_split', 'same_split'],
        'clipped_read_distance': ['forward', 'reverse']
    }
    channels = ['coverage', 'snv', 'clipped_reads', 'split_reads', 'clipped_read_distance',
                'clipped_reads_inversion', 'clipped_reads_duplication', 'clipped_reads_translocation', 'split_read_distance']
    for current_channel in channels:
        # Several channels share one on-disk dataset; map the channel name
        # to the dataset that load_channel must open.
        current_channel_dataset = current_channel
        if current_channel in ['split_reads', 'split_read_distance']:
            current_channel_dataset = 'split_reads'
        elif current_channel in ['clipped_reads', 'clipped_reads_inversion', 'clipped_reads_duplication', 'clipped_reads_translocation']:
            current_channel_dataset = 'clipped_reads'
        channel_data = load_channel([chrom], outDir, current_channel_dataset)
        logging.info("Adding channel %s at index %d" %
                     (current_channel, channel_index))
        if current_channel in ('coverage', 'snv'):
            # Dense per-position arrays: copy all their columns in one slice.
            # logging.info("snv array shape %d" % channel_data[chrom][current_channel].shape[1])
            # if current_channel == 'snv' and channel_data[chrom][current_channel].shape[1] == 2:
            #     channel_data[chrom][current_channel] = np.delete(channel_data[chrom][current_channel], 2, 0)
            ch_num = channel_data[chrom][current_channel].shape[1]
            chr_array[:, channel_index:channel_index +
                      ch_num] = channel_data[chrom][current_channel][:
                                                                     chrlen, :]
            channel_index += ch_num
            del channel_data[chrom][current_channel]
        elif current_channel in ('clipped_reads', 'split_reads', 'clipped_reads_inversion', 'clipped_reads_duplication', 'clipped_reads_translocation'):
            # Sparse position->count dicts: one column per direction key.
            for split_direction in direction_list[current_channel]:
                if len(channel_data[chrom][current_channel]
                       [split_direction]) > 0:
                    idx = np.fromiter(channel_data[chrom][current_channel]
                                      [split_direction].keys(),
                                      dtype=int)
                    vals = np.fromiter(channel_data[chrom][current_channel]
                                       [split_direction].values(),
                                       dtype=np.float32)
                    if len(idx) > 0:
                        chr_array[idx, channel_index] = vals
                        assert chr_array[idx, channel_index].any(), \
                            print('{}:{} is all zeros!'.format(
                                current_channel, split_direction))
                channel_index += 1
                del channel_data[chrom][current_channel][split_direction]
        elif current_channel == 'clipped_read_distance':
            # position -> list of distances; reduce each list to its median.
            for split_direction in direction_list[current_channel]:
                for clipped_arrangement in ['left', 'right', 'all']:
                    idx = np.array(
                        list(
                            map(
                                int, channel_data[chrom][current_channel]
                                [split_direction]
                                [clipped_arrangement].keys())))
                    vals = np.array(
                        list(
                            map(
                                statistics.median, channel_data[chrom]
                                [current_channel][split_direction]
                                [clipped_arrangement].values())))
                    if len(idx) > 0:
                        chr_array[idx, channel_index] = vals
                    channel_index += 1
                    del channel_data[chrom][current_channel][split_direction][
                        clipped_arrangement]
        elif current_channel == 'split_read_distance':
            # position -> list of distances; reduce each list to its median.
            for split_direction in direction_list[current_channel]:
                idx = np.array(
                    list(
                        map(
                            int, channel_data[chrom][current_channel]
                            [split_direction].keys())))
                vals = np.array(
                    list(
                        map(
                            statistics.median, channel_data[chrom]
                            [current_channel][split_direction].values())))
                if len(idx) > 0:
                    chr_array[idx, channel_index] = vals
                channel_index += 1
                del channel_data[chrom][current_channel][split_direction]
    # Final 5 columns: one-hot encoding of the reference sequence (A,T,C,G,N),
    # provided by get_one_hot_sequence_by_list (from functions import *).
    current_channel = 'one_hot_encoding'
    logging.info("Adding channel %s at index %d" %
                 (current_channel, channel_index))
    nuc_list = ['A', 'T', 'C', 'G', 'N']
    chr_array[:, channel_index:channel_index +
              len(nuc_list)] = get_one_hot_sequence_by_list(twobit, chrom, list(np.arange(chrlen)))
    channel_index += len(nuc_list)
    logging.info("chr_array shape: %s" % str(chr_array.shape))
    outfile = os.path.join(outDir, cmd_name, chrom + '_carray')
    logging.info("Writing carray...")
    a = bz.carray(chr_array, rootdir=outfile, mode='w')
    a.flush()
def main():
    """Parse command-line options, configure logging and build the channel array.

    Creates ``<outputpath>/chr_array`` if missing, writes a per-chromosome log
    file there, then delegates the actual work to ``create_carray`` and logs
    the elapsed time.
    """
    default_chr = '22'
    parser = argparse.ArgumentParser(
        description='Create channels from saved data')
    parser.add_argument('-b',
                        '--bam',
                        type=str,
                        default='../../data/test.bam',
                        help="Specify input file (BAM)")
    parser.add_argument('-c',
                        '--chr',
                        type=str,
                        default=default_chr,
                        help="Specify chromosome")
    parser.add_argument('-t',
                        '--twobit',
                        type=str,
                        default='../../data/test.2bit',
                        help="Specify input file (2bit)")
    parser.add_argument('-o',
                        '--out',
                        type=str,
                        default='chr_array/'+default_chr+'_chr_array',
                        help="Specify output")
    parser.add_argument(
        '-p',
        '--outputpath',
        type=str,
        default='.',
        help="Specify output path")
    parser.add_argument('-l',
                        '--logfile',
                        default='chr_array.log',
                        help='File in which to write logs.')
    # BUG FIX: window size is numeric — it was declared type=str with an int
    # default, so a user-supplied value arrived as a string while the default
    # stayed an int.
    parser.add_argument('-w',
                        '--window',
                        type=int,
                        default=200,
                        help="Specify window size")
    args = parser.parse_args()

    cmd_name = 'chr_array'
    output_dir = os.path.join(args.outputpath, cmd_name)
    os.makedirs(output_dir, exist_ok=True)
    logfilename = os.path.join(output_dir, args.chr+'_'+args.logfile)
    FORMAT = '%(asctime)s %(message)s'
    logging.basicConfig(format=FORMAT,
                        filename=logfilename,
                        filemode='w',
                        level=logging.INFO)

    t0 = time()
    create_carray(ibam=args.bam,
                  chrom=args.chr,
                  twobit=args.twobit,
                  outDir=args.outputpath,
                  cmd_name=cmd_name)
    logging.info('Elapsed time channel_maker_real = %f mins' % (time() - t0))


if __name__ == '__main__':
    main()
| [
"numpy.load",
"argparse.ArgumentParser",
"os.makedirs",
"bcolz.carray",
"logging.basicConfig",
"pysam.AlignmentFile",
"numpy.zeros",
"time.time",
"collections.defaultdict",
"logging.info",
"os.path.isfile",
"numpy.arange",
"gzip.GzipFile",
"os.path.join"
] | [((1132, 1149), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1143, 1149), False, 'from collections import defaultdict\n'), ((3816, 3870), 'numpy.zeros', 'np.zeros', ([], {'shape': '(chrlen, n_channels)', 'dtype': 'np.float64'}), '(shape=(chrlen, n_channels), dtype=np.float64)\n', (3824, 3870), True, 'import numpy as np\n'), ((9164, 9249), 'logging.info', 'logging.info', (["('Adding channel %s at index %d' % (current_channel, channel_index))"], {}), "('Adding channel %s at index %d' % (current_channel, channel_index)\n )\n", (9176, 9249), False, 'import logging\n'), ((9562, 9611), 'os.path.join', 'os.path.join', (['outDir', 'cmd_name', "(chrom + '_carray')"], {}), "(outDir, cmd_name, chrom + '_carray')\n", (9574, 9611), False, 'import os\n'), ((9616, 9649), 'logging.info', 'logging.info', (['"""Writing carray..."""'], {}), "('Writing carray...')\n", (9628, 9649), False, 'import logging\n'), ((9658, 9705), 'bcolz.carray', 'bz.carray', (['chr_array'], {'rootdir': 'outfile', 'mode': '"""w"""'}), "(chr_array, rootdir=outfile, mode='w')\n", (9667, 9705), True, 'import bcolz as bz\n'), ((9770, 9840), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create channels from saved data"""'}), "(description='Create channels from saved data')\n", (9793, 9840), False, 'import argparse\n'), ((11261, 11300), 'os.path.join', 'os.path.join', (['args.outputpath', 'cmd_name'], {}), '(args.outputpath, cmd_name)\n', (11273, 11300), False, 'import os\n'), ((11305, 11343), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (11316, 11343), False, 'import os\n'), ((11362, 11417), 'os.path.join', 'os.path.join', (['output_dir', "(args.chr + '_' + args.logfile)"], {}), "(output_dir, args.chr + '_' + args.logfile)\n", (11374, 11417), False, 'import os\n'), ((11457, 11551), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT', 'filename': 'logfilename', 'filemode': 
'"""w"""', 'level': 'logging.INFO'}), "(format=FORMAT, filename=logfilename, filemode='w',\n level=logging.INFO)\n", (11476, 11551), False, 'import logging\n'), ((11630, 11636), 'time.time', 'time', ([], {}), '()\n', (11634, 11636), False, 'from time import time\n'), ((279, 314), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam_file', '"""rb"""'], {}), "(bam_file, 'rb')\n", (298, 314), False, 'import pysam\n'), ((1229, 1266), 'os.path.join', 'os.path.join', (['outDir', 'ch', '(ch + suffix)'], {}), '(outDir, ch, ch + suffix)\n', (1241, 1266), False, 'import os\n'), ((5349, 5434), 'logging.info', 'logging.info', (["('Adding channel %s at index %d' % (current_channel, channel_index))"], {}), "('Adding channel %s at index %d' % (current_channel, channel_index)\n )\n", (5361, 5434), False, 'import logging\n'), ((1280, 1308), 'gzip.GzipFile', 'gzip.GzipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1293, 1308), False, 'import gzip\n'), ((1329, 1363), 'logging.info', 'logging.info', (["('Reading %s...' % ch)"], {}), "('Reading %s...' 
% ch)\n", (1341, 1363), False, 'import logging\n'), ((2816, 2864), 'logging.info', 'logging.info', (["('Loading data for channel %s' % ch)"], {}), "('Loading data for channel %s' % ch)\n", (2828, 2864), False, 'import logging\n'), ((9430, 9447), 'numpy.arange', 'np.arange', (['chrlen'], {}), '(chrlen)\n', (9439, 9447), True, 'import numpy as np\n'), ((2908, 2954), 'logging.info', 'logging.info', (["('Loading data for Chr%s' % chrom)"], {}), "('Loading data for Chr%s' % chrom)\n", (2920, 2954), False, 'import logging\n'), ((3130, 3154), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3144, 3154), False, 'import os\n'), ((3200, 3250), 'logging.info', 'logging.info', (["('Reading %s for Chr%s' % (ch, chrom))"], {}), "('Reading %s for Chr%s' % (ch, chrom))\n", (3212, 3250), False, 'import logging\n'), ((3603, 3633), 'logging.info', 'logging.info', (['"""End of reading"""'], {}), "('End of reading')\n", (3615, 3633), False, 'import logging\n'), ((11885, 11891), 'time.time', 'time', ([], {}), '()\n', (11889, 11891), False, 'from time import time\n'), ((3309, 3337), 'gzip.GzipFile', 'gzip.GzipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (3322, 3337), False, 'import gzip\n'), ((3392, 3404), 'numpy.load', 'np.load', (['fin'], {}), '(fin)\n', (3399, 3404), True, 'import numpy as np\n'), ((3444, 3472), 'gzip.GzipFile', 'gzip.GzipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (3457, 3472), False, 'import gzip\n')] |
'''
File name: nonMaxSup.py
Author: <NAME>
Date created: Dec. 8, 2019
'''
import numpy as np
from helpers import get_edge_angle
'''
File clarification:
Find local maximum edge pixel using NMS along the line of the gradient
- Input Mag: H x W matrix represents the magnitude of derivatives
- Input Ori: H x W matrix represents the orientation of derivatives
- Output M: H x W binary matrix represents the edge map after non-maximum suppression
'''
def nonMaxSup(Mag, Ori, grad_Ori):
    """Non-maximum suppression of edge magnitudes along the gradient line.

    Each interior pixel's orientation is quantized by ``get_edge_angle`` to
    one of four directions (0, pi/4, pi/2, everything else = 3*pi/4) and the
    pixel is kept only if its magnitude is >= both neighbours along that
    direction.

    - Input Mag: H x W matrix of derivative magnitudes
    - Input Ori: H x W matrix of derivative orientations
    - Input grad_Ori: unused here; kept for interface compatibility
    - Output: H x W binary matrix (border pixels stay 0)
    """
    suppressed = np.zeros_like(Mag)
    # Neighbour offsets (di, dj) per quantized edge orientation; anything
    # not listed (the 3*pi/4 case) falls through to the anti-diagonal.
    offsets = {
        0: (0, 1),          # horizontal neighbours
        np.pi / 4: (1, 1),  # main diagonal
        np.pi / 2: (1, 0),  # vertical neighbours
    }
    rows, cols = suppressed.shape
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            g0 = Mag[i, j]
            edge_ori = get_edge_angle(Ori[i, j])
            di, dj = offsets.get(edge_ori, (1, -1))
            # Keep the pixel only if it is a local maximum along the line.
            if g0 >= Mag[i + di, j + dj] and g0 >= Mag[i - di, j - dj]:
                suppressed[i, j] = 1
    return suppressed
| [
"helpers.get_edge_angle",
"numpy.copy"
] | [((748, 760), 'numpy.copy', 'np.copy', (['Mag'], {}), '(Mag)\n', (755, 760), True, 'import numpy as np\n'), ((940, 965), 'helpers.get_edge_angle', 'get_edge_angle', (['Ori[i, j]'], {}), '(Ori[i, j])\n', (954, 965), False, 'from helpers import get_edge_angle\n')] |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import os
import utils_snpko as utils
logger = utils.logger
def parse_knockoff_results(args, df_uncorrected=None):
    """Parse the knockoff trial report and join it with uncorrected statistics.

    Reads ``<args.results_dir>/knockoff_trials.txt`` line by line, collecting
    (label, FDR type, SNP, observed frequency) records, looks up the matching
    uncorrected p-value and odds ratio in ``df_uncorrected`` and writes the
    combined table to ``<args.results_dir>/all_results.csv``.

    Args:
        args: namespace with at least ``results_dir``.
        df_uncorrected (pd.DataFrame | None): pre-loaded table with columns
            'SNP', 'label', 'uncorrected_p_value', 'uncorrected_odds_ratio';
            read from ``uncorrected.csv`` when None.

    Returns:
        tuple: (df_results, fdr) — the combined DataFrame and the last
        target FDR value parsed from the report.
    """
    if df_uncorrected is None:
        df_uncorrected = pd.read_csv(os.path.join(
            args.results_dir, 'uncorrected.csv'))
    # Group once so each (SNP, label) pair can be resolved to its row index
    # without re-scanning the DataFrame inside the parsing loop.
    grouped_uncorrected = df_uncorrected.groupby(['SNP', 'label'])
    label = None
    fdr_type = None
    SNP = None
    result_table = []
    with open(os.path.join(args.results_dir, 'knockoff_trials.txt')) as fp:
        for line in fp:
            # chomp: drop the trailing newline
            line = line[:-1]
            if line.startswith('Target FDR: '):
                # The observed FDR takes precedence over args.fdr
                # NOTE(review): ``fdr`` stays unbound if the report has no
                # 'Target FDR:' line before the first SNP entry — confirm the
                # report always starts with one.
                fdr = float(line[12:-1]) / 100.0
            elif line.startswith('Label: '):
                label = line[7:]
            elif line.startswith('Type of FDR: '):
                fdr_type = line[13:]
            elif line.startswith('  rs'):
                # NOTE(review): the prefix test matches 4 characters but the
                # slice below drops only 3 — verify the intended indent width
                # of the SNP lines against the report writer.
                (SNP, obs_freq) = line[3:].split(' : ')
                # Observed frequency is reported as a percentage string.
                obs_freq = float(obs_freq[:-1]) / 100.0
                index = grouped_uncorrected.groups[(SNP, label)][0]
                uncorrected_p_value = df_uncorrected[
                    'uncorrected_p_value'].values[index]
                uncorrected_odds_ratio = df_uncorrected[
                    'uncorrected_odds_ratio'].values[index]
                results = (label, fdr_type, SNP, obs_freq,
                           uncorrected_p_value, uncorrected_odds_ratio, fdr)
                result_table.append(results)
    # Produce simpler summary output
    (label_list, fdr_type_list, SNP_list, obs_freq_list,
     uncorrected_p_value_list, uncorrected_odds_ratio_list, fdr_list) = zip(*result_table)
    df_results = pd.DataFrame(
        {'label': label_list, 'fdr_type': fdr_type_list,
         'SNP': SNP_list, 'obs_freq': obs_freq_list,
         'uncorrected_p_value': uncorrected_p_value_list,
         'uncorrected_odds_ratio': uncorrected_odds_ratio_list,
         'fdr': fdr_list
         })
    df_results.to_csv(os.path.join(args.results_dir, 'all_results.csv'),
                      index=False)
    return(df_results, fdr)
def summarize(args):
    """Summarize results about significantly predictive SNPs.

    Writes four CSVs into ``args.results_dir``: exploratory.csv (uncorrected
    p < 0.05), sig_results.csv (observed frequency above ``args.obs_freq``),
    sig_max.csv (single most frequent SNP per (fdr_type, label) group) and
    expected_appearance.csv (observed frequencies summed over trials).
    """
    logger.info("####################################")
    logger.info("Summarizing final results")

    uncorrected = pd.read_csv(os.path.join(
        args.results_dir, 'uncorrected.csv'))
    # Exploratory list: rows with an uncorrected p-value below 0.05.
    exploratory_mask = uncorrected['uncorrected_p_value'].values < 0.05
    uncorrected.iloc[exploratory_mask].to_csv(
        os.path.join(args.results_dir, 'exploratory.csv'), index=False)

    (df_results, fdr) = parse_knockoff_results(
        args, df_uncorrected=uncorrected)

    # Keep only SNPs whose observed frequency clears the requested threshold.
    above_threshold = df_results.iloc[
        df_results.obs_freq.values > args.obs_freq]
    above_threshold.to_csv(os.path.join(args.results_dir, 'sig_results.csv'),
                           index=False)

    # Alternately, extract the single most-frequently occuring SNP per
    # (fdr_type, label) group.
    top_rows = [
        group.index[np.argmax(group.obs_freq.values)]
        for _, group in df_results.groupby(['fdr_type', 'label'])
    ]
    df_sig_max = df_results.iloc[top_rows]
    df_sig_max = df_sig_max.sort_values(by='obs_freq', ascending=False)
    df_sig_max.to_csv(os.path.join(args.results_dir, 'sig_max.csv'),
                      index=False)

    # Sum observed frequencies across all trials: roughly the expected number
    # of times each SNP appears in *any* trial.
    df_expected = df_results[['SNP', 'fdr_type', 'label', 'obs_freq']]
    df_expected = df_expected.groupby(['SNP', 'fdr_type']).sum().reset_index()
    df_expected['fdr'] = fdr
    df_expected.rename(columns={'obs_freq': 'expected_obs_freq'}, inplace=True)
    df_expected = df_expected.sort_values(
        by='expected_obs_freq', ascending=False)
    df_expected.to_csv(os.path.join(args.results_dir, 'expected_appearance.csv'),
                       index=False)
if __name__ == '__main__':
    # Script entry point: parse CLI options, prepare the working directory,
    # set up logging, then run the summary step.
    cli_args = utils.parse_arguments()
    utils.safe_mkdir(cli_args.working_dir)
    utils.initialize_logger(cli_args)
    summarize(cli_args)
| [
"pandas.DataFrame",
"utils_snpko.parse_arguments",
"numpy.argmax",
"utils_snpko.initialize_logger",
"utils_snpko.safe_mkdir",
"os.path.join"
] | [((1774, 2013), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': label_list, 'fdr_type': fdr_type_list, 'SNP': SNP_list,\n 'obs_freq': obs_freq_list, 'uncorrected_p_value':\n uncorrected_p_value_list, 'uncorrected_odds_ratio':\n uncorrected_odds_ratio_list, 'fdr': fdr_list}"], {}), "({'label': label_list, 'fdr_type': fdr_type_list, 'SNP':\n SNP_list, 'obs_freq': obs_freq_list, 'uncorrected_p_value':\n uncorrected_p_value_list, 'uncorrected_odds_ratio':\n uncorrected_odds_ratio_list, 'fdr': fdr_list})\n", (1786, 2013), True, 'import pandas as pd\n'), ((4300, 4323), 'utils_snpko.parse_arguments', 'utils.parse_arguments', ([], {}), '()\n', (4321, 4323), True, 'import utils_snpko as utils\n'), ((4328, 4362), 'utils_snpko.safe_mkdir', 'utils.safe_mkdir', (['args.working_dir'], {}), '(args.working_dir)\n', (4344, 4362), True, 'import utils_snpko as utils\n'), ((4367, 4396), 'utils_snpko.initialize_logger', 'utils.initialize_logger', (['args'], {}), '(args)\n', (4390, 4396), True, 'import utils_snpko as utils\n'), ((2080, 2129), 'os.path.join', 'os.path.join', (['args.results_dir', '"""all_results.csv"""'], {}), "(args.results_dir, 'all_results.csv')\n", (2092, 2129), False, 'import os\n'), ((2428, 2477), 'os.path.join', 'os.path.join', (['args.results_dir', '"""uncorrected.csv"""'], {}), "(args.results_dir, 'uncorrected.csv')\n", (2440, 2477), False, 'import os\n'), ((2641, 2690), 'os.path.join', 'os.path.join', (['args.results_dir', '"""exploratory.csv"""'], {}), "(args.results_dir, 'exploratory.csv')\n", (2653, 2690), False, 'import os\n'), ((2994, 3043), 'os.path.join', 'os.path.join', (['args.results_dir', '"""sig_results.csv"""'], {}), "(args.results_dir, 'sig_results.csv')\n", (3006, 3043), False, 'import os\n'), ((3311, 3350), 'numpy.argmax', 'np.argmax', (['df_fdr_label.obs_freq.values'], {}), '(df_fdr_label.obs_freq.values)\n', (3320, 3350), True, 'import numpy as np\n'), ((3558, 3603), 'os.path.join', 'os.path.join', (['args.results_dir', 
'"""sig_max.csv"""'], {}), "(args.results_dir, 'sig_max.csv')\n", (3570, 3603), False, 'import os\n'), ((4165, 4222), 'os.path.join', 'os.path.join', (['args.results_dir', '"""expected_appearance.csv"""'], {}), "(args.results_dir, 'expected_appearance.csv')\n", (4177, 4222), False, 'import os\n'), ((248, 297), 'os.path.join', 'os.path.join', (['args.results_dir', '"""uncorrected.csv"""'], {}), "(args.results_dir, 'uncorrected.csv')\n", (260, 297), False, 'import os\n'), ((469, 522), 'os.path.join', 'os.path.join', (['args.results_dir', '"""knockoff_trials.txt"""'], {}), "(args.results_dir, 'knockoff_trials.txt')\n", (481, 522), False, 'import os\n')] |
# This script computes the matter Pk in real- and redshift-space. It takes as input
# the first and last number of the wanted realizations, the cosmology and the snapnum
# In redshift-space it computes the power spectrum along the 3 different axes.
import argparse
from mpi4py import MPI
import numpy as np
import sys,os
import readgadget,readfof
import redshift_space_library as RSL
import Pk_library as PKL
import MAS_library as MASL
###### MPI DEFINITIONS ######
comm = MPI.COMM_WORLD
nprocs = comm.Get_size()
myrank = comm.Get_rank()

# read the first and last realization numbers, the cosmology and the redshift
parser = argparse.ArgumentParser(
    # BUG FIX: the description said "bispectrum"; this script computes the
    # 2D matter power spectrum.
    description="This script computes the 2D matter power spectrum")
parser.add_argument("first", help="first realization number", type=int)
parser.add_argument("last", help="last realization number", type=int)
parser.add_argument("cosmo", help="folder with the realizations")
parser.add_argument("z", help="redshift")
args = parser.parse_args()
first, last, cosmo, z = args.first, args.last, args.cosmo, args.z

##################################### INPUT #########################################
# folder containing the snapshots
root = '/simons/scratch/fvillaescusa/pdf_information/density_field_2D'

# Pk parameters
BoxSize = 1000.0  # Mpc/h
grid = 512
MAS = 'CIC'
threads = 2

# folder that contains the results
folder_out = '/simons/scratch/fvillaescusa/pdf_information/Pk_2D/matter/'
#####################################################################################


def _compute_and_save_pk(f_df, fpk, out_folder):
    """Compute the 2D matter Pk of the density field f_df and save it to fpk.

    Skips the computation when the output already exists or the density field
    file is missing; creates out_folder if needed.
    """
    if os.path.exists(fpk):
        return
    if not os.path.exists(f_df):
        return
    df = np.load(f_df)
    if not os.path.exists(out_folder):
        os.system('mkdir %s' % out_folder)
    Pk = PKL.Pk_plane(df, BoxSize, MAS, threads)
    np.savetxt(fpk, np.transpose([Pk.k, Pk.Pk]))


# create output folder if it does not exist
if myrank == 0 and not os.path.exists(folder_out + cosmo):
    os.system('mkdir %s/%s/' % (folder_out, cosmo))
comm.Barrier()

# get the realizations each cpu works on
numbers = np.where(np.arange(args.first, args.last) % nprocs == myrank)[0]
numbers = np.arange(args.first, args.last)[numbers]

######## standard simulations #########
for i in numbers:
    _compute_and_save_pk(
        f_df='%s/%s/%d/df_z=%s.npy' % (root, cosmo, i, z),
        fpk='%s/%s/%d/Pk2D_m_z=%s.txt' % (folder_out, cosmo, i, z),
        out_folder='%s/%s/%d' % (folder_out, cosmo, i))

###### paired fixed realizations ######
for i in numbers:
    for pair in [0, 1]:
        _compute_and_save_pk(
            f_df='%s/%s/NCV_%d_%d/df_z=%s.npy' % (root, cosmo, pair, i, z),
            fpk='%s/%s/NCV_%d_%d/Pk2D_m_z=%s.txt' % (folder_out, cosmo, pair, i, z),
            out_folder='%s/%s/NCV_%d_%d' % (folder_out, cosmo, pair, i))
| [
"numpy.load",
"argparse.ArgumentParser",
"os.path.exists",
"os.system",
"numpy.transpose",
"numpy.arange",
"Pk_library.Pk_plane"
] | [((644, 718), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This script computes the bispectrum"""'}), "(description='This script computes the bispectrum')\n", (667, 718), False, 'import argparse\n'), ((1656, 1703), 'os.system', 'os.system', (["('mkdir %s/%s/' % (folder_out, cosmo))"], {}), "('mkdir %s/%s/' % (folder_out, cosmo))\n", (1665, 1703), False, 'import sys, os\n'), ((1839, 1871), 'numpy.arange', 'np.arange', (['args.first', 'args.last'], {}), '(args.first, args.last)\n', (1848, 1871), True, 'import numpy as np\n'), ((2047, 2066), 'os.path.exists', 'os.path.exists', (['fpk'], {}), '(fpk)\n', (2061, 2066), False, 'import sys, os\n'), ((2229, 2242), 'numpy.load', 'np.load', (['f_df'], {}), '(f_df)\n', (2236, 2242), True, 'import numpy as np\n'), ((2445, 2484), 'Pk_library.Pk_plane', 'PKL.Pk_plane', (['df', 'BoxSize', 'MAS', 'threads'], {}), '(df, BoxSize, MAS, threads)\n', (2457, 2484), True, 'import Pk_library as PKL\n'), ((1615, 1649), 'os.path.exists', 'os.path.exists', (['(folder_out + cosmo)'], {}), '(folder_out + cosmo)\n', (1629, 1649), False, 'import sys, os\n'), ((2187, 2207), 'os.path.exists', 'os.path.exists', (['f_df'], {}), '(f_df)\n', (2201, 2207), False, 'import sys, os\n'), ((2304, 2355), 'os.path.exists', 'os.path.exists', (["('%s/%s/%d' % (folder_out, cosmo, i))"], {}), "('%s/%s/%d' % (folder_out, cosmo, i))\n", (2318, 2355), False, 'import sys, os\n'), ((2362, 2414), 'os.system', 'os.system', (["('mkdir %s/%s/%d' % (folder_out, cosmo, i))"], {}), "('mkdir %s/%s/%d' % (folder_out, cosmo, i))\n", (2371, 2414), False, 'import sys, os\n'), ((2502, 2529), 'numpy.transpose', 'np.transpose', (['[Pk.k, Pk.Pk]'], {}), '([Pk.k, Pk.Pk])\n', (2514, 2529), True, 'import numpy as np\n'), ((2746, 2765), 'os.path.exists', 'os.path.exists', (['fpk'], {}), '(fpk)\n', (2760, 2765), False, 'import sys, os\n'), ((2956, 2969), 'numpy.load', 'np.load', (['f_df'], {}), '(f_df)\n', (2963, 2969), True, 'import numpy as np\n'), 
((3216, 3255), 'Pk_library.Pk_plane', 'PKL.Pk_plane', (['df', 'BoxSize', 'MAS', 'threads'], {}), '(df, BoxSize, MAS, threads)\n', (3228, 3255), True, 'import Pk_library as PKL\n'), ((2910, 2930), 'os.path.exists', 'os.path.exists', (['f_df'], {}), '(f_df)\n', (2924, 2930), False, 'import sys, os\n'), ((3039, 3103), 'os.path.exists', 'os.path.exists', (["('%s/%s/NCV_%d_%d' % (folder_out, cosmo, pair, i))"], {}), "('%s/%s/NCV_%d_%d' % (folder_out, cosmo, pair, i))\n", (3053, 3103), False, 'import sys, os\n'), ((3113, 3178), 'os.system', 'os.system', (["('mkdir %s/%s/NCV_%d_%d' % (folder_out, cosmo, pair, i))"], {}), "('mkdir %s/%s/NCV_%d_%d' % (folder_out, cosmo, pair, i))\n", (3122, 3178), False, 'import sys, os\n'), ((3277, 3304), 'numpy.transpose', 'np.transpose', (['[Pk.k, Pk.Pk]'], {}), '([Pk.k, Pk.Pk])\n', (3289, 3304), True, 'import numpy as np\n'), ((1777, 1809), 'numpy.arange', 'np.arange', (['args.first', 'args.last'], {}), '(args.first, args.last)\n', (1786, 1809), True, 'import numpy as np\n')] |
import torch
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
import cv2
import random
# generating random text effects on the distance map
# grayimg1, gratimg2: two distance maps to be colorized using the same text effects
# maxcolornum: determine the richness of color
def colorize_two(grayimg1, grayimg2, maxcolornum):
    """Apply one shared random color ramp to two grayscale distance maps.

    A piecewise-linear 256-entry lookup table is built from randomly colored
    anchors spread along the distance axis, applied to both maps, lightly
    blurred and scaled to [0, 1].

    Args:
        grayimg1: uint8 distance map (H1 x W1).
        grayimg2: uint8 distance map (H2 x W2).
        maxcolornum: color richness; 0 means exactly 2 anchors, otherwise
            1..maxcolornum extra anchors are drawn at random.

    Returns:
        list of two float32 torch tensors of shape (3, H, W), one per input,
        both rendered with the same text effect.
    """
    # number of color anchors
    if maxcolornum == 0:
        colornum = 2
    else:
        colornum = random.randint(1, maxcolornum) + 2
    # max distance over both maps so the shared LUT covers every gray value
    maxdist = max(np.max(grayimg1), np.max(grayimg2))
    # checkpoints: gray values at which the color anchors are placed
    checkpoints = []
    checkpoints += [int(np.round(k * (maxdist / (colornum - 1.0)))) for k in range(colornum - 1)]
    checkpoints += [int(maxdist)]
    checkpoints += [int(256)]
    # one random color per anchor
    cmap = np.random.randint(0, 255, (colornum + 1, 3))
    # build the LUT by linear interpolation between consecutive anchors
    # (removed the unused local `mx`; the inner loop no longer shadows the
    # outer loop variable)
    lut = np.empty(shape=(256, 3))
    lastval = checkpoints[0]
    lastcol = cmap[0]
    for a in range(colornum):
        col = cmap[a + 1]
        val = checkpoints[a + 1]
        for c in range(3):
            lut[lastval:val, c] = np.linspace(
                lastcol[c], col[c], val - lastval)
        lastcol = col
        lastval = val
    # generating text effects on grayimg1
    [w1, h1] = grayimg1.shape
    colorimg1 = np.empty(shape=(w1, h1, 3), dtype=np.uint8)
    for c in range(3):
        colorimg1[..., c] = cv2.LUT(grayimg1, lut[:, c])
    # light blur, scale to [0, 1], move channels first (C, H, W)
    colorimg1 = np.clip(cv2.GaussianBlur(colorimg1, (3, 3), 3, 1), a_min=0, a_max=255) / 255.0
    colorimg1 = np.transpose(colorimg1, axes=(2, 0, 1))
    # generating text effects on grayimg2
    [w2, h2] = grayimg2.shape
    colorimg2 = np.empty(shape=(w2, h2, 3), dtype=np.uint8)
    for c in range(3):
        colorimg2[..., c] = cv2.LUT(grayimg2, lut[:, c])
    colorimg2 = np.clip(cv2.GaussianBlur(colorimg2, (3, 3), 3, 1), a_min=0, a_max=255) / 255.0
    colorimg2 = np.transpose(colorimg2, axes=(2, 0, 1))
    return [torch.tensor(colorimg1, dtype=torch.float32), torch.tensor(colorimg2, dtype=torch.float32)]
# generating random text effects based on the pixel distance from the glyph
# img1, img2: two PIL images to be renderred
def generate_styles(img1, img2):
    """Render random text effects on two images using one shared style.

    Each input's red channel is used as a blending mask while the green and
    blue channels (pixel-distance maps) are colorized with ``colorize_two``
    — foreground and background with separate ramps — and blended through
    the mask.
    """
    to_tensor = transforms.ToTensor()

    def _split_channels(img):
        # mask (repeated to 3 channels), foreground map, background map
        r, g, b = img.split()
        return (to_tensor(r).repeat(3, 1, 1),
                to_tensor(g) * 255,
                to_tensor(b) * 255)

    mask1, fg1, bg1 = _split_channels(img1)
    mask2, fg2, bg2 = _split_channels(img2)

    def _as_uint8(t):
        return t.squeeze().numpy().astype(np.uint8)

    fgc1, fgc2 = colorize_two(_as_uint8(fg1), _as_uint8(fg2), 0)
    bgc1, bgc2 = colorize_two(_as_uint8(bg1), _as_uint8(bg2), 3)

    styled1 = fgc1 * mask1 + bgc1 * (1 - mask1)
    styled2 = fgc2 * mask2 + bgc2 * (1 - mask2)
    to_pil = transforms.ToPILImage()
    return to_pil(styled1), to_pil(styled2)
"cv2.GaussianBlur",
"random.randint",
"numpy.empty",
"numpy.transpose",
"torchvision.transforms.ToPILImage",
"numpy.max",
"numpy.random.randint",
"cv2.LUT",
"numpy.linspace",
"numpy.round",
"torch.tensor",
"torchvision.transforms.ToTensor"
] | [((825, 869), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(colornum + 1, 3)'], {}), '(0, 255, (colornum + 1, 3))\n', (842, 869), True, 'import numpy as np\n'), ((927, 951), 'numpy.empty', 'np.empty', ([], {'shape': '(256, 3)'}), '(shape=(256, 3))\n', (935, 951), True, 'import numpy as np\n'), ((1347, 1390), 'numpy.empty', 'np.empty', ([], {'shape': '(w1, h1, 3)', 'dtype': 'np.uint8'}), '(shape=(w1, h1, 3), dtype=np.uint8)\n', (1355, 1390), True, 'import numpy as np\n'), ((1595, 1634), 'numpy.transpose', 'np.transpose', (['colorimg1'], {'axes': '(2, 0, 1)'}), '(colorimg1, axes=(2, 0, 1))\n', (1607, 1634), True, 'import numpy as np\n'), ((1726, 1769), 'numpy.empty', 'np.empty', ([], {'shape': '(w2, h2, 3)', 'dtype': 'np.uint8'}), '(shape=(w2, h2, 3), dtype=np.uint8)\n', (1734, 1769), True, 'import numpy as np\n'), ((1966, 2005), 'numpy.transpose', 'np.transpose', (['colorimg2'], {'axes': '(2, 0, 1)'}), '(colorimg2, axes=(2, 0, 1))\n', (1978, 2005), True, 'import numpy as np\n'), ((544, 560), 'numpy.max', 'np.max', (['grayimg1'], {}), '(grayimg1)\n', (550, 560), True, 'import numpy as np\n'), ((562, 578), 'numpy.max', 'np.max', (['grayimg2'], {}), '(grayimg2)\n', (568, 578), True, 'import numpy as np\n'), ((1443, 1471), 'cv2.LUT', 'cv2.LUT', (['grayimg1', 'lut[:, i]'], {}), '(grayimg1, lut[:, i])\n', (1450, 1471), False, 'import cv2\n'), ((1822, 1850), 'cv2.LUT', 'cv2.LUT', (['grayimg2', 'lut[:, i]'], {}), '(grayimg2, lut[:, i])\n', (1829, 1850), False, 'import cv2\n'), ((2021, 2065), 'torch.tensor', 'torch.tensor', (['colorimg1'], {'dtype': 'torch.float32'}), '(colorimg1, dtype=torch.float32)\n', (2033, 2065), False, 'import torch\n'), ((2067, 2111), 'torch.tensor', 'torch.tensor', (['colorimg2'], {'dtype': 'torch.float32'}), '(colorimg2, dtype=torch.float32)\n', (2079, 2111), False, 'import torch\n'), ((468, 498), 'random.randint', 'random.randint', (['(1)', 'maxcolornum'], {}), '(1, maxcolornum)\n', (482, 498), False, 'import random\n'), 
((667, 709), 'numpy.round', 'np.round', (['(i * (maxdist / (colornum - 1.0)))'], {}), '(i * (maxdist / (colornum - 1.0)))\n', (675, 709), True, 'import numpy as np\n'), ((1150, 1196), 'numpy.linspace', 'np.linspace', (['lastcol[i]', 'col[i]', '(val - lastval)'], {}), '(lastcol[i], col[i], val - lastval)\n', (1161, 1196), True, 'import numpy as np\n'), ((1509, 1550), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['colorimg1', '(3, 3)', '(3)', '(1)'], {}), '(colorimg1, (3, 3), 3, 1)\n', (1525, 1550), False, 'import cv2\n'), ((1880, 1921), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['colorimg2', '(3, 3)', '(3)', '(1)'], {}), '(colorimg2, (3, 3), 3, 1)\n', (1896, 1921), False, 'import cv2\n'), ((2358, 2379), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2377, 2379), True, 'import torchvision.transforms as transforms\n'), ((2397, 2418), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2416, 2418), True, 'import torchvision.transforms as transforms\n'), ((2514, 2535), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2533, 2535), True, 'import torchvision.transforms as transforms\n'), ((2554, 2575), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2573, 2575), True, 'import torchvision.transforms as transforms\n'), ((2981, 3004), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (3002, 3004), True, 'import torchvision.transforms as transforms\n'), ((3020, 3043), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (3041, 3043), True, 'import torchvision.transforms as transforms\n'), ((2309, 2330), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2328, 2330), True, 'import torchvision.transforms as transforms\n'), ((2465, 2486), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2484, 2486), True, 'import torchvision.transforms as transforms\n')] |
from PIL import Image
import base64
import io
import cv2
import numpy as np
def b64_img(base64img):
    """Decode a base64-encoded image string into a numpy array."""
    raw = base64.b64decode(base64img)
    pil_image = Image.open(io.BytesIO(raw))
    return np.array(pil_image)
def img_b64(img):
    """Encode an image array as a base64 JPEG string.

    The previous implementation wrote a hard-coded './test.jpg' to the
    current working directory and read it back — leaking a file, failing on
    a read-only CWD and racing under concurrent calls. Encoding in memory
    with cv2.imencode avoids all of that.

    Args:
        img: image as a numpy array (OpenCV layout — presumably BGR,
            matching the previous cv2.imwrite round-trip).

    Returns:
        str: base64-encoded JPEG data.

    Raises:
        ValueError: if the image cannot be encoded.
    """
    ok, buf = cv2.imencode('.jpg', img)
    if not ok:
        raise ValueError('JPEG encoding failed')
    return base64.b64encode(buf.tobytes()).decode('utf-8')
if __name__ == "__main__":
img = cv2.imread('/home/mugesh/r/demo/Assets/img2.jpeg')
b = img_b64(img)
ig = b64_img(b)
cv2.imshow("Frame", ig)
cv2.waitKey(0)
| [
"io.BytesIO",
"cv2.waitKey",
"cv2.imwrite",
"base64.b64decode",
"cv2.imread",
"numpy.array",
"cv2.imshow"
] | [((123, 150), 'base64.b64decode', 'base64.b64decode', (['base64img'], {}), '(base64img)\n', (139, 150), False, 'import base64\n'), ((217, 232), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (225, 232), True, 'import numpy as np\n'), ((277, 307), 'cv2.imwrite', 'cv2.imwrite', (['"""./test.jpg"""', 'img'], {}), "('./test.jpg', img)\n", (288, 307), False, 'import cv2\n'), ((519, 569), 'cv2.imread', 'cv2.imread', (['"""/home/mugesh/r/demo/Assets/img2.jpeg"""'], {}), "('/home/mugesh/r/demo/Assets/img2.jpeg')\n", (529, 569), False, 'import cv2\n'), ((617, 640), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'ig'], {}), "('Frame', ig)\n", (627, 640), False, 'import cv2\n'), ((646, 660), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (657, 660), False, 'import cv2\n'), ((174, 200), 'io.BytesIO', 'io.BytesIO', (['base64_decoded'], {}), '(base64_decoded)\n', (184, 200), False, 'import io\n')] |
import numpy as np
from .activation import ActivationFunc
class ReLU(ActivationFunc):
    """Rectified linear unit: f(x) = max(0, x)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """Apply the rectifier elementwise, caching the output for backward."""
        activated = np.maximum(0, x)
        self.f_val = activated
        return activated

    @property
    def grad(self):
        """Local derivative: 1.0 where the cached output is positive, else 0.0."""
        assert self.f_val is not None
        positive_mask = self.f_val > 0
        return positive_mask.astype(float)

    def backward(self, grad):
        """Propagate the upstream gradient through the rectifier.

        ReLU's derivative is an indicator on positive activations, so the
        upstream gradient is masked by where the cached forward output was
        positive. The cache is cleared afterwards.
        """
        downstream = grad * self.grad
        self.f_val = None
        return downstream
| [
"numpy.maximum"
] | [((258, 274), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (268, 274), True, 'import numpy as np\n')] |
# Copyright 2021 ETH Zurich and the NPBench authors. All rights reserved.
import numpy as np
def initialize(N, datatype=np.int32):
    """Build the length-N sequence 1, 2, 3, 0, 1, 2, 3, 0, ... as `datatype`."""
    seq = (np.arange(N, dtype=datatype) + 1) % 4
    return seq
| [
"numpy.fromfunction"
] | [((144, 204), 'numpy.fromfunction', 'np.fromfunction', (['(lambda i: (i + 1) % 4)', '(N,)'], {'dtype': 'datatype'}), '(lambda i: (i + 1) % 4, (N,), dtype=datatype)\n', (159, 204), True, 'import numpy as np\n')] |
"""
CEASIOMpy: Conceptual Aircraft Design Software
Developed for CFS ENGINEERING, 1015 Lausanne, Switzerland
Center_of_gravity evaluation for unconventional aircraft with fuselage.
Function to evaluate the Center of Gravity of the aircraft.
| Works with Python 2.7
| Author : <NAME>
| Date of creation: 2018-10-12
| Last modification: 2019-02-20
"""
#=============================================================================
# IMPORTS
#=============================================================================
import numpy as np
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""All classes are defined inside the classes and into
the InputClasses/Unconventional folder."""
#=============================================================================
# FUNCTIONS
#=============================================================================
def center_of_gravity_evaluation(F_PERC, P_PERC, afg, awg, mw, ed, ui, bi):
    """ Evaluate the centre of gravity of the aircraft.

    The masses evaluated in weight_unc_main.py are spread over every
    fuselage and wing segment proportionally to the segment volumes, and
    the CoG is then obtained as the mass-weighted mean of the segment
    centre points (plus, optionally, user-placed engines).

    Source: An introduction to mechanics, 2nd ed.,
            Cambridge University Press.

    ARGUMENTS
    (int) F_PERC --Arg.: Percentage of the maximum amount of fuel allowed
    (int) P_PERC --Arg.: Percentage of the maximum amount of Payload allowed
    (class) afg  --Arg.: AircraftFuseGeometry class.
    (class) awg  --Arg.: AircraftWingGeometry class.
    (class) mw   --Arg.: MassesWeights class.
    (class) ed   --Arg.: EngineData class.
    (class) ui   --Arg.: UserInputs class.
    (class) bi   --Arg.: UserInputs class.
    ##======= Classes are defined in the InputClasses folder =======##

    RETURN
    (float_array) center_of_gravity --Out.: x,y,z coordinates of the CoG.
    (float_array) mass_seg_i --Out.: mass of each segment of each
                                     component of the aircraft.
    (float_array) airplane_centers_segs --Out.: point at the center of
                                                each segment of the
                                                aircraft.
    """
    # The widest segment count over all components sizes the mass matrix.
    max_seg_n = np.max([np.amax(afg.fuse_seg_nb), np.amax(awg.wing_seg_nb)])
    t_nb = afg.fus_nb + awg.w_nb # Number of parts not counting symmetry
    tot_nb = afg.fuse_nb + awg.wing_nb # Number of parts counting symmetry
    segments_nb = []
    fuse_fuel_vol = 0
    pass_vol = 0
    # Fuselage pass: accumulate fuel volume of fuel-carrying fuselages and
    # the cabin volume available for payload.
    for i in range(1,afg.fus_nb+1):
        segments_nb.append(afg.fuse_seg_nb[i-1])
        if ui.F_FUEL[i-1]:
            fuse_fuel_vol += afg.fuse_fuel_vol[i-1]
        # NOTE(review): np.all(...) returns a bool; the `== 1` comparison
        # works because True == 1, but reads oddly -- confirm intent.
        if np.all(afg.cabin_seg[:,i-1]) == 1:
            pass_vol += afg.fuse_vol[i-1]
        else:
            pass_vol += afg.fuse_cabin_vol[i-1]
    htw = 0
    x0 = 0
    s = 0
    # Wing pass: duplicate the segment count for symmetric wings; remember
    # a horizontal non-main wing (htw) and the rearmost non-horizontal
    # wing (tw) for the mass-correction step below.
    for i in range(1,awg.w_nb+1):
        segments_nb.append(awg.wing_seg_nb[i-1])
        if awg.wing_sym[i-1] != 0:
            segments_nb.append(awg.wing_seg_nb[i-1])
            s += 1
        if awg.is_horiz[i-1+s]:
            if i != awg.main_wing_index:
                htw = i
        else:
            x = np.amax(awg.wing_center_seg_point[:,i+s-1,0])
            if x > x0:
                tw = i
                x0 = x
    mass_seg_i = np.zeros((max_seg_n,tot_nb))
    # Volume carrying the operating empty mass (everything but fuel).
    oem_vol = (awg.wing_tot_vol-awg.wing_fuel_vol)\
              + (np.sum(afg.fuse_vol)-fuse_fuel_vol)
    # Evaluating oem density, fuel density, passenger density
    if bi.USER_EN_PLACEMENT:
        # Engines are placed explicitly later, so exclude their mass here.
        oem_par = (mw.operating_empty_mass-mw.mass_engines) / oem_vol
        en = mw.mass_engines
    else:
        oem_par = mw.operating_empty_mass / oem_vol
        en = 0
    mpass_par = (mw.mass_payload*(P_PERC/100.0)) / pass_vol
    mfuel_par = (mw.mass_fuel_tot*(F_PERC/100.0))\
                /(awg.wing_fuel_vol + fuse_fuel_vol)
    mtom = mw.operating_empty_mass + mw.mass_payload*(P_PERC/100)\
           + mw.mass_fuel_tot*(F_PERC/100) - en
    # Definition of the mass of each segment
    ex = False
    wg = []
    # Fuselage segments: density * segment volume, with fuel or payload
    # density added where the segment carries fuel / cabin.
    for i in range(1,afg.fus_nb+1):
        if ui.F_FUEL[i-1]:
            for j in range(1,afg.fuse_seg_nb[i-1]+1):
                mass_seg_i[j-1][i-1] = (oem_par+(mfuel_par*ui.F_FUEL[i-1]/100))\
                    * afg.fuse_seg_vol[j-1][i-1]
        else:
            for j in range(1,afg.fuse_seg_nb[i-1]+1):
                if int(afg.cabin_seg[j-1][i-1]) == 1:
                    mass_seg_i[j-1][i-1] = (oem_par+mpass_par)\
                        * afg.fuse_seg_vol[j-1][i-1]
                else:
                    mass_seg_i[j-1][i-1] = oem_par * afg.fuse_seg_vol[j-1][i-1]
    w = 0
    # Wing segments: horizontal wings split their volume into structure
    # plus fuel; symmetric wings reuse the already computed column.
    for i in range(afg.fus_nb+1,t_nb+1):
        for j in range(1,awg.wing_seg_nb[i-1-afg.fus_nb]+1):
            if awg.is_horiz[i+w-1-afg.fus_nb]:
                mass_seg_i[j-1][i-1+w] = oem_par\
                    * (awg.wing_seg_vol[j-1][i-1-afg.fus_nb]\
                    - awg.wing_fuel_seg_vol[j-1][i-1-afg.fus_nb])\
                    + mfuel_par * (awg.wing_fuel_seg_vol[j-1][i-1-afg.fus_nb])
            else:
                mass_seg_i[j-1][i-1+w] = oem_par\
                    * awg.wing_seg_vol[j-1][i-1-afg.fus_nb]
        wg.append(i-afg.fus_nb)
        if awg.wing_sym[i-1-afg.fus_nb] != 0:
            w += 1
            mass_seg_i[:,i-1+w]=mass_seg_i[:,i-2+w]
            wg.append(i-afg.fus_nb)
            if i+w == tot_nb:
                break
    # Mass check: add any residual between MTOM and the summed segment
    # masses to the first segment of a chosen wing until they agree.
    # NOTE(review): `tw` is only bound if at least one non-horizontal wing
    # exists; otherwise wg.index(tw) raises NameError -- confirm the
    # caller guarantees this for non-wing-mounted engines.
    while not ex:
        if abs(round(mtom,3) - round(np.sum(mass_seg_i),3)) < 0.0001:
            ex = True
        else:
            # Half the residual, applied twice below (second half goes to
            # the adjacent column -- presumably the symmetric copy -- when
            # the wing is horizontal).
            mass = (round(mtom,3) - round(np.sum(mass_seg_i),3))/2
            if not ed.WING_MOUNTED:
                if htw != 0:
                    a = wg.index(htw)
                else:
                    a = wg.index(tw)
            else:
                a = wg.index(awg.main_wing_index)
            mass_seg_i[0][afg.fuse_nb+a] = mass_seg_i[0][afg.fuse_nb+a] + mass
            if awg.is_horiz[a]:
                mass_seg_i[0][afg.fuse_nb+a+1]\
                    = mass_seg_i[0][afg.fuse_nb+a+1] + mass
            else:
                mass_seg_i[0][afg.fuse_nb+a]\
                    = mass_seg_i[0][afg.fuse_nb+a] + mass
    # Align the centre-point arrays with mass_seg_i (in-place resize).
    awg.wing_center_seg_point.resize(max_seg_n,awg.wing_nb,3)
    afg.fuse_center_seg_point.resize(max_seg_n,afg.fuse_nb,3)
    airplane_centers_segs = np.concatenate((afg.fuse_center_seg_point,\
                                             awg.wing_center_seg_point),1)
    # CoG evaluation: mass-weighted mean of the segment centres, plus the
    # engine contribution when engines are placed by the user.
    if bi.USER_EN_PLACEMENT:
        cog_enx = np.sum(ed.EN_PLACEMENT[:,0]*ed.en_mass)
        cog_eny = np.sum(ed.EN_PLACEMENT[:,1]*ed.en_mass)
        cog_enz = np.sum(ed.EN_PLACEMENT[:,2]*ed.en_mass)
    else:
        cog_enx = 0.0
        cog_eny = 0.0
        cog_enz = 0.0
    center_of_gravity=[]
    center_of_gravity.append(round((np.sum(airplane_centers_segs[:,:,0]\
                             *mass_seg_i) + cog_enx) / mtom,3))
    center_of_gravity.append(round((np.sum(airplane_centers_segs[:,:,1]\
                             *mass_seg_i) + cog_eny) / mtom,3))
    center_of_gravity.append(round((np.sum(airplane_centers_segs[:,:,2]\
                             *mass_seg_i) + cog_enz) / mtom,3))
    # Snap numerically-zero coordinates to exactly 0.0.
    for i in range(1,4):
        if abs(center_of_gravity[i-1]) < 10**(-5):
            center_of_gravity[i-1] = 0.0
    return(center_of_gravity, mass_seg_i, airplane_centers_segs)
#=============================================================================
# MAIN
#=============================================================================
if __name__ == '__main__':
    # This module is a library: refuse to run standalone and point the
    # user at the actual entry point.
    log.warning('#########################################################')
    log.warning('# ERROR NOT A STANDALONE PROGRAM, RUN balanceuncmain.py #')
    log.warning('#########################################################')
| [
"numpy.sum",
"numpy.concatenate",
"numpy.zeros",
"numpy.amax",
"numpy.all"
] | [((3507, 3536), 'numpy.zeros', 'np.zeros', (['(max_seg_n, tot_nb)'], {}), '((max_seg_n, tot_nb))\n', (3515, 3536), True, 'import numpy as np\n'), ((6628, 6701), 'numpy.concatenate', 'np.concatenate', (['(afg.fuse_center_seg_point, awg.wing_center_seg_point)', '(1)'], {}), '((afg.fuse_center_seg_point, awg.wing_center_seg_point), 1)\n', (6642, 6701), True, 'import numpy as np\n'), ((6814, 6856), 'numpy.sum', 'np.sum', (['(ed.EN_PLACEMENT[:, 0] * ed.en_mass)'], {}), '(ed.EN_PLACEMENT[:, 0] * ed.en_mass)\n', (6820, 6856), True, 'import numpy as np\n'), ((6872, 6914), 'numpy.sum', 'np.sum', (['(ed.EN_PLACEMENT[:, 1] * ed.en_mass)'], {}), '(ed.EN_PLACEMENT[:, 1] * ed.en_mass)\n', (6878, 6914), True, 'import numpy as np\n'), ((6930, 6972), 'numpy.sum', 'np.sum', (['(ed.EN_PLACEMENT[:, 2] * ed.en_mass)'], {}), '(ed.EN_PLACEMENT[:, 2] * ed.en_mass)\n', (6936, 6972), True, 'import numpy as np\n'), ((2393, 2417), 'numpy.amax', 'np.amax', (['afg.fuse_seg_nb'], {}), '(afg.fuse_seg_nb)\n', (2400, 2417), True, 'import numpy as np\n'), ((2419, 2443), 'numpy.amax', 'np.amax', (['awg.wing_seg_nb'], {}), '(awg.wing_seg_nb)\n', (2426, 2443), True, 'import numpy as np\n'), ((2836, 2867), 'numpy.all', 'np.all', (['afg.cabin_seg[:, i - 1]'], {}), '(afg.cabin_seg[:, i - 1])\n', (2842, 2867), True, 'import numpy as np\n'), ((3362, 3413), 'numpy.amax', 'np.amax', (['awg.wing_center_seg_point[:, i + s - 1, 0]'], {}), '(awg.wing_center_seg_point[:, i + s - 1, 0])\n', (3369, 3413), True, 'import numpy as np\n'), ((3606, 3626), 'numpy.sum', 'np.sum', (['afg.fuse_vol'], {}), '(afg.fuse_vol)\n', (3612, 3626), True, 'import numpy as np\n'), ((7108, 7159), 'numpy.sum', 'np.sum', (['(airplane_centers_segs[:, :, 0] * mass_seg_i)'], {}), '(airplane_centers_segs[:, :, 0] * mass_seg_i)\n', (7114, 7159), True, 'import numpy as np\n'), ((7245, 7296), 'numpy.sum', 'np.sum', (['(airplane_centers_segs[:, :, 1] * mass_seg_i)'], {}), '(airplane_centers_segs[:, :, 1] * mass_seg_i)\n', (7251, 7296), True, 
'import numpy as np\n'), ((7382, 7433), 'numpy.sum', 'np.sum', (['(airplane_centers_segs[:, :, 2] * mass_seg_i)'], {}), '(airplane_centers_segs[:, :, 2] * mass_seg_i)\n', (7388, 7433), True, 'import numpy as np\n'), ((5767, 5785), 'numpy.sum', 'np.sum', (['mass_seg_i'], {}), '(mass_seg_i)\n', (5773, 5785), True, 'import numpy as np\n'), ((5878, 5896), 'numpy.sum', 'np.sum', (['mass_seg_i'], {}), '(mass_seg_i)\n', (5884, 5896), True, 'import numpy as np\n')] |
import unittest
import sys
if sys.path[0].endswith("dummies"):
sys.path = sys.path[1:]
import vlogging
class BasicTestCase(unittest.TestCase):
def test_nothing(self):
s = str(vlogging.VisualRecord())
self.assertTrue("<hr/>" in s)
def test_text_only(self):
s = str(vlogging.VisualRecord(title="title", footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
def test_all_renderers(self):
self.assertEqual(len(vlogging.renderers), 3)
def test_invalid_images(self):
s = str(vlogging.VisualRecord(
title="title",
imgs="foobar",
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 0)
s = str(vlogging.VisualRecord(
title="title",
imgs=["foobar", 1, 2, dict()],
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 0)
def test_pil(self):
from PIL import Image
pil_image = Image.open('vlogging/tests/lenna.jpg')
s = str(vlogging.VisualRecord(
title="title",
imgs=pil_image,
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertTrue("image/png" in s)
self.assertEqual(s.count("<img"), 1)
s = str(vlogging.VisualRecord(
title="title",
imgs=[pil_image],
footnotes="footnotes"))
self.assertEqual(s.count("<img"), 1)
s = str(vlogging.VisualRecord(
title="title",
imgs=[pil_image, pil_image],
footnotes="footnotes",
fmt="jpeg"))
self.assertTrue("image/jpeg" in s)
self.assertEqual(s.count("<img"), 2)
def test_opencv(self):
import cv2
cv_image = cv2.imread('vlogging/tests/lenna.jpg')
s = str(vlogging.VisualRecord(
title="title",
imgs=cv_image,
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 1)
s = str(vlogging.VisualRecord(
title="title",
imgs=[cv_image],
footnotes="footnotes"))
self.assertEqual(s.count("<img"), 1)
s = str(vlogging.VisualRecord(
title="title",
imgs=[cv_image, cv_image],
footnotes="footnotes"))
self.assertEqual(s.count("<img"), 2)
def test_pylab_basic(self):
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0., 5., 0.2)
plt.plot(t, t, 'r--', t, t ** 2, 'bs', t, t ** 3, 'g^')
s = str(vlogging.VisualRecord(
title="title",
imgs=plt,
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 1)
def test_pylab_figure(self):
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0., 5., 0.2)
fig = plt.figure()
plt.plot(t, t, 'r--', t, t ** 2, 'bs', t, t ** 3, 'g^')
s = str(vlogging.VisualRecord(
title="title",
imgs=fig,
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 1)
| [
"matplotlib.pyplot.plot",
"PIL.Image.open",
"cv2.imread",
"matplotlib.pyplot.figure",
"numpy.arange",
"vlogging.VisualRecord"
] | [((1288, 1326), 'PIL.Image.open', 'Image.open', (['"""vlogging/tests/lenna.jpg"""'], {}), "('vlogging/tests/lenna.jpg')\n", (1298, 1326), False, 'from PIL import Image\n'), ((2166, 2204), 'cv2.imread', 'cv2.imread', (['"""vlogging/tests/lenna.jpg"""'], {}), "('vlogging/tests/lenna.jpg')\n", (2176, 2204), False, 'import cv2\n'), ((2977, 3001), 'numpy.arange', 'np.arange', (['(0.0)', '(5.0)', '(0.2)'], {}), '(0.0, 5.0, 0.2)\n', (2986, 3001), True, 'import numpy as np\n'), ((3009, 3064), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 't', '"""r--"""', 't', '(t ** 2)', '"""bs"""', 't', '(t ** 3)', '"""g^"""'], {}), "(t, t, 'r--', t, t ** 2, 'bs', t, t ** 3, 'g^')\n", (3017, 3064), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3492), 'numpy.arange', 'np.arange', (['(0.0)', '(5.0)', '(0.2)'], {}), '(0.0, 5.0, 0.2)\n', (3477, 3492), True, 'import numpy as np\n'), ((3506, 3518), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3516, 3518), True, 'import matplotlib.pyplot as plt\n'), ((3527, 3582), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 't', '"""r--"""', 't', '(t ** 2)', '"""bs"""', 't', '(t ** 3)', '"""g^"""'], {}), "(t, t, 'r--', t, t ** 2, 'bs', t, t ** 3, 'g^')\n", (3535, 3582), True, 'import matplotlib.pyplot as plt\n'), ((196, 219), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {}), '()\n', (217, 219), False, 'import vlogging\n'), ((306, 365), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'footnotes': '"""footnotes"""'}), "(title='title', footnotes='footnotes')\n", (327, 365), False, 'import vlogging\n'), ((625, 699), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': '"""foobar"""', 'footnotes': '"""footnotes"""'}), "(title='title', imgs='foobar', footnotes='footnotes')\n", (646, 699), False, 'import vlogging\n'), ((1343, 1418), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': 'pil_image', 'footnotes': '"""footnotes"""'}), 
"(title='title', imgs=pil_image, footnotes='footnotes')\n", (1364, 1418), False, 'import vlogging\n'), ((1680, 1757), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': '[pil_image]', 'footnotes': '"""footnotes"""'}), "(title='title', imgs=[pil_image], footnotes='footnotes')\n", (1701, 1757), False, 'import vlogging\n'), ((1859, 1964), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': '[pil_image, pil_image]', 'footnotes': '"""footnotes"""', 'fmt': '"""jpeg"""'}), "(title='title', imgs=[pil_image, pil_image], footnotes\n ='footnotes', fmt='jpeg')\n", (1880, 1964), False, 'import vlogging\n'), ((2221, 2295), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': 'cv_image', 'footnotes': '"""footnotes"""'}), "(title='title', imgs=cv_image, footnotes='footnotes')\n", (2242, 2295), False, 'import vlogging\n'), ((2515, 2591), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': '[cv_image]', 'footnotes': '"""footnotes"""'}), "(title='title', imgs=[cv_image], footnotes='footnotes')\n", (2536, 2591), False, 'import vlogging\n'), ((2693, 2784), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': '[cv_image, cv_image]', 'footnotes': '"""footnotes"""'}), "(title='title', imgs=[cv_image, cv_image], footnotes=\n 'footnotes')\n", (2714, 2784), False, 'import vlogging\n'), ((3082, 3151), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': 'plt', 'footnotes': '"""footnotes"""'}), "(title='title', imgs=plt, footnotes='footnotes')\n", (3103, 3151), False, 'import vlogging\n'), ((3600, 3669), 'vlogging.VisualRecord', 'vlogging.VisualRecord', ([], {'title': '"""title"""', 'imgs': 'fig', 'footnotes': '"""footnotes"""'}), "(title='title', imgs=fig, footnotes='footnotes')\n", (3621, 3669), False, 'import vlogging\n')] |
# Copyright (c) Alibaba Inc. All rights reserved.
import argparse
import cv2
import numpy as np
import os
import shutil
# Parse command line arguments.
# CLI schema; parsing itself is deferred to the __main__ guard below.
parser = argparse.ArgumentParser(description='Resize HPatches sequence images.')
parser.add_argument('--input_dir', type=str, default='./hpatches-sequences-release',
                    help='Dir of the hpatches-sequences-release.')
parser.add_argument('--output_dir', type=str, default='./hpatches-sequences-resize',
                    help='Dir to store the resized result of hpatches-sequences-release.')
parser.add_argument('--max_edge', type=int, default=640,
                    help='Max edge of height or width, resize the image if it is too large.')
def resize_imgs(input_dir, output_dir, max_edge):
    """Resize all HPatches sequence images and rescale their homographies.

    For every sequence directory under ``input_dir``, each image ``1.ppm``
    .. ``6.ppm`` is downscaled (never upscaled) so that
    ``max(height, width) <= max_edge``, cropped so both edges are a
    multiple of ``cell`` pixels, and written to ``output_dir``.  The
    ground-truth homographies ``H_1_2`` .. ``H_1_6`` are rescaled to match
    the resized images.

    Args:
        input_dir (str): dir containing hpatches-sequences-release.
        output_dir (str): dir where resized sequences are written
            (existing sequence dirs are removed first).
        max_edge (int): maximum allowed height or width after resizing.
    """
    cell = 16  # crop images so both edges are a multiple of this block size
    print('Resize all images in {0} and save into {1}.'.format(input_dir, output_dir))
    for seq_name in os.listdir(input_dir):
        seq_path = os.path.join(output_dir, seq_name)
        if os.path.exists(seq_path):
            shutil.rmtree(seq_path)  # start from a clean output dir
        os.makedirs(seq_path)
        print(seq_path)
        scale = 1.
        for i in range(1,7):
            img_path = os.path.join(input_dir, seq_name, str(i) + '.ppm')
            result_path = os.path.join(output_dir, seq_name, str(i) + '.ppm')
            print('    Resize image {0} and save into {1}.'.format(img_path, result_path))
            img = cv2.imread(img_path)
            height_init, width_init, _ = img.shape
            # Downscale factor that brings max(height, width) to max_edge,
            # clamped at 1 so already-small images are left untouched.
            # (Bug fix: previously a scale > 1 from the last image leaked
            # into the homography rescaling below even though no image was
            # actually resized.)
            scale = min(max_edge / height_init, max_edge / width_init, 1.)
            if scale < 1:
                img = cv2.resize(img, (int(scale*width_init), int(scale*height_init)), interpolation=cv2.INTER_LINEAR)
            # Crop (not pad) down to a multiple of `cell`; the origin is
            # unchanged, so the homographies stay valid.
            height_out = int(img.shape[0]/cell)*cell
            width_out = int(img.shape[1]/cell)*cell
            img = img[:height_out, :width_out, :]
            cv2.imwrite(result_path, img)
        # NOTE(review): a single `scale` (from the last image) is applied
        # to every H_1_i; this is only exact when all images of a sequence
        # share the same dimensions -- TODO confirm for this dataset.
        for i in range(2, 7):
            homo_path = os.path.join(input_dir, seq_name, 'H_1_' + str(i))
            result_path = os.path.join(output_dir, seq_name, 'H_1_' + str(i))
            homography = np.loadtxt(homo_path)
            # H maps image-1 pixels to image-i pixels: the translation
            # column scales with the output resolution, the perspective
            # row with its inverse.
            homography[0,2] = homography[0,2] * scale
            homography[1,2] = homography[1,2] * scale
            homography[2,0] = homography[2,0] / scale
            homography[2,1] = homography[2,1] / scale
            np.savetxt(result_path, homography)
if __name__ == '__main__':
    # Parse the CLI options declared above and run the resize pipeline.
    args = parser.parse_args()
    resize_imgs(args.input_dir, args.output_dir, args.max_edge)
| [
"os.makedirs",
"argparse.ArgumentParser",
"cv2.imwrite",
"numpy.savetxt",
"os.path.exists",
"cv2.imread",
"numpy.loadtxt",
"shutil.rmtree",
"os.path.join",
"os.listdir"
] | [((163, 234), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Resize HPatches sequence images."""'}), "(description='Resize HPatches sequence images.')\n", (186, 234), False, 'import argparse\n'), ((879, 900), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (889, 900), False, 'import os\n'), ((921, 955), 'os.path.join', 'os.path.join', (['output_dir', 'seq_name'], {}), '(output_dir, seq_name)\n', (933, 955), False, 'import os\n'), ((967, 991), 'os.path.exists', 'os.path.exists', (['seq_path'], {}), '(seq_path)\n', (981, 991), False, 'import os\n'), ((1037, 1058), 'os.makedirs', 'os.makedirs', (['seq_path'], {}), '(seq_path)\n', (1048, 1058), False, 'import os\n'), ((1005, 1028), 'shutil.rmtree', 'shutil.rmtree', (['seq_path'], {}), '(seq_path)\n', (1018, 1028), False, 'import shutil\n'), ((1391, 1411), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1401, 1411), False, 'import cv2\n'), ((2080, 2109), 'cv2.imwrite', 'cv2.imwrite', (['result_path', 'img'], {}), '(result_path, img)\n', (2091, 2109), False, 'import cv2\n'), ((2319, 2340), 'numpy.loadtxt', 'np.loadtxt', (['homo_path'], {}), '(homo_path)\n', (2329, 2340), True, 'import numpy as np\n'), ((2582, 2617), 'numpy.savetxt', 'np.savetxt', (['result_path', 'homography'], {}), '(result_path, homography)\n', (2592, 2617), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from astropy.io import fits
from astropy.wcs import WCS
from skimage.draw import polygon
if __name__ == '__main__':
    import sys
    # Optional CLI argument selects a single detector; default is all.
    det = sys.argv[1] if (len(sys.argv) > 1) else 'all'
    df = pd.read_csv('../header_info.csv', index_col=0)
    if det != 'all':
        df = df.query(f'DETECTOR == "{det}"')
    # Reference WCS that defines the pixel grid of the exposure map.
    w = WCS('data/M33_SDSS9_r.fits')
    # NOTE(review): `_naxis` is a private astropy.wcs attribute -- confirm
    # it still holds (nx, ny) for the astropy version in use.
    img = np.zeros(w._naxis[::-1], dtype=np.float32)
    for i, row in df.iterrows():
        # Chip 1 footprint: four RA/Dec corners -> pixel polygon.
        ra = row.filter(regex='^RA_CHIP1_[0-3]$').astype(float)
        dec = row.filter(regex='^DEC_CHIP1_[0-3]$').astype(float)
        xpix, ypix = np.round(w.all_world2pix(ra, dec, 0)).astype(int)
        rr, cc = polygon(ypix, xpix, img.shape)
        # Accumulate the exposure time over the footprint pixels.
        img[rr, cc] += row.EXPTIME
        # Chip 2 corners may be missing (NaN); add them only when complete.
        if np.isfinite(row.filter(regex='^RA_CHIP2_[0-3]$').astype(float)).all():
            ra = row.filter(regex='^RA_CHIP2_[0-3]$').astype(float)
            dec = row.filter(regex='^DEC_CHIP2_[0-3]$').astype(float)
            xpix, ypix = np.round(w.all_world2pix(ra, dec, 0)).astype(int)
            rr, cc = polygon(ypix, xpix, img.shape)
            img[rr, cc] += row.EXPTIME
    # Write the exposure map with the reference WCS in the header.
    wh2 = w.to_header()
    hdu = fits.PrimaryHDU(header=wh2, data=img)
    hdulist = fits.HDUList([hdu])
hdulist.writeto(f'data/exposure_map_{det}_small.fits', overwrite=True) | [
"skimage.draw.polygon",
"pandas.read_csv",
"astropy.io.fits.PrimaryHDU",
"numpy.zeros",
"astropy.wcs.WCS",
"astropy.io.fits.HDUList"
] | [((237, 283), 'pandas.read_csv', 'pd.read_csv', (['"""../header_info.csv"""'], {'index_col': '(0)'}), "('../header_info.csv', index_col=0)\n", (248, 283), True, 'import pandas as pd\n'), ((359, 387), 'astropy.wcs.WCS', 'WCS', (['"""data/M33_SDSS9_r.fits"""'], {}), "('data/M33_SDSS9_r.fits')\n", (362, 387), False, 'from astropy.wcs import WCS\n'), ((399, 441), 'numpy.zeros', 'np.zeros', (['w._naxis[::-1]'], {'dtype': 'np.float32'}), '(w._naxis[::-1], dtype=np.float32)\n', (407, 441), True, 'import numpy as np\n'), ((1180, 1217), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'header': 'wh2', 'data': 'img'}), '(header=wh2, data=img)\n', (1195, 1217), False, 'from astropy.io import fits\n'), ((1232, 1251), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu]'], {}), '([hdu])\n', (1244, 1251), False, 'from astropy.io import fits\n'), ((693, 723), 'skimage.draw.polygon', 'polygon', (['ypix', 'xpix', 'img.shape'], {}), '(ypix, xpix, img.shape)\n', (700, 723), False, 'from skimage.draw import polygon\n'), ((1075, 1105), 'skimage.draw.polygon', 'polygon', (['ypix', 'xpix', 'img.shape'], {}), '(ypix, xpix, img.shape)\n', (1082, 1105), False, 'from skimage.draw import polygon\n')] |
# ****************************************************************
# library import block
# ****************************************************************
import numpy as np
import tensorflow as tf
import pandas as pd
import os
import logging
import time
import sys
from scipy.cluster.vq import kmeans
import pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
plt.switch_backend('agg')  # headless plotting (cluster/batch use)
float_type = tf.float64  # dtype used for all TF graph ops below
jitter_level = 1e-5  # diagonal jitter added to kernel matrices for stability
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs
def onoff(Xtrain,Ytrain,Xtest,Ytest,dir):
tf.reset_default_graph()
parentDir = "/l/hegdep1/onoffgp/uai/experiments/pptr"
sys.path.append(parentDir)
from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron
from onofftf.utils import modelmanager
from gpflow import transforms
modelPath = dir
tbPath = dir
logPath = dir + 'modelsumm.log'
logger = logging.getLogger('log')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.FileHandler(logPath))
logger.info("traning size = " + str(Xtrain.shape[0]))
logger.info("test size = " + str(Xtest.shape[0]))
traindf = pd.DataFrame({'ndatehour':Xtrain[:,2].flatten()*1000,'pptr':Ytrain.flatten()})
train_data = DataSet(Xtrain, Ytrain)
logger.info("number of training examples:" + str(Xtrain.shape))
# ****************************************************************
# parameter initializations
# ****************************************************************
list_to_np = lambda _list : [np.array(e) for e in _list]
num_iter = 50000
num_inducing_f = np.array([10,100])
num_inducing_g = np.array([10,100])
num_data = Xtrain.shape[0]
num_minibatch = 1000
init_fkell = list_to_np([[8., 8.],[5./1000]])
init_fkvar = list_to_np([[20.],[20.]])
init_gkell = list_to_np([[8.,8.],[5./1000]])
init_gkvar = list_to_np([[10.],[10.]])
init_noisevar = 0.01
q_diag = True
init_Zf_s = kmeans(Xtrain[:,0:2],num_inducing_f[0])[0]
init_Zf_t = np.expand_dims(np.linspace(Xtrain[:,2].min(),Xtrain[:,2].max(),num_inducing_f[1]),axis=1)
init_Zf = [init_Zf_s,init_Zf_t]
init_u_fm = np.random.randn(np.prod(num_inducing_f),1)*0.1
init_u_fs_sqrt = np.ones(np.prod(num_inducing_f)).reshape(1,-1).T
init_Zg = init_Zf.copy()
init_u_gm = np.random.randn(np.prod(num_inducing_g),1)*0.1
init_u_gs_sqrt = np.ones(np.prod(num_inducing_g)).reshape(1,-1).T
kern_param_learning_rate = 1e-3
indp_param_learning_rate = 1e-3
# ****************************************************************
# define tensorflow variables and placeholders
# ****************************************************************
X = tf.placeholder(dtype = float_type)
Y = tf.placeholder(dtype = float_type)
with tf.name_scope("f_kern"):
fkell = [Param(init_fkell[i],transform=transforms.Log1pe(),
name="lengthscale",learning_rate = kern_param_learning_rate,summ=True)
for i in range(len(num_inducing_f))]
fkvar = [Param(init_fkvar[i],transform=transforms.Log1pe(),
name="variance",learning_rate = kern_param_learning_rate,summ=True)
for i in range(len(num_inducing_f))]
fkern_list = [KernSE(fkell[i],fkvar[i]) for i in range(len(num_inducing_f))]
with tf.name_scope("g_kern"):
gkell = [Param(init_gkell[i],transform=transforms.Log1pe(),
name="lengthscale",learning_rate = kern_param_learning_rate,summ=True)
for i in range(len(num_inducing_g))]
gkvar = [Param(init_gkvar[i],transform=transforms.Log1pe(),
name="variance",learning_rate = kern_param_learning_rate,summ=True)
for i in range(len(num_inducing_g))]
gkern_list = [KernSE(gkell[i],gkvar[i]) for i in range(len(num_inducing_g))]
with tf.name_scope("likelihood"):
noisevar = Param(init_noisevar,transform=transforms.Log1pe(),
name="variance",learning_rate = kern_param_learning_rate,summ=True)
with tf.name_scope("f_ind"):
Zf_list = [Param(init_Zf[i],name="z",learning_rate = indp_param_learning_rate,summ=True)
for i in range(len(num_inducing_f))]
u_fm = Param(init_u_fm,name="value",learning_rate = indp_param_learning_rate,summ=True)
if q_diag:
u_fs_sqrt = Param(init_u_fs_sqrt,transforms.positive,
name="variance",learning_rate = indp_param_learning_rate,summ=True)
else:
u_fs_sqrt = Param(init_u_fs_sqrt,transforms.LowerTriangular(init_u_fs_sqrt.shape[0]),
name="variance",learning_rate = indp_param_learning_rate,summ=True)
with tf.name_scope("g_ind"):
Zg_list = [Param(init_Zg[i],name="z",learning_rate = indp_param_learning_rate,summ=True)
for i in range(len(num_inducing_g))]
u_gm = Param(init_u_gm,name="value",learning_rate = indp_param_learning_rate,summ=True)
if q_diag:
u_gs_sqrt = Param(init_u_gs_sqrt,transforms.positive,
name="variance",learning_rate = indp_param_learning_rate,summ=True)
else:
u_gs_sqrt = Param(init_u_gs_sqrt,transforms.LowerTriangular(init_u_gs_sqrt.shape[0]),
name="variance",learning_rate = indp_param_learning_rate,summ=True)
# ****************************************************************
# define model support functions
# ****************************************************************
    def build_prior_kl(u_fm, u_fs_sqrt, fkern_list, Zf_list,
                       u_gm, u_gs_sqrt, gkern_list, Zg_list, whiten=False):
        """KL divergence of the variational posteriors q(u_f), q(u_g) from
        their Kronecker-structured GP priors.

        Args:
            u_fm/u_gm: Params, variational means of the inducing outputs.
            u_fs_sqrt/u_gs_sqrt: Params, square-root parameters of the
                variational covariances.
            fkern_list/gkern_list: per-factor kernels of the prior.
            Zf_list/Zg_list: per-factor inducing inputs.
            whiten: whitened representation is not implemented.

        Returns:
            Scalar tensor: KL[q(u_f)||p(u_f)] + KL[q(u_g)||p(u_g)].
        """
        if whiten:
            raise NotImplementedError()
        else:
            # Per-factor prior covariances at the inducing points, with a
            # small jitter on the diagonal for numerical stability.
            Kfmm = [fkern_list[i].K(Zf_list[i].get_tfv()) + \
                    tf.eye(num_inducing_f[i], dtype=float_type) * jitter_level
                    for i in range(len(num_inducing_f))]
            Kgmm = [gkern_list[i].K(Zg_list[i].get_tfv()) + \
                    tf.eye(num_inducing_g[i], dtype=float_type) * jitter_level
                    for i in range(len(num_inducing_g))]
            # GaussKLkron treats each list as the factors of a Kronecker
            # product covariance (num_inducing_f/g come from the closure).
            KL = GaussKLkron(u_fm.get_tfv(), u_fs_sqrt.get_tfv(), Kfmm) + \
                 GaussKLkron(u_gm.get_tfv(), u_gs_sqrt.get_tfv(), Kgmm)
        return KL
    def build_predict(Xnew,u_fm,u_fs_sqrt,fkern_list,Zf_list,u_gm,u_gs_sqrt,gkern_list,Zg_list,f_mu=None):
        """Predictive moments of the on/off GP model at inputs Xnew.

        f is the latent value function and g the latent on/off function;
        the modelled output combines them as phi(g) * f, where phi is the
        clipped probit implemented in probit_expectations.

        Returns (in order): gfmean, gfvar, gfmeanu, fmean, fvar, gmean,
        gvar, ephi_g, evar_phi_g.
        """
        input_mask_f = _gen_inp_mask(Zf_list)
        input_mask_g = _gen_inp_mask(Zg_list)
        # Posterior mean/variance of f from the Kronecker-structured
        # inducing-point approximation.
        fmean,fvar = kron_inf(Xnew,fkern_list,Zf_list,u_fm,u_fs_sqrt,num_inducing_f,input_mask_f)
        # Optional constant mean offset for f.
        if not f_mu is None :
            fmean = fmean + f_mu.get_tfv()
        # Posterior mean/variance of the on/off process g.
        gmean,gvar = kron_inf(Xnew,gkern_list,Zg_list,u_gm,u_gs_sqrt,num_inducing_g,input_mask_g)
        # Moments of the probit-squashed g: E[phi], E[phi^2], Var[phi].
        ephi_g, ephi2_g, evar_phi_g = probit_expectations(gmean, gvar)
        # Moments of the product phi(g) * f:
        # p(f|g) = N(f| diag(ephi_g)* A*u_fm, diag(evar_phi_g)) * (Kfnn + A(u_fs - Kfmm)t(A)))
        gfmean = tf.multiply(ephi_g, fmean)
        gfvar = tf.multiply(ephi2_g, fvar)
        gfmeanu = tf.multiply(evar_phi_g, tf.square(fmean))
        # return mean and variance vectors in order
        return gfmean, gfvar, gfmeanu, fmean, fvar, gmean, gvar, ephi_g, evar_phi_g
    def kron_inf(Xnew,kern_list,Z_list,q_mu,q_sqrt,num_inducing,input_mask):
        """Sparse-GP predictive mean and marginal variance with a
        Kronecker-factored inducing grid.

        Args:
            Xnew: [N, D] inputs; columns are split between the factors
                according to `input_mask`.
            kern_list, Z_list: per-factor kernels and inducing inputs.
            q_mu, q_sqrt: variational mean and sqrt-covariance Params.
            num_inducing: per-factor inducing-point counts.
            input_mask: per-factor column indices into Xnew.

        Returns:
            (mu, var): [N, 1] predictive mean and marginal variance.

        NOTE(review): the Kmn assembly below uses Kmn_kron[0] and
        Kmn_kron[1] only, so exactly two Kronecker factors are assumed
        even though the earlier loops are written generically -- confirm.
        """
        # alpha = Kmm^-1 q_mu, computed factor-wise with the Kronecker
        # matrix-vector product helper.
        Kmm = [kern_list[p].K(Z_list[p].get_tfv()) + \
               tf.eye(num_inducing[p], dtype=float_type) * jitter_level
               for p in range(len(num_inducing))]
        Kmm_inv = [tf.matrix_inverse(Kmm[p]) for p in range(len(num_inducing))]
        alpha = __kron_mv(Kmm_inv,q_mu.get_tfv())
        n_batch = tf.stack([tf.shape(Xnew)[0],np.int32(1)])
        # Prior marginal variance: product of the per-factor Kdiag terms.
        Knn = tf.ones(n_batch, dtype=float_type)
        Kmn_kron = []
        for p in range(len(num_inducing)):
            xnew = tf.gather(Xnew, input_mask[p], axis=1)
            Knn *= tf.reshape(kern_list[p].Kdiag(xnew), n_batch)
            Kmn_kron.append(kern_list[p].K(Z_list[p].get_tfv(), xnew))
        # S: diagonal variational covariance (q_sqrt squared).
        S = tf.diag(tf.squeeze(tf.square(q_sqrt.get_tfv())))
        # Column-wise Kronecker (Khatri-Rao style) combination of the two
        # factor cross-covariance blocks into the full [M, N] Kmn.
        Kmn = tf.reshape(tf.multiply(tf.expand_dims(Kmn_kron[0],1),Kmn_kron[1]),[np.prod(num_inducing),-1])
        A = tf.matmul(tf_kron(*Kmm_inv),Kmn)
        mu = tf.matmul(Kmn, alpha, transpose_a=True)
        # var = Knn - diag(Kmn^T A) + diag(A^T S A), with A = Kmm^-1 Kmn.
        var = Knn - tf.reshape(tf.matrix_diag_part(tf.matmul(Kmn, A,transpose_a=True) - \
                    tf.matmul(tf.matmul(A,S,transpose_a=True),A)),[-1,1])
        return mu , var
    def __kron_mv( As, x):
        """Multiply x by kron(As[0], ..., As[-1]) without materializing
        the full Kronecker matrix.

        Args:
            As: list of square matrices (the Kronecker factors).
            x: vector with prod(factor sizes) entries.

        Returns:
            [N, 1] tensor, N = product of the factor sizes.

        NOTE(review): `np.int` is deprecated/removed in recent NumPy --
        this only works with the older NumPy this project targets.
        """
        num_inducing = [int(As[p].get_shape()[0]) for p in range(len(As))]
        N = np.prod(num_inducing)
        b = tf.reshape(x, [N,1])
        for p in range(len(As)):
            Ap = As[p]
            # Reshape-to-matrix / multiply / flatten applies one factor of
            # the Kronecker matrix-vector product per iteration.
            X = tf.reshape(b, (num_inducing[p],
                               np.round(N/num_inducing[p]).astype(np.int)))
            b = tf.matmul(X, Ap, transpose_a=True, transpose_b=True)
            b = tf.reshape(b, [N,1])
        return b
    def tf_kron(*args):
        """Kronecker product of the given 2-D tensors.

        Folds the factors left to right, starting from the 1x1 scalar
        seed [[1.]] (the Kronecker identity).
        """
        def __tf_kron(a,b):
            # kron(a, b) via broadcasting: a[i,k] * b[j,l] laid out on a
            # 4-D grid, then collapsed so result[(i,j),(k,l)] = a[i,k]*b[j,l].
            a_shape = [tf.shape(a)[0],tf.shape(a)[1]]
            b_shape = [tf.shape(b)[0],tf.shape(b)[1]]
            return tf.reshape(tf.reshape(a,[a_shape[0],1,a_shape[1],1])* \
                              tf.reshape(b,[1,b_shape[0],1,b_shape[1]]),
                              [a_shape[0]*b_shape[0],a_shape[1]*b_shape[1]])
        kron_pord = tf.constant(1.,shape=[1,1],dtype=float_type)
        for Ap in args:
            kron_pord = __tf_kron(kron_pord,Ap)
        return kron_pord
def _gen_inp_mask(Z_list):
input_mask = []
tmp = 0
for p in range(len(Z_list)):
p_dim = Z_list[p].shape[1]
input_mask.append(np.arange(tmp, tmp + p_dim, dtype=np.int32))
tmp += p_dim
return input_mask
def variational_expectations(Y, fmu, fvar, fmuvar, noisevar):
return -0.5 * np.log(2 * np.pi) - 0.5 * tf.log(noisevar) \
- 0.5 * (tf.square(Y - fmu) + fvar + fmuvar) / noisevar
def probit_expectations(gmean, gvar):
def normcdf(x):
return 0.5 * (1.0 + tf.erf(x / np.sqrt(2.0))) * (1. - 2.e-3) + 1.e-3
def owent(h, a):
h = tf.abs(h)
term1 = tf.atan(a) / (2 * np.pi)
term2 = tf.exp((-1 / 2) * (tf.multiply(tf.square(h), (tf.square(a) + 1))))
return tf.multiply(term1, term2)
z = gmean / tf.sqrt(1. + gvar)
a = 1 / tf.sqrt(1. + (2 * gvar))
cdfz = normcdf(z)
tz = owent(z, a)
ephig = cdfz
ephisqg = (cdfz - 2. * tz)
evarphig = (cdfz - 2. * tz - tf.square(cdfz))
# clip negative values from variance terms to zero
ephisqg = (ephisqg + tf.abs(ephisqg)) / 2.
evarphig = (evarphig + tf.abs(evarphig)) / 2.
return ephig, ephisqg, evarphig
# ****************************************************************
# build model and define lower bound
# ****************************************************************
# get kl term
with tf.name_scope("kl"):
kl = build_prior_kl(u_fm,u_fs_sqrt,fkern_list,Zf_list,
u_gm,u_gs_sqrt,gkern_list,Zg_list)
tf.summary.scalar('kl', kl)
# get augmented functions
with tf.name_scope("model_build"):
gfmean, gfvar, gfmeanu, fmean, fvar, gmean, gvar, pgmean, pgvar = build_predict(X,u_fm,u_fs_sqrt,fkern_list,Zf_list,
u_gm,u_gs_sqrt,gkern_list,Zg_list)
tf.summary.histogram('gfmean',gfmean)
tf.summary.histogram('gfvar',gfvar)
tf.summary.histogram('gfmeanu',gfmeanu)
tf.summary.histogram('fmean',fmean)
tf.summary.histogram('fvar',fvar)
tf.summary.histogram('gmean',gmean)
tf.summary.histogram('gvar',gvar)
tf.summary.histogram('pgmean',pgmean)
tf.summary.histogram('pgvar',pgvar)
# compute likelihood
with tf.name_scope("var_exp"):
var_exp = tf.reduce_sum(variational_expectations(Y,gfmean,gfvar,gfmeanu,noisevar.get_tfv()))
tf.summary.scalar('var_exp', var_exp)
# mini-batch scaling
scale = tf.cast(num_data, float_type) / tf.cast(num_minibatch, float_type)
var_exp_scaled = var_exp * scale
tf.summary.scalar('var_exp_scaled', var_exp_scaled)
# final lower bound
with tf.name_scope("cost"):
cost = -(var_exp_scaled - kl)
tf.summary.scalar('cost',cost)
# ****************************************************************
# define optimizer op
# ****************************************************************
all_var_list = tf.trainable_variables()
all_lr_list = [var._learning_rate for var in all_var_list]
train_opt_group = []
for group_learning_rate in set(all_lr_list):
_ind_bool = np.where(np.isin(np.array(all_lr_list),group_learning_rate))[0]
group_var_list = [all_var_list[ind] for ind in _ind_bool]
group_tf_optimizer = tf.train.AdamOptimizer(learning_rate = group_learning_rate)
group_grad_list = tf.gradients(cost,group_var_list)
group_grads_and_vars = list(zip(group_grad_list,group_var_list))
group_train_op = group_tf_optimizer.apply_gradients(group_grads_and_vars)
# Summarize all gradients
for grad, var in group_grads_and_vars:
tf.summary.histogram(var.name + '/gradient', grad)
train_opt_group.append({'names':[var.name for var in group_var_list],
'vars':group_var_list,
'learning_rate':group_learning_rate,
'grads':group_grad_list,
'train_op':group_train_op})
train_op = tf.group(*[group['train_op'] for group in train_opt_group])
# ****************************************************************
# define graph and run optimization
# ****************************************************************
sess = tf.InteractiveSession()
# model saver
saver = tf.train.Saver()
# tensorboard summary
summ_merged = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(tbPath,
graph=sess.graph)
sess.run(tf.global_variables_initializer())
logger.info('******* started optimization at ' + time.strftime('%Y%m%d-%H%M') + " *******")
optstime = time.time()
logger.info(
'{:>16s}'.format("iteration") + '{:>6s}'.format("time"))
for i in range(num_iter):
optstime = time.time()
batch = train_data.next_batch(num_minibatch)
try:
summary, _ = sess.run([summ_merged,train_op],
feed_dict={X : batch[0],
Y : batch[1]
})
if i% 200 == 0:
logger.info(
'{:>16d}'.format(i) + '{:>6.3f}'.format((time.time() - optstime)/60))
summary_writer.add_summary(summary,i)
summary_writer.flush()
if i% 10000 == 0:
modelmngr = modelmanager(saver, sess, modelPath)
modelmngr.save()
# ****************************************************************
# plot inducing monitoring plots
# ****************************************************************
lp_u_fm = u_fm.get_tfv().eval().flatten()
lp_u_gm = u_gm.get_tfv().eval().flatten()
lp_zf_t = Zf_list[1].get_tfv().eval().flatten()
lp_zg_t = Zg_list[1].get_tfv().eval().flatten()
lp_zf_sort_ind = np.argsort(lp_zf_t)
lp_zg_sort_ind = np.argsort(lp_zg_t)
scale_z = 1000
mpl.rcParams['figure.figsize'] = (16,8)
fig, (ax1,ax2,ax3) = plt.subplots(3, 1, sharex=True)
mean_pptr = traindf.groupby('ndatehour')['pptr'].mean()
ax1.bar(mean_pptr.index, mean_pptr.values, align='center')
for m in np.arange(num_inducing_f[0]):
u_fm_temporal = lp_u_fm[m*num_inducing_f[1]:(m+1)*num_inducing_f[1]]
ax2.plot(np.round(lp_zf_t[lp_zf_sort_ind] * scale_z,4),u_fm_temporal[lp_zf_sort_ind],alpha=0.7)
ax2.scatter(np.round(lp_zf_t[lp_zf_sort_ind] * scale_z,4),np.ones([num_inducing_f[1],1])*lp_u_fm.min(),color="#514A30")
for m in np.arange(num_inducing_g[0]):
u_gm_temporal = lp_u_gm[m*num_inducing_g[1]:(m+1)*num_inducing_g[1]]
ax3.plot(np.round(lp_zg_t[lp_zg_sort_ind] * scale_z,4),u_gm_temporal[lp_zg_sort_ind],alpha=0.7)
ax3.scatter(np.round(lp_zg_t[lp_zg_sort_ind] * scale_z,4),np.ones([num_inducing_g[1],1])*lp_u_gm.min(),color="#514A30")
fig.savefig(dir +"inducing_"+str(i)+".png")
except KeyboardInterrupt as e:
print("Stopping training")
break
modelmngr = modelmanager(saver, sess, modelPath)
modelmngr.save()
summary_writer.close()
# ****************************************************************
# param summary
# ****************************************************************
logger.info("Noise variance = " + str(noisevar.get_tfv().eval()))
logger.info("Kf spatial lengthscale = " + str(fkell[0].get_tfv().eval()))
logger.info("Kf spatial variance = " + str(fkvar[0].get_tfv().eval()))
logger.info("Kf temporal lengthscale = " + str(fkell[1].get_tfv().eval()))
logger.info("Kf temporal variance = " + str(fkvar[1].get_tfv().eval()))
logger.info("Kg spatial lengthscale = " + str(gkell[0].get_tfv().eval()))
logger.info("Kg spatial variance = " + str(gkvar[0].get_tfv().eval()))
logger.info("Kg temporal lengthscale = " + str(gkell[1].get_tfv().eval()))
logger.info("Kg temporal variance = " + str(gkvar[1].get_tfv().eval()))
# ****************************************************************
# model predictions
# ****************************************************************
# get test and training predictions
# def predict_onoff(Xtrain,Xtest):
# pred_train = np.maximum(gfmean.eval(feed_dict = {X:Xtrain}),0)
# pred_test = np.maximum(gfmean.eval(feed_dict = {X:Xtest}),0)
# return pred_train, pred_test
#
# pred_train, pred_test = predict_onoff(Xtrain,Xtest)
#
# train_rmse = np.sqrt(np.mean((pred_train - Ytrain)**2))
# train_mae = np.mean(np.abs(pred_train - Ytrain))
# test_rmse = np.sqrt(np.mean((pred_test - Ytest)**2))
# test_mae = np.mean(np.abs(pred_test - Ytest))
#
# logger.info("train rmse:"+str(train_rmse))
# logger.info("train mae:"+str(train_mae))
#
# logger.info("test rmse:"+str(test_rmse))
# logger.info("test mae:"+str(test_mae))
# logger.removeHandler(logger.handlers)
def predict_onoff(Xtest):
pred_test = np.maximum(gfmean.eval(feed_dict = {X:Xtest}),0)
return pred_test
pred_test = predict_onoff(Xtest)
test_rmse = np.sqrt(np.mean((pred_test - Ytest)**2))
test_mae = np.mean(np.abs(pred_test - Ytest))
logger.info("test rmse:"+str(test_rmse))
logger.info("test mae:"+str(test_mae))
logger.removeHandler(logger.handlers)
# ****************************************************************
# return values
# ****************************************************************
retdict = {'Xtrain':Xtrain,'Ytrain':Ytrain,
'Xtest':Xtest,'Ytest':Ytest,
# 'rawpred_train':gfmean.eval(feed_dict = {X:Xtrain}),
# 'rawpred_test':gfmean.eval(feed_dict = {X:Xtest}),
# 'pred_train':pred_train,
# 'pred_test':pred_test,
# 'train_rmse':train_rmse,
# 'train_mae':train_mae,
'test_rmse':test_rmse,
'test_mae':test_mae
# ,'train_log_evidence': -cost.eval({X : Xtrain,Y : Ytrain})
}
return retdict
| [
"gpflow.transforms.Log1pe",
"numpy.abs",
"tensorflow.trainable_variables",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"time.strftime",
"numpy.ones",
"numpy.argsort",
"tensorflow.multiply",
"tensorflow.matmul",
"numpy.mean",
"numpy.arange",
"tensorflow.InteractiveSession",
"ten... | [((375, 400), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (393, 400), True, 'import matplotlib.pyplot as plt\n'), ((535, 559), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (557, 559), True, 'import tensorflow as tf\n'), ((622, 648), 'sys.path.append', 'sys.path.append', (['parentDir'], {}), '(parentDir)\n', (637, 648), False, 'import sys\n'), ((909, 933), 'logging.getLogger', 'logging.getLogger', (['"""log"""'], {}), "('log')\n", (926, 933), False, 'import logging\n'), ((1248, 1271), 'onofftf.main.DataSet', 'DataSet', (['Xtrain', 'Ytrain'], {}), '(Xtrain, Ytrain)\n', (1255, 1271), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((1620, 1639), 'numpy.array', 'np.array', (['[10, 100]'], {}), '([10, 100])\n', (1628, 1639), True, 'import numpy as np\n'), ((1660, 1679), 'numpy.array', 'np.array', (['[10, 100]'], {}), '([10, 100])\n', (1668, 1679), True, 'import numpy as np\n'), ((2741, 2773), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'float_type'}), '(dtype=float_type)\n', (2755, 2773), True, 'import tensorflow as tf\n'), ((2784, 2816), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'float_type'}), '(dtype=float_type)\n', (2798, 2816), True, 'import tensorflow as tf\n'), ((13013, 13037), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (13035, 13037), True, 'import tensorflow as tf\n'), ((14114, 14173), 'tensorflow.group', 'tf.group', (["*[group['train_op'] for group in train_opt_group]"], {}), "(*[group['train_op'] for group in train_opt_group])\n", (14122, 14173), True, 'import tensorflow as tf\n'), ((14370, 14393), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (14391, 14393), True, 'import tensorflow as tf\n'), ((14425, 14441), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (14439, 14441), True, 'import tensorflow as tf\n'), 
((14487, 14509), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (14507, 14509), True, 'import tensorflow as tf\n'), ((14531, 14578), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['tbPath'], {'graph': 'sess.graph'}), '(tbPath, graph=sess.graph)\n', (14552, 14578), True, 'import tensorflow as tf\n'), ((14786, 14797), 'time.time', 'time.time', ([], {}), '()\n', (14795, 14797), False, 'import time\n'), ((17429, 17465), 'onofftf.utils.modelmanager', 'modelmanager', (['saver', 'sess', 'modelPath'], {}), '(saver, sess, modelPath)\n', (17441, 17465), False, 'from onofftf.utils import modelmanager\n'), ((991, 1019), 'logging.FileHandler', 'logging.FileHandler', (['logPath'], {}), '(logPath)\n', (1010, 1019), False, 'import logging\n'), ((1984, 2025), 'scipy.cluster.vq.kmeans', 'kmeans', (['Xtrain[:, 0:2]', 'num_inducing_f[0]'], {}), '(Xtrain[:, 0:2], num_inducing_f[0])\n', (1990, 2025), False, 'from scipy.cluster.vq import kmeans\n'), ((2829, 2852), 'tensorflow.name_scope', 'tf.name_scope', (['"""f_kern"""'], {}), "('f_kern')\n", (2842, 2852), True, 'import tensorflow as tf\n'), ((3303, 3329), 'onofftf.main.KernSE', 'KernSE', (['fkell[i]', 'fkvar[i]'], {}), '(fkell[i], fkvar[i])\n', (3309, 3329), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((3376, 3399), 'tensorflow.name_scope', 'tf.name_scope', (['"""g_kern"""'], {}), "('g_kern')\n", (3389, 3399), True, 'import tensorflow as tf\n'), ((3850, 3876), 'onofftf.main.KernSE', 'KernSE', (['gkell[i]', 'gkvar[i]'], {}), '(gkell[i], gkvar[i])\n', (3856, 3876), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((3923, 3950), 'tensorflow.name_scope', 'tf.name_scope', (['"""likelihood"""'], {}), "('likelihood')\n", (3936, 3950), True, 'import tensorflow as tf\n'), ((4126, 4148), 'tensorflow.name_scope', 'tf.name_scope', (['"""f_ind"""'], {}), "('f_ind')\n", (4139, 4148), True, 'import tensorflow 
as tf\n'), ((4319, 4405), 'onofftf.main.Param', 'Param', (['init_u_fm'], {'name': '"""value"""', 'learning_rate': 'indp_param_learning_rate', 'summ': '(True)'}), "(init_u_fm, name='value', learning_rate=indp_param_learning_rate, summ\n =True)\n", (4324, 4405), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((4804, 4826), 'tensorflow.name_scope', 'tf.name_scope', (['"""g_ind"""'], {}), "('g_ind')\n", (4817, 4826), True, 'import tensorflow as tf\n'), ((4997, 5083), 'onofftf.main.Param', 'Param', (['init_u_gm'], {'name': '"""value"""', 'learning_rate': 'indp_param_learning_rate', 'summ': '(True)'}), "(init_u_gm, name='value', learning_rate=indp_param_learning_rate, summ\n =True)\n", (5002, 5083), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((7281, 7307), 'tensorflow.multiply', 'tf.multiply', (['ephi_g', 'fmean'], {}), '(ephi_g, fmean)\n', (7292, 7307), True, 'import tensorflow as tf\n'), ((7324, 7350), 'tensorflow.multiply', 'tf.multiply', (['ephi2_g', 'fvar'], {}), '(ephi2_g, fvar)\n', (7335, 7350), True, 'import tensorflow as tf\n'), ((8049, 8083), 'tensorflow.ones', 'tf.ones', (['n_batch'], {'dtype': 'float_type'}), '(n_batch, dtype=float_type)\n', (8056, 8083), True, 'import tensorflow as tf\n'), ((8575, 8614), 'tensorflow.matmul', 'tf.matmul', (['Kmn', 'alpha'], {'transpose_a': '(True)'}), '(Kmn, alpha, transpose_a=True)\n', (8584, 8614), True, 'import tensorflow as tf\n'), ((8930, 8951), 'numpy.prod', 'np.prod', (['num_inducing'], {}), '(num_inducing)\n', (8937, 8951), True, 'import numpy as np\n'), ((8964, 8985), 'tensorflow.reshape', 'tf.reshape', (['x', '[N, 1]'], {}), '(x, [N, 1])\n', (8974, 8985), True, 'import tensorflow as tf\n'), ((9697, 9745), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[1, 1]', 'dtype': 'float_type'}), '(1.0, shape=[1, 1], dtype=float_type)\n', (9708, 9745), True, 'import tensorflow as tf\n'), ((11357, 11376), 
'tensorflow.name_scope', 'tf.name_scope', (['"""kl"""'], {}), "('kl')\n", (11370, 11376), True, 'import tensorflow as tf\n'), ((11512, 11539), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""kl"""', 'kl'], {}), "('kl', kl)\n", (11529, 11539), True, 'import tensorflow as tf\n'), ((11580, 11608), 'tensorflow.name_scope', 'tf.name_scope', (['"""model_build"""'], {}), "('model_build')\n", (11593, 11608), True, 'import tensorflow as tf\n'), ((11873, 11911), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""gfmean"""', 'gfmean'], {}), "('gfmean', gfmean)\n", (11893, 11911), True, 'import tensorflow as tf\n'), ((11919, 11955), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""gfvar"""', 'gfvar'], {}), "('gfvar', gfvar)\n", (11939, 11955), True, 'import tensorflow as tf\n'), ((11963, 12003), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""gfmeanu"""', 'gfmeanu'], {}), "('gfmeanu', gfmeanu)\n", (11983, 12003), True, 'import tensorflow as tf\n'), ((12011, 12047), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""fmean"""', 'fmean'], {}), "('fmean', fmean)\n", (12031, 12047), True, 'import tensorflow as tf\n'), ((12055, 12089), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""fvar"""', 'fvar'], {}), "('fvar', fvar)\n", (12075, 12089), True, 'import tensorflow as tf\n'), ((12097, 12133), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""gmean"""', 'gmean'], {}), "('gmean', gmean)\n", (12117, 12133), True, 'import tensorflow as tf\n'), ((12141, 12175), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""gvar"""', 'gvar'], {}), "('gvar', gvar)\n", (12161, 12175), True, 'import tensorflow as tf\n'), ((12183, 12221), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""pgmean"""', 'pgmean'], {}), "('pgmean', pgmean)\n", (12203, 12221), True, 'import tensorflow as tf\n'), ((12229, 12265), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""pgvar"""', 'pgvar'], {}), 
"('pgvar', pgvar)\n", (12249, 12265), True, 'import tensorflow as tf\n'), ((12300, 12324), 'tensorflow.name_scope', 'tf.name_scope', (['"""var_exp"""'], {}), "('var_exp')\n", (12313, 12324), True, 'import tensorflow as tf\n'), ((12435, 12472), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""var_exp"""', 'var_exp'], {}), "('var_exp', var_exp)\n", (12452, 12472), True, 'import tensorflow as tf\n'), ((12636, 12687), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""var_exp_scaled"""', 'var_exp_scaled'], {}), "('var_exp_scaled', var_exp_scaled)\n", (12653, 12687), True, 'import tensorflow as tf\n'), ((12723, 12744), 'tensorflow.name_scope', 'tf.name_scope', (['"""cost"""'], {}), "('cost')\n", (12736, 12744), True, 'import tensorflow as tf\n'), ((12793, 12824), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cost"""', 'cost'], {}), "('cost', cost)\n", (12810, 12824), True, 'import tensorflow as tf\n'), ((13356, 13413), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'group_learning_rate'}), '(learning_rate=group_learning_rate)\n', (13378, 13413), True, 'import tensorflow as tf\n'), ((13442, 13476), 'tensorflow.gradients', 'tf.gradients', (['cost', 'group_var_list'], {}), '(cost, group_var_list)\n', (13454, 13476), True, 'import tensorflow as tf\n'), ((14637, 14670), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (14668, 14670), True, 'import tensorflow as tf\n'), ((14930, 14941), 'time.time', 'time.time', ([], {}), '()\n', (14939, 14941), False, 'import time\n'), ((19554, 19587), 'numpy.mean', 'np.mean', (['((pred_test - Ytest) ** 2)'], {}), '((pred_test - Ytest) ** 2)\n', (19561, 19587), True, 'import numpy as np\n'), ((19612, 19637), 'numpy.abs', 'np.abs', (['(pred_test - Ytest)'], {}), '(pred_test - Ytest)\n', (19618, 19637), True, 'import numpy as np\n'), ((1549, 1560), 'numpy.array', 'np.array', (['e'], {}), '(e)\n', (1557, 1560), True, 'import numpy as np\n'), 
((2202, 2225), 'numpy.prod', 'np.prod', (['num_inducing_f'], {}), '(num_inducing_f)\n', (2209, 2225), True, 'import numpy as np\n'), ((2365, 2388), 'numpy.prod', 'np.prod', (['num_inducing_g'], {}), '(num_inducing_g)\n', (2372, 2388), True, 'import numpy as np\n'), ((4169, 4247), 'onofftf.main.Param', 'Param', (['init_Zf[i]'], {'name': '"""z"""', 'learning_rate': 'indp_param_learning_rate', 'summ': '(True)'}), "(init_Zf[i], name='z', learning_rate=indp_param_learning_rate, summ=True)\n", (4174, 4247), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((4443, 4558), 'onofftf.main.Param', 'Param', (['init_u_fs_sqrt', 'transforms.positive'], {'name': '"""variance"""', 'learning_rate': 'indp_param_learning_rate', 'summ': '(True)'}), "(init_u_fs_sqrt, transforms.positive, name='variance', learning_rate=\n indp_param_learning_rate, summ=True)\n", (4448, 4558), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((4847, 4925), 'onofftf.main.Param', 'Param', (['init_Zg[i]'], {'name': '"""z"""', 'learning_rate': 'indp_param_learning_rate', 'summ': '(True)'}), "(init_Zg[i], name='z', learning_rate=indp_param_learning_rate, summ=True)\n", (4852, 4925), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((5121, 5236), 'onofftf.main.Param', 'Param', (['init_u_gs_sqrt', 'transforms.positive'], {'name': '"""variance"""', 'learning_rate': 'indp_param_learning_rate', 'summ': '(True)'}), "(init_u_gs_sqrt, transforms.positive, name='variance', learning_rate=\n indp_param_learning_rate, summ=True)\n", (5126, 5236), False, 'from onofftf.main import Param, DataSet, GaussKL, KernSE, GPConditional, GaussKLkron\n'), ((7393, 7409), 'tensorflow.square', 'tf.square', (['fmean'], {}), '(fmean)\n', (7402, 7409), True, 'import tensorflow as tf\n'), ((7863, 7888), 'tensorflow.matrix_inverse', 'tf.matrix_inverse', (['Kmm[p]'], {}), '(Kmm[p])\n', (7880, 7888), 
True, 'import tensorflow as tf\n'), ((8169, 8207), 'tensorflow.gather', 'tf.gather', (['Xnew', 'input_mask[p]'], {'axis': '(1)'}), '(Xnew, input_mask[p], axis=1)\n', (8178, 8207), True, 'import tensorflow as tf\n'), ((9181, 9233), 'tensorflow.matmul', 'tf.matmul', (['X', 'Ap'], {'transpose_a': '(True)', 'transpose_b': '(True)'}), '(X, Ap, transpose_a=True, transpose_b=True)\n', (9190, 9233), True, 'import tensorflow as tf\n'), ((9250, 9271), 'tensorflow.reshape', 'tf.reshape', (['b', '[N, 1]'], {}), '(b, [N, 1])\n', (9260, 9271), True, 'import tensorflow as tf\n'), ((10509, 10518), 'tensorflow.abs', 'tf.abs', (['h'], {}), '(h)\n', (10515, 10518), True, 'import tensorflow as tf\n'), ((10670, 10695), 'tensorflow.multiply', 'tf.multiply', (['term1', 'term2'], {}), '(term1, term2)\n', (10681, 10695), True, 'import tensorflow as tf\n'), ((10717, 10736), 'tensorflow.sqrt', 'tf.sqrt', (['(1.0 + gvar)'], {}), '(1.0 + gvar)\n', (10724, 10736), True, 'import tensorflow as tf\n'), ((10752, 10775), 'tensorflow.sqrt', 'tf.sqrt', (['(1.0 + 2 * gvar)'], {}), '(1.0 + 2 * gvar)\n', (10759, 10775), True, 'import tensorflow as tf\n'), ((10923, 10938), 'tensorflow.square', 'tf.square', (['cdfz'], {}), '(cdfz)\n', (10932, 10938), True, 'import tensorflow as tf\n'), ((12520, 12549), 'tensorflow.cast', 'tf.cast', (['num_data', 'float_type'], {}), '(num_data, float_type)\n', (12527, 12549), True, 'import tensorflow as tf\n'), ((12552, 12586), 'tensorflow.cast', 'tf.cast', (['num_minibatch', 'float_type'], {}), '(num_minibatch, float_type)\n', (12559, 12586), True, 'import tensorflow as tf\n'), ((13727, 13777), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["(var.name + '/gradient')", 'grad'], {}), "(var.name + '/gradient', grad)\n", (13747, 13777), True, 'import tensorflow as tf\n'), ((4001, 4020), 'gpflow.transforms.Log1pe', 'transforms.Log1pe', ([], {}), '()\n', (4018, 4020), False, 'from gpflow import transforms\n'), ((4642, 4693), 'gpflow.transforms.LowerTriangular', 
'transforms.LowerTriangular', (['init_u_fs_sqrt.shape[0]'], {}), '(init_u_fs_sqrt.shape[0])\n', (4668, 4693), False, 'from gpflow import transforms\n'), ((5320, 5371), 'gpflow.transforms.LowerTriangular', 'transforms.LowerTriangular', (['init_u_gs_sqrt.shape[0]'], {}), '(init_u_gs_sqrt.shape[0])\n', (5346, 5371), False, 'from gpflow import transforms\n'), ((8021, 8032), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (8029, 8032), True, 'import numpy as np\n'), ((8444, 8474), 'tensorflow.expand_dims', 'tf.expand_dims', (['Kmn_kron[0]', '(1)'], {}), '(Kmn_kron[0], 1)\n', (8458, 8474), True, 'import tensorflow as tf\n'), ((8488, 8509), 'numpy.prod', 'np.prod', (['num_inducing'], {}), '(num_inducing)\n', (8495, 8509), True, 'import numpy as np\n'), ((10018, 10061), 'numpy.arange', 'np.arange', (['tmp', '(tmp + p_dim)'], {'dtype': 'np.int32'}), '(tmp, tmp + p_dim, dtype=np.int32)\n', (10027, 10061), True, 'import numpy as np\n'), ((10539, 10549), 'tensorflow.atan', 'tf.atan', (['a'], {}), '(a)\n', (10546, 10549), True, 'import tensorflow as tf\n'), ((11029, 11044), 'tensorflow.abs', 'tf.abs', (['ephisqg'], {}), '(ephisqg)\n', (11035, 11044), True, 'import tensorflow as tf\n'), ((11082, 11098), 'tensorflow.abs', 'tf.abs', (['evarphig'], {}), '(evarphig)\n', (11088, 11098), True, 'import tensorflow as tf\n'), ((14728, 14756), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M"""'], {}), "('%Y%m%d-%H%M')\n", (14741, 14756), False, 'import time\n'), ((15517, 15553), 'onofftf.utils.modelmanager', 'modelmanager', (['saver', 'sess', 'modelPath'], {}), '(saver, sess, modelPath)\n', (15529, 15553), False, 'from onofftf.utils import modelmanager\n'), ((16082, 16101), 'numpy.argsort', 'np.argsort', (['lp_zf_t'], {}), '(lp_zf_t)\n', (16092, 16101), True, 'import numpy as np\n'), ((16135, 16154), 'numpy.argsort', 'np.argsort', (['lp_zg_t'], {}), '(lp_zg_t)\n', (16145, 16154), True, 'import numpy as np\n'), ((16280, 16311), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', 
'(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True)\n', (16292, 16311), True, 'import matplotlib.pyplot as plt\n'), ((16486, 16514), 'numpy.arange', 'np.arange', (['num_inducing_f[0]'], {}), '(num_inducing_f[0])\n', (16495, 16514), True, 'import numpy as np\n'), ((16883, 16911), 'numpy.arange', 'np.arange', (['num_inducing_g[0]'], {}), '(num_inducing_g[0])\n', (16892, 16911), True, 'import numpy as np\n'), ((2262, 2285), 'numpy.prod', 'np.prod', (['num_inducing_f'], {}), '(num_inducing_f)\n', (2269, 2285), True, 'import numpy as np\n'), ((2425, 2448), 'numpy.prod', 'np.prod', (['num_inducing_g'], {}), '(num_inducing_g)\n', (2432, 2448), True, 'import numpy as np\n'), ((2901, 2920), 'gpflow.transforms.Log1pe', 'transforms.Log1pe', ([], {}), '()\n', (2918, 2920), False, 'from gpflow import transforms\n'), ((3118, 3137), 'gpflow.transforms.Log1pe', 'transforms.Log1pe', ([], {}), '()\n', (3135, 3137), False, 'from gpflow import transforms\n'), ((3448, 3467), 'gpflow.transforms.Log1pe', 'transforms.Log1pe', ([], {}), '()\n', (3465, 3467), False, 'from gpflow import transforms\n'), ((3665, 3684), 'gpflow.transforms.Log1pe', 'transforms.Log1pe', ([], {}), '()\n', (3682, 3684), False, 'from gpflow import transforms\n'), ((7736, 7777), 'tensorflow.eye', 'tf.eye', (['num_inducing[p]'], {'dtype': 'float_type'}), '(num_inducing[p], dtype=float_type)\n', (7742, 7777), True, 'import tensorflow as tf\n'), ((8003, 8017), 'tensorflow.shape', 'tf.shape', (['Xnew'], {}), '(Xnew)\n', (8011, 8017), True, 'import tensorflow as tf\n'), ((9365, 9376), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (9373, 9376), True, 'import tensorflow as tf\n'), ((9380, 9391), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (9388, 9391), True, 'import tensorflow as tf\n'), ((9419, 9430), 'tensorflow.shape', 'tf.shape', (['b'], {}), '(b)\n', (9427, 9430), True, 'import tensorflow as tf\n'), ((9434, 9445), 'tensorflow.shape', 'tf.shape', (['b'], {}), '(b)\n', (9442, 9445), True, 'import 
tensorflow as tf\n'), ((9481, 9526), 'tensorflow.reshape', 'tf.reshape', (['a', '[a_shape[0], 1, a_shape[1], 1]'], {}), '(a, [a_shape[0], 1, a_shape[1], 1])\n', (9491, 9526), True, 'import tensorflow as tf\n'), ((9556, 9601), 'tensorflow.reshape', 'tf.reshape', (['b', '[1, b_shape[0], 1, b_shape[1]]'], {}), '(b, [1, b_shape[0], 1, b_shape[1]])\n', (9566, 9601), True, 'import tensorflow as tf\n'), ((10203, 10220), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (10209, 10220), True, 'import numpy as np\n'), ((10229, 10245), 'tensorflow.log', 'tf.log', (['noisevar'], {}), '(noisevar)\n', (10235, 10245), True, 'import tensorflow as tf\n'), ((13214, 13235), 'numpy.array', 'np.array', (['all_lr_list'], {}), '(all_lr_list)\n', (13222, 13235), True, 'import numpy as np\n'), ((16749, 16795), 'numpy.round', 'np.round', (['(lp_zf_t[lp_zf_sort_ind] * scale_z)', '(4)'], {}), '(lp_zf_t[lp_zf_sort_ind] * scale_z, 4)\n', (16757, 16795), True, 'import numpy as np\n'), ((17146, 17192), 'numpy.round', 'np.round', (['(lp_zg_t[lp_zg_sort_ind] * scale_z)', '(4)'], {}), '(lp_zg_t[lp_zg_sort_ind] * scale_z, 4)\n', (17154, 17192), True, 'import numpy as np\n'), ((5944, 5987), 'tensorflow.eye', 'tf.eye', (['num_inducing_f[i]'], {'dtype': 'float_type'}), '(num_inducing_f[i], dtype=float_type)\n', (5950, 5987), True, 'import tensorflow as tf\n'), ((6143, 6186), 'tensorflow.eye', 'tf.eye', (['num_inducing_g[i]'], {'dtype': 'float_type'}), '(num_inducing_g[i], dtype=float_type)\n', (6149, 6186), True, 'import tensorflow as tf\n'), ((8666, 8701), 'tensorflow.matmul', 'tf.matmul', (['Kmn', 'A'], {'transpose_a': '(True)'}), '(Kmn, A, transpose_a=True)\n', (8675, 8701), True, 'import tensorflow as tf\n'), ((10615, 10627), 'tensorflow.square', 'tf.square', (['h'], {}), '(h)\n', (10624, 10627), True, 'import tensorflow as tf\n'), ((16634, 16680), 'numpy.round', 'np.round', (['(lp_zf_t[lp_zf_sort_ind] * scale_z)', '(4)'], {}), '(lp_zf_t[lp_zf_sort_ind] * scale_z, 4)\n', (16642, 16680), 
True, 'import numpy as np\n'), ((16795, 16826), 'numpy.ones', 'np.ones', (['[num_inducing_f[1], 1]'], {}), '([num_inducing_f[1], 1])\n', (16802, 16826), True, 'import numpy as np\n'), ((17031, 17077), 'numpy.round', 'np.round', (['(lp_zg_t[lp_zg_sort_ind] * scale_z)', '(4)'], {}), '(lp_zg_t[lp_zg_sort_ind] * scale_z, 4)\n', (17039, 17077), True, 'import numpy as np\n'), ((17192, 17223), 'numpy.ones', 'np.ones', (['[num_inducing_g[1], 1]'], {}), '([num_inducing_g[1], 1])\n', (17199, 17223), True, 'import numpy as np\n'), ((8746, 8779), 'tensorflow.matmul', 'tf.matmul', (['A', 'S'], {'transpose_a': '(True)'}), '(A, S, transpose_a=True)\n', (8755, 8779), True, 'import tensorflow as tf\n'), ((9120, 9149), 'numpy.round', 'np.round', (['(N / num_inducing[p])'], {}), '(N / num_inducing[p])\n', (9128, 9149), True, 'import numpy as np\n'), ((10272, 10290), 'tensorflow.square', 'tf.square', (['(Y - fmu)'], {}), '(Y - fmu)\n', (10281, 10290), True, 'import tensorflow as tf\n'), ((10630, 10642), 'tensorflow.square', 'tf.square', (['a'], {}), '(a)\n', (10639, 10642), True, 'import tensorflow as tf\n'), ((10429, 10441), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (10436, 10441), True, 'import numpy as np\n'), ((15336, 15347), 'time.time', 'time.time', ([], {}), '()\n', (15345, 15347), False, 'import time\n')] |
"""
Module providing JSON serialization and de-serialization just like the `json`
module, but with support for more data types (e.g., NumPy arrays).
"""
import base64
import fractions
import io
import json
import warnings
# NumPy is optional (used in the extended JSON encoder/decoder)
try:
import numpy as np
_NUMPY_ERROR = None
except ImportError as e:
_NUMPY_ERROR = e
###
#%% internal helpers
###
class _ExtendedJsonEncoder(json.JSONEncoder):
"""
JSON encoder which also supports objects of the following types:
`bytes`, `numpy.ndarray`.
For decoding, use `ExtendedJsonDecoder.object_hook` as value for the
`object_hook` parameter of `json.load` or `json.loads`.
"""
def default(self, o):
# byte arrays
if isinstance(o, bytes):
e = base64.b64encode(o).decode("ascii")
return {"__ExtendedJsonType__": "bytes", "__ExtendedJsonValue__": e, "__ExtendedJsonEncoding__": "base64"}
# fractions
if isinstance(o, fractions.Fraction):
e = [o.numerator, o.denominator]
return {"__ExtendedJsonType__": "fractions.Fraction", "__ExtendedJsonValue__": e, "__ExtendedJsonEncoding__": "plain"}
# NumPy arrays
if (_NUMPY_ERROR is None) and isinstance(o, np.ndarray):
b = io.BytesIO()
np.save(file=b, arr=o, allow_pickle=False, fix_imports=False)
e = base64.b64encode(b.getvalue()).decode("ascii")
return {"__ExtendedJsonType__": "numpy.ndarray", "__ExtendedJsonValue__": e, "__ExtendedJsonEncoding__": "base64"}
# no extended object
return super().default(o)
class _ExtendedJsonDecoder():
"""
This class is the counterpart of `ExtendedJsonEncoder` and provides the
static method `object_hook`.
"""
@staticmethod
def object_hook(o):
# only handle dicts which have the three keys "__ExtendedJsonType__", "__ExtendedJsonValue__", and "__ExtendedJsonEncoding__"
keys = o.keys()
if (len(keys) == 3) and ("__ExtendedJsonType__" in keys) and ("__ExtendedJsonValue__" in keys) and ("__ExtendedJsonEncoding__" in keys):
# byte arrays
if (o["__ExtendedJsonType__"] == "bytes") and (o["__ExtendedJsonEncoding__"] == "base64"):
e = o["__ExtendedJsonValue__"]
b = base64.b64decode(bytes(e, "ascii"))
return b
# fractions
if (o["__ExtendedJsonType__"] == "fractions.Fraction") and (o["__ExtendedJsonEncoding__"] == "plain"):
(n, d) = o["__ExtendedJsonValue__"]
f = fractions.Fraction(numerator=n, denominator=d)
return f
# NumPy arrays
if o["__ExtendedJsonType__"] == "numpy.ndarray":
if _NUMPY_ERROR is None:
e = o["__ExtendedJsonValue__"]
b = base64.b64decode(bytes(e, "ascii"))
x = np.load(file=io.BytesIO(b), allow_pickle=False, fix_imports=False)
return x
else:
warnings.warn("Could not decode object of type 'numpy.ndarray', because NumPy import failed: '{}'".format(_NUMPY_ERROR))
# no extended object
return o
###
#%% JSON equivalents
###
def dump(*args, **kwargs):
    """
    See :func:`json.dump()`.

    Mirrors :func:`dumps`: forces ``ensure_ascii`` and installs the extended
    encoder so the file-based API serializes the same extended types.
    """
    # Consistency fix: dumps() installs the extended encoder but dump() did
    # not, so bytes/Fraction/ndarray values failed through the file API.
    kwargs["ensure_ascii"] = True
    kwargs["cls"] = _ExtendedJsonEncoder
    return json.dump(*args, **kwargs)
def dumps(*args, **kwargs):
    """
    See :func:`json.dumps()`.
    """
    # Force ASCII output and plug in the extended encoder before delegating.
    kwargs.update(ensure_ascii=True, cls=_ExtendedJsonEncoder)
    return json.dumps(*args, **kwargs)
def load(*args, **kwargs):
    """
    See :func:`json.load()`.
    """
    # Route decoding of marker dicts through the extended decoder.
    kwargs.update(object_hook=_ExtendedJsonDecoder.object_hook)
    return json.load(*args, **kwargs)
def loads(*args, **kwargs):
    """
    See :func:`json.loads()`.
    """
    # Route decoding of marker dicts through the extended decoder.
    kwargs.update(object_hook=_ExtendedJsonDecoder.object_hook)
    return json.loads(*args, **kwargs)
| [
"json.dump",
"io.BytesIO",
"json.load",
"numpy.save",
"json.loads",
"json.dumps",
"base64.b64encode",
"fractions.Fraction"
] | [((3387, 3413), 'json.dump', 'json.dump', (['*args'], {}), '(*args, **kwargs)\n', (3396, 3413), False, 'import json\n'), ((3576, 3603), 'json.dumps', 'json.dumps', (['*args'], {}), '(*args, **kwargs)\n', (3586, 3603), False, 'import json\n'), ((3750, 3776), 'json.load', 'json.load', (['*args'], {}), '(*args, **kwargs)\n', (3759, 3776), False, 'import json\n'), ((3925, 3952), 'json.loads', 'json.loads', (['*args'], {}), '(*args, **kwargs)\n', (3935, 3952), False, 'import json\n'), ((1315, 1327), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1325, 1327), False, 'import io\n'), ((1340, 1401), 'numpy.save', 'np.save', ([], {'file': 'b', 'arr': 'o', 'allow_pickle': '(False)', 'fix_imports': '(False)'}), '(file=b, arr=o, allow_pickle=False, fix_imports=False)\n', (1347, 1401), True, 'import numpy as np\n'), ((2628, 2674), 'fractions.Fraction', 'fractions.Fraction', ([], {'numerator': 'n', 'denominator': 'd'}), '(numerator=n, denominator=d)\n', (2646, 2674), False, 'import fractions\n'), ((812, 831), 'base64.b64encode', 'base64.b64encode', (['o'], {}), '(o)\n', (828, 831), False, 'import base64\n'), ((2978, 2991), 'io.BytesIO', 'io.BytesIO', (['b'], {}), '(b)\n', (2988, 2991), False, 'import io\n')] |
import os
import argparse
import importlib
from natsort import natsorted
from tqdm import tqdm, trange
from collections import Counter
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from lib.config import config, update_config, infer_exp_id
from lib import dataset
if __name__ == '__main__':
    # Evaluate a trained semantic-segmentation model on the validation split
    # and write the confusion matrix to <out>/cm.npz.
    # Parse args & config
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--cfg', required=True)
    parser.add_argument('--pth')
    parser.add_argument('--out')
    parser.add_argument('--vis_dir')
    parser.add_argument('--y', action='store_true')
    parser.add_argument('--test_hw', type=int, nargs='*')
    parser.add_argument('opts',
                        help='Modify config options using the command-line',
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    update_config(config, args)
    device = 'cuda' if config.cuda else 'cpu'
    if config.cuda and config.cuda_benchmark:
        # BUGFIX: the flag is meant to enable cudnn autotuning, but the code
        # assigned False — which is already the default — making it a no-op.
        torch.backends.cudnn.benchmark = True
    # Init global variable
    if not args.pth:
        # No checkpoint given: pick the latest 'ep*pth' of this experiment.
        from glob import glob
        exp_id = infer_exp_id(args.cfg)
        exp_ckpt_root = os.path.join(config.ckpt_root, exp_id)
        args.pth = natsorted(glob(os.path.join(exp_ckpt_root, 'ep*pth')))[-1]
        print(f'No pth given, inferring the trained pth: {args.pth}')
    if not args.out:
        args.out = os.path.splitext(args.pth)[0]
        print(f'No out given, inferring the output dir: {args.out}')
    os.makedirs(args.out, exist_ok=True)
    if os.path.isfile(os.path.join(args.out, 'cm.npz')) and not args.y:
        # A confusion matrix already exists: print its metrics and wait for
        # the user to confirm (Enter) before overwriting it below.
        print(f'{os.path.join(args.out, "cm.npz")} is existed:')
        cm = np.load(os.path.join(args.out, 'cm.npz'))['cm']
        inter = np.diag(cm)
        union = cm.sum(0) + cm.sum(1) - inter
        ious = inter / union
        accs = inter / cm.sum(1)
        DatasetClass = getattr(dataset, config.dataset.name)
        config.dataset.valid_kwargs.update(config.dataset.common_kwargs)
        valid_dataset = DatasetClass(**config.dataset.valid_kwargs)
        id2class = np.array(valid_dataset.ID2CLASS)
        for name, iou, acc in zip(id2class, ious, accs):
            print(f'{name:20s}: iou {iou*100:5.2f} / acc {acc*100:5.2f}')
        print(f'{"Overall":20s}: iou {ious.mean()*100:5.2f} / acc {accs.mean()*100:5.2f}')
        print('Re-write this results ?', end=' ')
        input()
    # Init dataset
    DatasetClass = getattr(dataset, config.dataset.name)
    config.dataset.valid_kwargs.update(config.dataset.common_kwargs)
    if args.test_hw:
        # Evaluate at a different resolution: remember the training-time
        # resolution so inputs can be resized before the network.
        input_hw = config.dataset.common_kwargs['hw']
        config.dataset.valid_kwargs['hw'] = args.test_hw
    else:
        input_hw = None
    valid_dataset = DatasetClass(**config.dataset.valid_kwargs)
    valid_loader = DataLoader(valid_dataset, 1,
                              num_workers=config.num_workers,
                              pin_memory=config.cuda)
    # Init network
    model_file = importlib.import_module(config.model.file)
    model_class = getattr(model_file, config.model.modelclass)
    net = model_class(**config.model.kwargs).to(device)
    net.load_state_dict(torch.load(args.pth))
    net = net.to(device).eval()
    # Start eval
    cm = 0
    num_classes = config.model.kwargs.modalities_config.SemanticSegmenter.num_classes
    with torch.no_grad():
        for batch in tqdm(valid_loader, position=1, total=len(valid_loader)):
            color = batch['x'].to(device)
            sem = batch['sem'].to(device)
            # Pixels with negative labels are ignored in the evaluation.
            mask = (sem >= 0)
            if mask.sum() == 0:
                continue
            # feed forward & compute losses
            if input_hw is not None:
                color = F.interpolate(color, size=input_hw, mode='bilinear', align_corners=False)
            pred_sem = net.infer(color)['sem']
            if input_hw is not None:
                pred_sem = F.interpolate(pred_sem, size=args.test_hw, mode='bilinear', align_corners=False)
            # Visualization
            if args.vis_dir:
                import matplotlib.pyplot as plt
                from imageio import imwrite
                # Per-class colors blended over the RGB input (0.2/0.8 mix).
                cmap = (plt.get_cmap('gist_rainbow')(np.arange(num_classes) / num_classes)[...,:3] * 255).astype(np.uint8)
                rgb = (batch['x'][0, :3].permute(1,2,0) * 255).cpu().numpy().astype(np.uint8)
                vis_sem = cmap[pred_sem[0].argmax(0).cpu().numpy()]
                vis_sem = (rgb * 0.2 + vis_sem * 0.8).astype(np.uint8)
                imwrite(os.path.join(args.vis_dir, batch['fname'][0].strip()), vis_sem)
                vis_sem = cmap[sem[0].cpu().numpy()]
                vis_sem = (rgb * 0.2 + vis_sem * 0.8).astype(np.uint8)
                imwrite(os.path.join(args.vis_dir, batch['fname'][0].strip() + '.gt.png'), vis_sem)
            # Log
            gt = sem[mask]
            pred = pred_sem.argmax(1)[mask]
            assert gt.min() >= 0 and gt.max() < num_classes and pred_sem.shape[1] == num_classes
            # Accumulate the flattened (gt, pred) joint histogram.
            cm += np.bincount((gt * num_classes + pred).cpu().numpy(), minlength=num_classes**2)
    # Summarize
    print(' Summarize '.center(50, '='))
    cm = cm.reshape(num_classes, num_classes)
    id2class = np.array(valid_dataset.ID2CLASS)
    # Drop classes that never occur in the ground truth.
    valid_mask = (cm.sum(1) != 0)
    cm = cm[valid_mask][:, valid_mask]
    id2class = id2class[valid_mask]
    inter = np.diag(cm)
    union = cm.sum(0) + cm.sum(1) - inter
    ious = inter / union
    accs = inter / cm.sum(1)
    for name, iou, acc in zip(id2class, ious, accs):
        print(f'{name:20s}: iou {iou*100:5.2f} / acc {acc*100:5.2f}')
    print(f'{"Overall":20s}: iou {ious.mean()*100:5.2f} / acc {accs.mean()*100:5.2f}')
    np.savez(os.path.join(args.out, 'cm.npz'), cm=cm)
| [
"argparse.ArgumentParser",
"importlib.import_module",
"lib.config.config.dataset.valid_kwargs.update",
"torch.utils.data.DataLoader",
"lib.config.update_config",
"lib.config.infer_exp_id",
"os.makedirs",
"torch.load",
"matplotlib.pyplot.get_cmap",
"numpy.array",
"os.path.splitext",
"numpy.aran... | [((416, 495), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (439, 495), False, 'import argparse\n'), ((965, 992), 'lib.config.update_config', 'update_config', (['config', 'args'], {}), '(config, args)\n', (978, 992), False, 'from lib.config import config, update_config, infer_exp_id\n'), ((2625, 2689), 'lib.config.config.dataset.valid_kwargs.update', 'config.dataset.valid_kwargs.update', (['config.dataset.common_kwargs'], {}), '(config.dataset.common_kwargs)\n', (2659, 2689), False, 'from lib.config import config, update_config, infer_exp_id\n'), ((2939, 3028), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset', '(1)'], {'num_workers': 'config.num_workers', 'pin_memory': 'config.cuda'}), '(valid_dataset, 1, num_workers=config.num_workers, pin_memory=\n config.cuda)\n', (2949, 3028), False, 'from torch.utils.data import DataLoader\n'), ((3121, 3163), 'importlib.import_module', 'importlib.import_module', (['config.model.file'], {}), '(config.model.file)\n', (3144, 3163), False, 'import importlib\n'), ((5346, 5378), 'numpy.array', 'np.array', (['valid_dataset.ID2CLASS'], {}), '(valid_dataset.ID2CLASS)\n', (5354, 5378), True, 'import numpy as np\n'), ((5500, 5511), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (5507, 5511), True, 'import numpy as np\n'), ((1229, 1251), 'lib.config.infer_exp_id', 'infer_exp_id', (['args.cfg'], {}), '(args.cfg)\n', (1241, 1251), False, 'from lib.config import config, update_config, infer_exp_id\n'), ((1276, 1314), 'os.path.join', 'os.path.join', (['config.ckpt_root', 'exp_id'], {}), '(config.ckpt_root, exp_id)\n', (1288, 1314), False, 'import os\n'), ((1613, 1649), 'os.makedirs', 'os.makedirs', (['args.out'], {'exist_ok': '(True)'}), '(args.out, exist_ok=True)\n', (1624, 1649), False, 'import os\n'), ((1864, 1875), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (1871, 
1875), True, 'import numpy as np\n'), ((2053, 2117), 'lib.config.config.dataset.valid_kwargs.update', 'config.dataset.valid_kwargs.update', (['config.dataset.common_kwargs'], {}), '(config.dataset.common_kwargs)\n', (2087, 2117), False, 'from lib.config import config, update_config, infer_exp_id\n'), ((2205, 2237), 'numpy.array', 'np.array', (['valid_dataset.ID2CLASS'], {}), '(valid_dataset.ID2CLASS)\n', (2213, 2237), True, 'import numpy as np\n'), ((3307, 3327), 'torch.load', 'torch.load', (['args.pth'], {}), '(args.pth)\n', (3317, 3327), False, 'import torch\n'), ((3485, 3500), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3498, 3500), False, 'import torch\n'), ((5849, 5881), 'os.path.join', 'os.path.join', (['args.out', '"""cm.npz"""'], {}), "(args.out, 'cm.npz')\n", (5861, 5881), False, 'import os\n'), ((1505, 1531), 'os.path.splitext', 'os.path.splitext', (['args.pth'], {}), '(args.pth)\n', (1521, 1531), False, 'import os\n'), ((1672, 1704), 'os.path.join', 'os.path.join', (['args.out', '"""cm.npz"""'], {}), "(args.out, 'cm.npz')\n", (1684, 1704), False, 'import os\n'), ((1808, 1840), 'os.path.join', 'os.path.join', (['args.out', '"""cm.npz"""'], {}), "(args.out, 'cm.npz')\n", (1820, 1840), False, 'import os\n'), ((3857, 3930), 'torch.nn.functional.interpolate', 'F.interpolate', (['color'], {'size': 'input_hw', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(color, size=input_hw, mode='bilinear', align_corners=False)\n", (3870, 3930), True, 'import torch.nn.functional as F\n'), ((4042, 4127), 'torch.nn.functional.interpolate', 'F.interpolate', (['pred_sem'], {'size': 'args.test_hw', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(pred_sem, size=args.test_hw, mode='bilinear', align_corners=False\n )\n", (4055, 4127), True, 'import torch.nn.functional as F\n'), ((1349, 1386), 'os.path.join', 'os.path.join', (['exp_ckpt_root', '"""ep*pth"""'], {}), "(exp_ckpt_root, 'ep*pth')\n", (1361, 1386), False, 'import os\n'), ((1739, 1771), 
'os.path.join', 'os.path.join', (['args.out', '"""cm.npz"""'], {}), "(args.out, 'cm.npz')\n", (1751, 1771), False, 'import os\n'), ((4297, 4325), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (4309, 4325), True, 'import matplotlib.pyplot as plt\n'), ((4326, 4348), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (4335, 4348), True, 'import numpy as np\n')] |
"""Utility functions to support other modules."""
import numpy as np
def check_distance_matrix(distances):
    """Run every structural test a distance matrix must pass.

    Verifies that the matrix is square, symmetric and non-negative, raising
    ``ValueError`` as soon as one constraint is violated.

    Parameters
    ----------
    distances: array, shape (n_points, n_points)
        The distances symmetric square matrix.
    """
    n_rows, n_cols = distances.shape[0], distances.shape[1]
    # a distance matrix is square by definition
    if n_rows != n_cols:
        raise ValueError("Distance matrix provided is not square. "
                         f"The shape provided is {distances.shape}")
    # d(a, b) must equal d(b, a)
    if not is_symmetric(distances):
        raise ValueError("Distance matrix provided is not symmetric.")
    # every entry must be non-negative
    if not np.all(distances >= 0):
        raise ValueError("Distances matrices must have all its entries "
                         "positive.")
def is_symmetric(matrix, rtol=1e-5, atol=1e-8):
    """Tell whether ``matrix`` coincides with its transpose within tolerance.

    Parameters
    ----------
    matrix: array, shape (n, n)
        matrix to be checked.
    rtol: float, optional (default=1e-5)
        Relative tolerance.
    atol: float, optional (default=1e-8)
        Absolute tolerance.
    """
    transposed = matrix.T
    return np.allclose(matrix, transposed, rtol=rtol, atol=atol)
def random_permutation_matrix(size):
    """Build a random permutation matrix.

    Parameters
    ----------
    size : int
        The dimension of the random permutation matrix.

    Returns
    -------
    random_permutation : array, shape (size, size)
        An identity matrix with its rows random shuffled.
    """
    # Shuffle the row indices, then index the identity with that order.
    row_order = np.arange(0, size)
    np.random.shuffle(row_order)
    return np.identity(size)[row_order]
def spin_energy(ordered_distances, weight_matrix):
    """Compute the SPIN energy of an ordered distance matrix.

    Metrices with better sorting have lower energies.

    Parameters
    ----------
    ordered_distances : array, shape (n, n)
        The ordered distances matrix. ordered_distances = PDP.T
    weight_matrix : array, shape (n, n)
        The weight matrix to weight the ordered distances matrix.

    Returns
    -------
    energy : float
        The energy of the associated ordered distance matrix and the
        weight matrix.
    """
    weighted = ordered_distances.dot(weight_matrix)
    return np.trace(weighted)
| [
"numpy.random.shuffle",
"numpy.allclose",
"numpy.identity",
"numpy.arange",
"numpy.all"
] | [((1264, 1315), 'numpy.allclose', 'np.allclose', (['matrix', 'matrix.T'], {'rtol': 'rtol', 'atol': 'atol'}), '(matrix, matrix.T, rtol=rtol, atol=atol)\n', (1275, 1315), True, 'import numpy as np\n'), ((1649, 1666), 'numpy.identity', 'np.identity', (['size'], {}), '(size)\n', (1660, 1666), True, 'import numpy as np\n'), ((1679, 1697), 'numpy.arange', 'np.arange', (['(0)', 'size'], {}), '(0, size)\n', (1688, 1697), True, 'import numpy as np\n'), ((1702, 1726), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (1719, 1726), True, 'import numpy as np\n'), ((789, 811), 'numpy.all', 'np.all', (['(distances >= 0)'], {}), '(distances >= 0)\n', (795, 811), True, 'import numpy as np\n')] |
"""Implementations of various coupling layers."""
import warnings
import numpy as np
import torch
from nflows.transforms import splines
from nflows.transforms.base import Transform
from nflows.transforms.nonlinearities import (
PiecewiseCubicCDF,
PiecewiseLinearCDF,
PiecewiseQuadraticCDF,
PiecewiseRationalQuadraticCDF,
)
from nflows.utils import torchutils
from nflows.transforms.coupling import (
CouplingTransform,
PiecewiseCouplingTransform
)
class PiecewiseCircularRationalQuadraticCouplingTransform(PiecewiseCouplingTransform):
    """Rational-quadratic spline coupling transform for circular variables.

    The spline's unnormalized derivative at the left end of the interval is
    reused as the derivative at the right end (see ``_piecewise_cdf``), so the
    element-wise map is smooth across the wrap-around point of an angular
    coordinate.
    """

    def __init__(
        self,
        mask,
        transform_net_create_fn,
        num_bins=10,
        tails=None,
        tail_bound=1.0,
        apply_unconditional_transform=False,
        img_shape=None,
        min_bin_width=splines.rational_quadratic.DEFAULT_MIN_BIN_WIDTH,
        min_bin_height=splines.rational_quadratic.DEFAULT_MIN_BIN_HEIGHT,
        min_derivative=splines.rational_quadratic.DEFAULT_MIN_DERIVATIVE,
    ):
        # Spline hyper-parameters; read later by _piecewise_cdf() and
        # _transform_dim_multiplier().
        self.num_bins = num_bins
        self.min_bin_width = min_bin_width
        self.min_bin_height = min_bin_height
        self.min_derivative = min_derivative
        self.tails = tails
        self.tail_bound = tail_bound
        if apply_unconditional_transform:
            # Factory building a CDF for a given feature count (extended by
            # img_shape when provided); passed on to the base class.
            unconditional_transform = lambda features: PiecewiseRationalQuadraticCDF(
                shape=[features] + (img_shape if img_shape else []),
                num_bins=num_bins,
                tails=tails,
                tail_bound=tail_bound,
                min_bin_width=min_bin_width,
                min_bin_height=min_bin_height,
                min_derivative=min_derivative,
            )
        else:
            unconditional_transform = None
        super().__init__(
            mask,
            transform_net_create_fn,
            unconditional_transform=unconditional_transform,
        )

    def _transform_dim_multiplier(self):
        # Parameters per transformed dimension: num_bins widths + num_bins
        # heights + derivatives (one fewer when tails are 'linear').
        if self.tails == "linear":
            return self.num_bins * 3 - 1
        else:
            return self.num_bins * 3

    def _piecewise_cdf(self, inputs, transform_params, inverse=False):
        """Evaluate the (inverse) spline CDF given the conditioner's output."""
        # Split the conditioner output into the three spline parameter groups.
        unnormalized_widths = transform_params[..., : self.num_bins]
        unnormalized_heights = transform_params[..., self.num_bins : 2 * self.num_bins]
        unnormalized_derivatives = transform_params[..., 2 * self.num_bins :]
        ## constraint the derivatives at the two end points are the same
        # (the first derivative parameter is appended again at the end, which
        # is what makes this transform "circular")
        unnormalized_derivatives=torch.cat([unnormalized_derivatives, unnormalized_derivatives[..., 0][..., None]], dim = -1)
        # Scale the raw width/height parameters down by sqrt(conditioner
        # width); per the warning below this guards against bad initialization.
        if hasattr(self.transform_net, "hidden_features"):
            unnormalized_widths /= np.sqrt(self.transform_net.hidden_features)
            unnormalized_heights /= np.sqrt(self.transform_net.hidden_features)
        elif hasattr(self.transform_net, "hidden_channels"):
            unnormalized_widths /= np.sqrt(self.transform_net.hidden_channels)
            unnormalized_heights /= np.sqrt(self.transform_net.hidden_channels)
        else:
            warnings.warn(
                "Inputs to the softmax are not scaled down: initialization might be bad."
            )
        if self.tails is None:
            # No tails: the spline acts on the fixed box [-pi, pi] x [-pi, pi].
            spline_fn = splines.rational_quadratic_spline
            spline_kwargs = {"left": -np.pi, "right": np.pi, "bottom": -np.pi, "top": np.pi}
        else:
            spline_fn = splines.unconstrained_rational_quadratic_spline
            spline_kwargs = {"tails": self.tails, "tail_bound": self.tail_bound}
        return spline_fn(
            inputs=inputs,
            unnormalized_widths=unnormalized_widths,
            unnormalized_heights=unnormalized_heights,
            unnormalized_derivatives=unnormalized_derivatives,
            inverse=inverse,
            min_bin_width=self.min_bin_width,
            min_bin_height=self.min_bin_height,
            min_derivative=self.min_derivative,
            **spline_kwargs
        )
| [
"warnings.warn",
"nflows.transforms.nonlinearities.PiecewiseRationalQuadraticCDF",
"torch.cat",
"numpy.sqrt"
] | [((2480, 2574), 'torch.cat', 'torch.cat', (['[unnormalized_derivatives, unnormalized_derivatives[..., 0][..., None]]'], {'dim': '(-1)'}), '([unnormalized_derivatives, unnormalized_derivatives[..., 0][...,\n None]], dim=-1)\n', (2489, 2574), False, 'import torch\n'), ((2676, 2719), 'numpy.sqrt', 'np.sqrt', (['self.transform_net.hidden_features'], {}), '(self.transform_net.hidden_features)\n', (2683, 2719), True, 'import numpy as np\n'), ((2756, 2799), 'numpy.sqrt', 'np.sqrt', (['self.transform_net.hidden_features'], {}), '(self.transform_net.hidden_features)\n', (2763, 2799), True, 'import numpy as np\n'), ((1331, 1571), 'nflows.transforms.nonlinearities.PiecewiseRationalQuadraticCDF', 'PiecewiseRationalQuadraticCDF', ([], {'shape': '([features] + (img_shape if img_shape else []))', 'num_bins': 'num_bins', 'tails': 'tails', 'tail_bound': 'tail_bound', 'min_bin_width': 'min_bin_width', 'min_bin_height': 'min_bin_height', 'min_derivative': 'min_derivative'}), '(shape=[features] + (img_shape if img_shape else\n []), num_bins=num_bins, tails=tails, tail_bound=tail_bound,\n min_bin_width=min_bin_width, min_bin_height=min_bin_height,\n min_derivative=min_derivative)\n', (1360, 1571), False, 'from nflows.transforms.nonlinearities import PiecewiseCubicCDF, PiecewiseLinearCDF, PiecewiseQuadraticCDF, PiecewiseRationalQuadraticCDF\n'), ((2896, 2939), 'numpy.sqrt', 'np.sqrt', (['self.transform_net.hidden_channels'], {}), '(self.transform_net.hidden_channels)\n', (2903, 2939), True, 'import numpy as np\n'), ((2976, 3019), 'numpy.sqrt', 'np.sqrt', (['self.transform_net.hidden_channels'], {}), '(self.transform_net.hidden_channels)\n', (2983, 3019), True, 'import numpy as np\n'), ((3046, 3139), 'warnings.warn', 'warnings.warn', (['"""Inputs to the softmax are not scaled down: initialization might be bad."""'], {}), "(\n 'Inputs to the softmax are not scaled down: initialization might be bad.')\n", (3059, 3139), False, 'import warnings\n')] |
import argparse
import numpy as np
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
# Module-level CLI parsing: the single positional argument is the path of the
# accel config file that main() and submit_entries() read and write.
parser = argparse.ArgumentParser(
    description="Configure your configuration settings.")
parser.add_argument("config_file_path")
args = parser.parse_args()
def main():
    """Build and run the Marley Accel settings GUI.

    Creates one labelled entry per settings field, wires the Reset/Apply
    buttons to their callbacks, draws the initial acceleration plot and
    enters the Tk main loop.
    """
    # `args` is only read here, so the former `global args` declaration was
    # unnecessary and has been removed.
    config_file_name = args.config_file_path
    window = tk.Tk()
    window.title('Marley Accel Setup')
    window.geometry('1200x600')
    heading = tk.Label(window, text='Settings')
    heading.grid(row=0, column=1)
    entries = {}
    field_names = list(DefaultSettings().get().keys())
    # One label + entry row per settings field.
    for idx, field_name in enumerate(field_names):
        base = tk.Label(window, text=field_name)
        base.grid(row=idx + 1, column=0)
        entries[field_name] = tk.Entry(window)
        entries[field_name].grid(row=idx + 1, column=1, ipadx='100')
    init_entries(entries, config_file_name)
    # reset_entries/submit_entries return closures, so calling them here
    # produces the zero-argument button callbacks Tk expects.
    reset = tk.Button(window,
                      text='Reset',
                      fg='black',
                      bg='red',
                      command=reset_entries(entries,
                                            DefaultSettings().get()))
    reset.place(x=180, y=225)
    submit = tk.Button(window,
                       text='Apply',
                       fg='Black',
                       bg='Red',
                       command=submit_entries(config_file_name, entries,
                                              field_names, draw_accel_plot,
                                              window))
    submit.place(x=250, y=225)
    # map entries to dict of name to value. Use to draw accel plot
    settings = entries_to_dict(entries)
    draw_accel_plot(window, settings)
    window.mainloop()
def init_entries(entries, config_file_name):
    """
    Fill every entry widget with its value from the config file.

    The file holds one 'name=value' pair per line.
    """
    with open(config_file_name, 'r') as config_file:
        pairs = [line.split('=') for line in config_file.readlines()]
    config_map = dict(pairs)
    for name in entries:
        entries[name].delete(0, tk.END)
        entries[name].insert(0, config_map[name].strip())
def entries_to_dict(entries):
    """
    Map each entry widget's name to the float value it currently holds.

    Entries whose text cannot be parsed as a float fall back to the
    default setting of the same name.
    """
    settings = {key: widget.get() for key, widget in entries.items()}
    for name, raw_value in settings.items():
        try:
            settings[name] = float(raw_value)
        except ValueError:
            # unparsable text: substitute the default value for this field
            settings[name] = float(DefaultSettings().get()[name])
    return settings
def draw_command(window, entries):
    """
    Wrapper for function that draw the plot, so it can be used as a
    command.
    """
    # Capture the current entry values once; the returned callable redraws
    # the plot with exactly those settings.
    current_settings = entries_to_dict(entries)
    return lambda: draw_accel_plot(window, current_settings)
def draw_accel_plot(window, settings):
    """Render the sensitivity-vs-velocity curve into the Tk window."""
    velocities = np.arange(0, 25, .1)
    # Sample the acceleration curve at every velocity step.
    sens_curve = [simple_accel(v, settings) for v in velocities]
    fig = Figure(dpi=100, tight_layout=False)
    fig.suptitle("Acceleration Grids")
    axes = fig.add_subplot(111, xlabel='Mouse Velocity',
                           ylabel='Sensitivity')
    axes.plot(velocities, sens_curve)
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.draw()
    canvas.get_tk_widget().place(x=450, y=10)
def reset_entries(entries, default_settings):
    """Return a callback that restores every entry to its default value."""
    def reset_func():
        for field, default in default_settings.items():
            entries[field].delete(0, tk.END)
            entries[field].insert(0, default)
    return reset_func
def submit_entries(config_file_name, entries, names, draw_func, window):
    """Return a callback that saves the entries to the config file and redraws."""
    def submission():
        # persist every field as a 'name=value' line
        with open(config_file_name, 'w+') as config_file:
            config_file.writelines(
                name + '=' + entries[name].get() + '\n' for name in names)
        # redraw the accel plot with the freshly saved values
        settings = entries_to_dict(entries)
        draw_func(window, settings)
    return submission
class DefaultSettings:
    """
    Stronger guarantee that default settings won't be changed during
    program execution.
    """

    def __init__(self):
        # Name-mangled dict; callers only ever see copies via get().
        self.__default_settings = {
            'base': 1.0,
            'offset': 0.0,
            'upper_bound': 10000.0,  # arbitrary large float
            'accel_rate': 0.0,
            'power': 2.0,
            'game_sens': 1.0,
            'overflow_lim': 127,  # C signed char max
            'pre_scalar_x': 1.0,
            'pre_scalar_y': 1.0,
            'post_scalar_x': 1.0,
            'post_scalar_y': 1.0
        }

    def get(self):
        """Return a fresh shallow copy so the stored defaults stay untouched."""
        return dict(self.__default_settings)
def simple_accel(rate, args):
    """Apply the acceleration curve to a mouse-movement rate of change.

    Ignores pre- and post-scalars; returns the resulting sensitivity.
    """
    # Only the movement above the offset threshold is accelerated.
    offset_rate = max(rate - args['offset'], 0.0)
    accelerated = args['base'] + (args['accel_rate'] * offset_rate) ** (args['power'] - 1)
    # Cap the curve, then normalize by the in-game sensitivity.
    capped = min(accelerated, args['upper_bound'])
    return capped / args['game_sens']
# Launch the GUI only when executed as a script.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"tkinter.Entry",
"matplotlib.figure.Figure",
"numpy.arange",
"tkinter.Label",
"tkinter.Tk",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
] | [((238, 315), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Configure your configuration settings."""'}), "(description='Configure your configuration settings.')\n", (261, 315), False, 'import argparse\n'), ((477, 484), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (482, 484), True, 'import tkinter as tk\n'), ((571, 604), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Settings"""'}), "(window, text='Settings')\n", (579, 604), True, 'import tkinter as tk\n'), ((3078, 3113), 'matplotlib.figure.Figure', 'Figure', ([], {'dpi': '(100)', 'tight_layout': '(False)'}), '(dpi=100, tight_layout=False)\n', (3084, 3113), False, 'from matplotlib.figure import Figure\n'), ((3164, 3185), 'numpy.arange', 'np.arange', (['(0)', '(25)', '(0.1)'], {}), '(0, 25, 0.1)\n', (3173, 3185), True, 'import numpy as np\n'), ((3408, 3445), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig'], {'master': 'window'}), '(fig, master=window)\n', (3425, 3445), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((779, 812), 'tkinter.Label', 'tk.Label', (['window'], {'text': 'field_name'}), '(window, text=field_name)\n', (787, 812), True, 'import tkinter as tk\n'), ((884, 900), 'tkinter.Entry', 'tk.Entry', (['window'], {}), '(window)\n', (892, 900), True, 'import tkinter as tk\n')] |
# PyVot Python Variational Optimal Transportation
# Author: <NAME> <<EMAIL>>
# Date: April 28th 2020
# Licence: MIT
import os
import sys
import time
import numpy as np
import sklearn.datasets
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from vot_numpy import VOT, VOTREG
import utils
# Generate data: a dense moons cloud `x` (N0 samples) and a sparse one
# `y` (K samples, with per-point labels) from the same generator.
N0 = 2000
K = 100
x, _ = sklearn.datasets.make_moons(n_samples=N0, noise=0.05, random_state=1)
y, labels = sklearn.datasets.make_moons(n_samples=K, noise=0.05, random_state=1)
# Center both clouds and scale them into the [-1, 1] plotting window.
y -= [0.5, 0.25]
x -= [0.5, 0.25]
y *= 0.5
x *= 0.5
# Rotate y by 45 degrees so the two clouds are misaligned before mapping.
theta = np.radians(45)
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s), (s, c)))
y = y.dot(R)
# -------------------------------------- #
# --------- w/o regularization --------- #
# -------------------------------------- #
# ----- plot before ----- #
xmin, xmax, ymin, ymax = -1., 1., -1., 1.
cxs_base = np.array((utils.COLOR_LIGHT_BLUE, utils.COLOR_LIGHT_RED))
cys_base = np.array((utils.COLOR_BLUE, utils.COLOR_RED))
cys = cys_base[labels]
ys, xs = 15, 3  # marker sizes for the y and x scatter plots
plt.figure(figsize=(12, 7))
plt.subplot(231)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.grid(True)
plt.title('w/o reg before')
plt.scatter(x[:, 0], x[:, 1], s=xs, color=utils.COLOR_LIGHT_GREY)
for p, cy in zip(y, cys):
    plt.scatter(p[0], p[1], s=ys, color=cy)
# ------- run WM -------- #
vot = VOT(y.copy(), [x.copy()], label_y=labels, verbose=False)
print("running Wasserstein clustering...")
tick = time.time()
vot.cluster(lr=0.5, max_iter_y=1)
tock = time.time()
print("total running time : {0:.4f} seconds".format(tock-tick))
cxs = cxs_base[vot.label_x[0]]
# ------ plot map ------- #
# Grey segments connect each original y to its mapped position.
fig232 = plt.subplot(232)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.grid(True)
plt.title('w/o reg map')
for p, p0 in zip(vot.y, vot.y_original):
    plt.plot([p[0], p0[0]], [p[1], p0[1]], color=np.append(utils.COLOR_LIGHT_GREY, 0.5), zorder=4)
for p, cy in zip(vot.y, cys):
    plt.scatter(p[0], p[1], s=ys, color=cy, facecolor='none', zorder=3)
for p, cy in zip(vot.y_original, cys):
    plt.scatter(p[0], p[1], s=ys, color=cy, zorder=2)
# ------ plot after ----- #
plt.subplot(233)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.grid(True)
plt.title('w/o reg after')
for px, cx in zip(vot.x[0], cxs):
    plt.scatter(px[0], px[1], s=xs, color=cx, zorder=2)
for py, cy in zip(vot.y, cys):
    plt.scatter(py[0], py[1], s=ys, color=cy, facecolor='none', zorder=3)
# -------------------------------------- #
# --------- w/ regularization ---------- #
# -------------------------------------- #
# ------- run RWM ------- #
vot_reg = VOTREG(y.copy(), [x.copy()], label_y=labels, verbose=False)
print("running regularized Wasserstein clustering...")
tick = time.time()
vot_reg.map(reg_type='transform', reg=10, max_iter_y=5)
tock = time.time()
print("total running time : {0:.4f} seconds".format(tock-tick))
cxs = cxs_base[vot_reg.label_x[0]]
# ------- plot map ------ #
plt.subplot(235)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.grid(True)
plt.title('w/ reg map')
for p, p0 in zip(vot_reg.y, vot_reg.y_original):
    plt.plot([p[0], p0[0]], [p[1], p0[1]], color=np.append(utils.COLOR_LIGHT_GREY, 0.5), zorder=4)
for p, cy in zip(vot_reg.y, cys):
    plt.scatter(p[0], p[1], s=ys, color=cy, facecolor='none', zorder=3)
for p, cy in zip(vot_reg.y_original, cys):
    plt.scatter(p[0], p[1], s=ys, color=cy, zorder=2)
# ------ plot after ----- #
plt.subplot(236)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.grid(True)
plt.title('w/ reg after')
for px, cx in zip(vot_reg.x[0], cxs):
    plt.scatter(px[0], px[1], s=xs, color=cx, zorder=2)
for py, cy in zip(vot_reg.y, cys):
    plt.scatter(py[0], py[1], s=ys, color=cy, facecolor='none', zorder=3)
# ---- plot and save ---- #
plt.tight_layout(pad=1.0, w_pad=1.5, h_pad=0.5)
# plt.savefig("transform.png")
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.radians",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"os.path.abspath",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"time.time",
"numpy.append",
"matplotlib.pyplot.figure",
"n... | [((605, 619), 'numpy.radians', 'np.radians', (['(45)'], {}), '(45)\n', (615, 619), True, 'import numpy as np\n'), ((660, 687), 'numpy.array', 'np.array', (['((c, -s), (s, c))'], {}), '(((c, -s), (s, c)))\n', (668, 687), True, 'import numpy as np\n'), ((914, 971), 'numpy.array', 'np.array', (['(utils.COLOR_LIGHT_BLUE, utils.COLOR_LIGHT_RED)'], {}), '((utils.COLOR_LIGHT_BLUE, utils.COLOR_LIGHT_RED))\n', (922, 971), True, 'import numpy as np\n'), ((983, 1028), 'numpy.array', 'np.array', (['(utils.COLOR_BLUE, utils.COLOR_RED)'], {}), '((utils.COLOR_BLUE, utils.COLOR_RED))\n', (991, 1028), True, 'import numpy as np\n'), ((1068, 1095), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (1078, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1096, 1112), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (1107, 1112), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1133), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (1121, 1133), True, 'import matplotlib.pyplot as plt\n'), ((1134, 1154), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (1142, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1169), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1163, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1197), 'matplotlib.pyplot.title', 'plt.title', (['"""w/o reg before"""'], {}), "('w/o reg before')\n", (1179, 1197), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1264), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 0]', 'x[:, 1]'], {'s': 'xs', 'color': 'utils.COLOR_LIGHT_GREY'}), '(x[:, 0], x[:, 1], s=xs, color=utils.COLOR_LIGHT_GREY)\n', (1210, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1489), 'time.time', 'time.time', ([], {}), '()\n', (1487, 1489), False, 'import time\n'), ((1531, 1542), 'time.time', 'time.time', ([], {}), '()\n', (1540, 1542), False, 'import 
time\n'), ((1677, 1693), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (1688, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1694, 1714), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (1702, 1714), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1735), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (1723, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1750), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1744, 1750), True, 'import matplotlib.pyplot as plt\n'), ((1751, 1775), 'matplotlib.pyplot.title', 'plt.title', (['"""w/o reg map"""'], {}), "('w/o reg map')\n", (1760, 1775), True, 'import matplotlib.pyplot as plt\n'), ((2142, 2158), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (2153, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2179), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (2167, 2179), True, 'import matplotlib.pyplot as plt\n'), ((2180, 2200), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (2188, 2200), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2215), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2209, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2216, 2242), 'matplotlib.pyplot.title', 'plt.title', (['"""w/o reg after"""'], {}), "('w/o reg after')\n", (2225, 2242), True, 'import matplotlib.pyplot as plt\n'), ((2731, 2742), 'time.time', 'time.time', ([], {}), '()\n', (2740, 2742), False, 'import time\n'), ((2806, 2817), 'time.time', 'time.time', ([], {}), '()\n', (2815, 2817), False, 'import time\n'), ((2946, 2962), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (2957, 2962), True, 'import matplotlib.pyplot as plt\n'), ((2963, 2983), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (2971, 2983), True, 'import 
matplotlib.pyplot as plt\n'), ((2984, 3004), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (2992, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3019), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3013, 3019), True, 'import matplotlib.pyplot as plt\n'), ((3020, 3043), 'matplotlib.pyplot.title', 'plt.title', (['"""w/ reg map"""'], {}), "('w/ reg map')\n", (3029, 3043), True, 'import matplotlib.pyplot as plt\n'), ((3425, 3441), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (3436, 3441), True, 'import matplotlib.pyplot as plt\n'), ((3442, 3462), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (3450, 3462), True, 'import matplotlib.pyplot as plt\n'), ((3463, 3483), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (3471, 3483), True, 'import matplotlib.pyplot as plt\n'), ((3484, 3498), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3492, 3498), True, 'import matplotlib.pyplot as plt\n'), ((3499, 3524), 'matplotlib.pyplot.title', 'plt.title', (['"""w/ reg after"""'], {}), "('w/ reg after')\n", (3508, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3758, 3805), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1.0)', 'w_pad': '(1.5)', 'h_pad': '(0.5)'}), '(pad=1.0, w_pad=1.5, h_pad=0.5)\n', (3774, 3805), True, 'import matplotlib.pyplot as plt\n'), ((3837, 3847), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3845, 3847), True, 'import matplotlib.pyplot as plt\n'), ((627, 640), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (633, 640), True, 'import numpy as np\n'), ((642, 655), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (648, 655), True, 'import numpy as np\n'), ((1295, 1334), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[0]', 'p[1]'], {'s': 'ys', 'color': 'cy'}), '(p[0], p[1], s=ys, color=cy)\n', (1306, 1334), True, 'import matplotlib.pyplot 
as plt\n'), ((1951, 2018), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[0]', 'p[1]'], {'s': 'ys', 'color': 'cy', 'facecolor': '"""none"""', 'zorder': '(3)'}), "(p[0], p[1], s=ys, color=cy, facecolor='none', zorder=3)\n", (1962, 2018), True, 'import matplotlib.pyplot as plt\n'), ((2062, 2111), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[0]', 'p[1]'], {'s': 'ys', 'color': 'cy', 'zorder': '(2)'}), '(p[0], p[1], s=ys, color=cy, zorder=2)\n', (2073, 2111), True, 'import matplotlib.pyplot as plt\n'), ((2282, 2333), 'matplotlib.pyplot.scatter', 'plt.scatter', (['px[0]', 'px[1]'], {'s': 'xs', 'color': 'cx', 'zorder': '(2)'}), '(px[0], px[1], s=xs, color=cx, zorder=2)\n', (2293, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2369, 2438), 'matplotlib.pyplot.scatter', 'plt.scatter', (['py[0]', 'py[1]'], {'s': 'ys', 'color': 'cy', 'facecolor': '"""none"""', 'zorder': '(3)'}), "(py[0], py[1], s=ys, color=cy, facecolor='none', zorder=3)\n", (2380, 2438), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3298), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[0]', 'p[1]'], {'s': 'ys', 'color': 'cy', 'facecolor': '"""none"""', 'zorder': '(3)'}), "(p[0], p[1], s=ys, color=cy, facecolor='none', zorder=3)\n", (3242, 3298), True, 'import matplotlib.pyplot as plt\n'), ((3346, 3395), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[0]', 'p[1]'], {'s': 'ys', 'color': 'cy', 'zorder': '(2)'}), '(p[0], p[1], s=ys, color=cy, zorder=2)\n', (3357, 3395), True, 'import matplotlib.pyplot as plt\n'), ((3568, 3619), 'matplotlib.pyplot.scatter', 'plt.scatter', (['px[0]', 'px[1]'], {'s': 'xs', 'color': 'cx', 'zorder': '(2)'}), '(px[0], px[1], s=xs, color=cx, zorder=2)\n', (3579, 3619), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3728), 'matplotlib.pyplot.scatter', 'plt.scatter', (['py[0]', 'py[1]'], {'s': 'ys', 'color': 'cy', 'facecolor': '"""none"""', 'zorder': '(3)'}), "(py[0], py[1], s=ys, color=cy, facecolor='none', zorder=3)\n", (3670, 3728), True, 'import 
matplotlib.pyplot as plt\n'), ((273, 298), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (288, 298), False, 'import os\n'), ((1867, 1905), 'numpy.append', 'np.append', (['utils.COLOR_LIGHT_GREY', '(0.5)'], {}), '(utils.COLOR_LIGHT_GREY, 0.5)\n', (1876, 1905), True, 'import numpy as np\n'), ((3143, 3181), 'numpy.append', 'np.append', (['utils.COLOR_LIGHT_GREY', '(0.5)'], {}), '(utils.COLOR_LIGHT_GREY, 0.5)\n', (3152, 3181), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description: Using a two-layer network to predict the ozone layer thickness
from data above Palmerston North in New Zealand between 1996 and 2004.
"""
from pylab import *
import numpy as np #numerical package for scientific computing
import mlpcn
#ozone layer thickness above Palmerston North in New Zealand between 1996 and 2004
pnoz = loadtxt('data/PNoz.data')
#create [day,ozone] array
#inputs = np.concatenate((np.transpose(np.ones((1,np.shape(pnoz)[0]))*np.arange(np.shape(pnoz)[0])),np.transpose(np.ones((1,np.shape(pnoz)[0]))*pnoz[:,2])),axis=1)
#normalise data: center the ozone column on zero, then scale by its max
pnoz[:,2] = pnoz[:,2]- pnoz[:,2].mean()
pnoz[:,2] = pnoz[:,2]/pnoz[:,2].max()
#assemble input vectors: x(a+t) = f(x(a),x(a-t),x(a-2t),...,x(a-kt))
t = 1 #stepsize
k = 4 #k points in the past used to predict the future point
# Number of complete windows: each sample needs k past readings plus the
# target reading t*(k+1) steps ahead.
lastPoint = np.shape(pnoz)[0]-t*(k+1)
inputs = np.zeros((lastPoint,k))
targets = np.zeros((lastPoint,1))
for i in range(lastPoint):
    inputs[i,:] = pnoz[i:i+t*k:t,2]
    targets[i] = pnoz[i+t*(k+1),2]
# Split: even-indexed rows (excluding the last 400) train, odd-indexed rows
# validate, and the final 400 samples are held out as the test set.
train = inputs[:-400:2,:]
traintarget = targets[:-400:2]
valid = inputs[1:-400:2,:]
validtarget = targets[1:-400:2]
test = inputs[-400:,:]
testtarget = targets[-400:]
"""
# randomly order the data
change = np.arange(np.shape(inputs)[0])
np.random.shuffle(change)
inputs = inputs[change,:]
targets = targets[change,:]
"""
#plot ozone versus days, colouring each of the three splits differently
xlabel('Days')
ylabel('normalized ozone')
plot(np.arange(0,2*np.shape(pnoz[:-400:2,2])[0],2),pnoz[:-400:2,2],'.r',label='train data')
plot(np.arange(1,2*np.shape(pnoz[1:-400:2,2])[0],2),pnoz[1:-400:2,2],'.g',label='valid data')
plot(np.arange(np.shape(pnoz[:-400,2])[0],np.shape(pnoz[:,2])[0],1),pnoz[-400:,2],'.b',label='test data')
legend(loc = 'upper right')
show()
# Build the two-layer network; args presumably are (inputs, targets, n_hidden,
# learning rate, output activation, training mode) — confirm against mlpcn.
net = mlpcn.mlpcn(train,traintarget,4,0.2,'linear','batch')
trainerror = np.array([])
validerror = np.array([])
print('\nStart Train Error',net.errfunc(net.mlpfwd(train,True)[1],traintarget))
print('Start Valid Error',net.errfunc(net.mlpfwd(valid,True)[1],validtarget))
print('...perceptron training...')
# NOTE(review): mlptrain_automatic appears to implement early stopping on the
# validation set and returns the per-round error histories — verify in mlpcn.
(trainerror,validerror) = net.mlptrain_automatic(valid,validtarget,100)
#for n in range(100):
#    trainerror = np.append(trainerror,net.errfunc(net.mlpfwd(train,True)[1],traintarget))
#    validerror = np.append(validerror,net.errfunc(net.mlpfwd(valid,True)[1],validtarget))
#    net.mlptrain(100)
print('Final Train Error',net.errfunc(net.mlpfwd(train,True)[1],traintarget))
print('Final Valid Error',net.errfunc(net.mlpfwd(valid,True)[1],validtarget))
# Learning curves: training vs validation error over training rounds.
plot(np.arange(len(trainerror)),trainerror,'-b',label = 'train error')
plot(np.arange(len(validerror)),validerror,'-r',label = 'valid error')
legend(loc = 'upper right')
show()
# Evaluate on the held-out test set and plot predictions against targets.
testout = net.mlpfwd(test,True)[1]
print('Test Error:',net.errfunc(testout,testtarget))
plot(np.arange(np.shape(test)[0]),testout,'.')
plot(np.arange(np.shape(test)[0]),testtarget,'x')
legend(('Predictions','Targets'))
show()
| [
"numpy.array",
"numpy.shape",
"numpy.zeros",
"mlpcn.mlpcn"
] | [((898, 922), 'numpy.zeros', 'np.zeros', (['(lastPoint, k)'], {}), '((lastPoint, k))\n', (906, 922), True, 'import numpy as np\n'), ((932, 956), 'numpy.zeros', 'np.zeros', (['(lastPoint, 1)'], {}), '((lastPoint, 1))\n', (940, 956), True, 'import numpy as np\n'), ((1779, 1837), 'mlpcn.mlpcn', 'mlpcn.mlpcn', (['train', 'traintarget', '(4)', '(0.2)', '"""linear"""', '"""batch"""'], {}), "(train, traintarget, 4, 0.2, 'linear', 'batch')\n", (1790, 1837), False, 'import mlpcn\n'), ((1848, 1860), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1856, 1860), True, 'import numpy as np\n'), ((1874, 1886), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1882, 1886), True, 'import numpy as np\n'), ((863, 877), 'numpy.shape', 'np.shape', (['pnoz'], {}), '(pnoz)\n', (871, 877), True, 'import numpy as np\n'), ((1646, 1670), 'numpy.shape', 'np.shape', (['pnoz[:-400, 2]'], {}), '(pnoz[:-400, 2])\n', (1654, 1670), True, 'import numpy as np\n'), ((1673, 1693), 'numpy.shape', 'np.shape', (['pnoz[:, 2]'], {}), '(pnoz[:, 2])\n', (1681, 1693), True, 'import numpy as np\n'), ((2824, 2838), 'numpy.shape', 'np.shape', (['test'], {}), '(test)\n', (2832, 2838), True, 'import numpy as np\n'), ((2871, 2885), 'numpy.shape', 'np.shape', (['test'], {}), '(test)\n', (2879, 2885), True, 'import numpy as np\n'), ((1464, 1490), 'numpy.shape', 'np.shape', (['pnoz[:-400:2, 2]'], {}), '(pnoz[:-400:2, 2])\n', (1472, 1490), True, 'import numpy as np\n'), ((1556, 1583), 'numpy.shape', 'np.shape', (['pnoz[1:-400:2, 2]'], {}), '(pnoz[1:-400:2, 2])\n', (1564, 1583), True, 'import numpy as np\n')] |
# Taken from https://github.com/psclklnk/spdl
# Copy of the license at TeachMyAgent/teachers/LICENSES/SPDL
from TeachMyAgent.teachers.utils.torch import get_gradient, zero_grad
import numpy as np
import torch
def _fisher_vector_product_t(p, kl_fun, param_fun, cg_damping):
kl = kl_fun()
grads = torch.autograd.grad(kl, param_fun(), create_graph=True, retain_graph=True)
flat_grad_kl = torch.cat([grad.view(-1) for grad in grads])
kl_v = torch.sum(flat_grad_kl * p)
grads_v = torch.autograd.grad(kl_v, param_fun(), create_graph=False, retain_graph=True)
flat_grad_grad_kl = torch.cat([grad.contiguous().view(-1) for grad in grads_v]).data
return flat_grad_grad_kl + p * cg_damping
def _fisher_vector_product(p, kl_fun, param_fun, cg_damping, use_cuda=False):
    """NumPy front-end for `_fisher_vector_product_t`: converts the vector to a
    tensor (optionally on the GPU) and delegates."""
    vec = torch.from_numpy(p)
    vec = vec.cuda() if use_cuda else vec
    return _fisher_vector_product_t(vec, kl_fun, param_fun, cg_damping)
def _conjugate_gradient(b, kl_fun, param_fun, cg_damping, n_epochs_cg, cg_residual_tol, use_cuda=False):
    """Approximately solve F x = b with conjugate gradients, where the damped
    Fisher matrix F is accessed only through Fisher-vector products.

    Stops after `n_epochs_cg` iterations or once the squared residual falls
    below `cg_residual_tol`. Returns the solution as a NumPy array.
    """
    direction = b.detach().cpu().numpy()
    residual = b.detach().cpu().numpy()
    solution = np.zeros_like(direction)
    res_norm_sq = residual.dot(residual)

    for _ in range(n_epochs_cg):
        fvp = _fisher_vector_product(direction, kl_fun, param_fun, cg_damping,
                                     use_cuda=use_cuda).detach().cpu().numpy()
        step = res_norm_sq / direction.dot(fvp)
        solution += step * direction
        residual -= step * fvp
        new_norm_sq = residual.dot(residual)
        # Polak-style update of the search direction from the new residual.
        direction = residual + (new_norm_sq / res_norm_sq) * direction
        res_norm_sq = new_norm_sq
        if res_norm_sq < cg_residual_tol:
            break
    return solution
def cg_step(loss_fun, kl_fun, max_kl, param_fun, weight_setter, weight_getter, cg_damping, n_epochs_cg,
            cg_residual_tol, n_epochs_line_search, use_cuda=False):
    """Perform one natural-gradient (TRPO-style) update step.

    Computes the loss gradient, solves for the search direction with conjugate
    gradients against the damped Fisher matrix, then backtracks along that
    direction until the KL trust-region constraint is satisfied.

    loss_fun / kl_fun: zero-argument callables returning scalar tensors.
    param_fun: zero-argument callable returning the parameter tensors.
    weight_setter / weight_getter: flat-weight accessors used by the line search.
    Raises RuntimeError if the gradient or the CG result contains NaN/Inf.
    """
    zero_grad(param_fun())
    loss = loss_fun()
    prev_loss = loss.item()
    loss.backward(retain_graph=True)
    g = get_gradient(param_fun())

    if np.linalg.norm(g) < 1e-10:
        # Numerically flat objective: nothing useful to do this step.
        print("Gradient norm smaller than 1e-10, skipping gradient step!")
        return
    if torch.any(torch.isnan(g)) or torch.any(torch.isinf(g)):
        raise RuntimeError("Nans and Infs in gradient")

    # BUG FIX: forward the caller's use_cuda flag here; it was hard-coded to
    # False even though it is threaded through to _line_search below.
    stepdir = _conjugate_gradient(g, kl_fun, param_fun, cg_damping, n_epochs_cg,
                                  cg_residual_tol, use_cuda=use_cuda)
    if np.any(np.isnan(stepdir)) or np.any(np.isinf(stepdir)):
        raise RuntimeError("Computation of conjugate gradient resulted in NaNs or Infs")

    _line_search(prev_loss, stepdir, loss_fun, kl_fun, max_kl, param_fun, weight_setter, weight_getter, cg_damping,
                 n_epochs_line_search, use_cuda=use_cuda)
def _line_search(prev_loss, stepdir, loss_fun, kl_fun, max_kl, param_fun, weight_setter, weight_getter,
                 cg_damping, n_epochs_line_search, use_cuda=False):
    """Backtracking line search along `stepdir`.

    Scales the step so the quadratic KL model equals `max_kl`, then halves the
    step fraction until the measured KL stays within 1.5 * max_kl or the loss
    does not decrease. Restores the old weights if no acceptable step is found.
    """
    # Step size from the quadratic model: 0.5 * s^T F s == max_kl.
    fvp = _fisher_vector_product(stepdir, kl_fun, param_fun, cg_damping, use_cuda=use_cuda).detach().cpu().numpy()
    shs = 0.5 * stepdir.dot(fvp)
    lagrange_mult = np.sqrt(shs / max_kl)
    full_step = stepdir / lagrange_mult

    theta_old = weight_getter()
    step_frac = 1.
    accepted = False
    for _ in range(n_epochs_line_search):
        weight_setter(theta_old + full_step * step_frac)
        new_loss = loss_fun()
        kl = kl_fun()
        improve = new_loss - prev_loss
        if kl <= max_kl * 1.5 or improve >= 0:
            accepted = True
            break
        step_frac *= .5

    if not accepted:
        print("WARNING! KL-Divergence bound violation after linesearch")
        weight_setter(theta_old)
| [
"torch.from_numpy",
"numpy.zeros_like",
"torch.isinf",
"numpy.isinf",
"numpy.isnan",
"numpy.linalg.norm",
"torch.sum",
"torch.isnan",
"numpy.sqrt"
] | [((457, 484), 'torch.sum', 'torch.sum', (['(flat_grad_kl * p)'], {}), '(flat_grad_kl * p)\n', (466, 484), False, 'import torch\n'), ((808, 827), 'torch.from_numpy', 'torch.from_numpy', (['p'], {}), '(p)\n', (824, 827), False, 'import torch\n'), ((1139, 1155), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (1152, 1155), True, 'import numpy as np\n'), ((3000, 3021), 'numpy.sqrt', 'np.sqrt', (['(shs / max_kl)'], {}), '(shs / max_kl)\n', (3007, 3021), True, 'import numpy as np\n'), ((1872, 1889), 'numpy.linalg.norm', 'np.linalg.norm', (['g'], {}), '(g)\n', (1886, 1889), True, 'import numpy as np\n'), ((2020, 2034), 'torch.isnan', 'torch.isnan', (['g'], {}), '(g)\n', (2031, 2034), False, 'import torch\n'), ((2049, 2063), 'torch.isinf', 'torch.isinf', (['g'], {}), '(g)\n', (2060, 2063), False, 'import torch\n'), ((2301, 2318), 'numpy.isnan', 'np.isnan', (['stepdir'], {}), '(stepdir)\n', (2309, 2318), True, 'import numpy as np\n'), ((2330, 2347), 'numpy.isinf', 'np.isinf', (['stepdir'], {}), '(stepdir)\n', (2338, 2347), True, 'import numpy as np\n')] |
import librosa
import os
import numpy as np
import scipy.io.wavfile as wavfile
# Half-open range of clip indices to look for in audio_train/.
RANGE = (0, 2000)

# Create the output directory on first run.
if not os.path.isdir('norm_audio_train'):
    os.mkdir('norm_audio_train')

for num in range(RANGE[0], RANGE[1]):
    path = 'audio_train/trim_audio_train%s.wav' % num
    norm_path = 'norm_audio_train/trim_audio_train%s.wav' % num
    if os.path.exists(path):
        audio, _ = librosa.load(path, sr=16000)
        # Peak-normalize into [-1, 1]. Renamed from `max`, which shadowed the
        # builtin of the same name.
        peak = np.max(np.abs(audio))
        if peak > 0:
            norm_audio = np.divide(audio, peak)
            wavfile.write(norm_path, 16000, norm_audio)
        else:
            # All-silence clip: dividing by zero would fill the output with
            # NaNs, so write the clip through unchanged instead.
            wavfile.write(norm_path, 16000, audio)
| [
"os.mkdir",
"numpy.divide",
"numpy.abs",
"os.path.isdir",
"os.path.exists",
"scipy.io.wavfile.write",
"librosa.load"
] | [((105, 138), 'os.path.isdir', 'os.path.isdir', (['"""norm_audio_train"""'], {}), "('norm_audio_train')\n", (118, 138), False, 'import os\n'), ((145, 173), 'os.mkdir', 'os.mkdir', (['"""norm_audio_train"""'], {}), "('norm_audio_train')\n", (153, 173), False, 'import os\n'), ((336, 356), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (350, 356), False, 'import os\n'), ((376, 404), 'librosa.load', 'librosa.load', (['path'], {'sr': '(16000)'}), '(path, sr=16000)\n', (388, 404), False, 'import librosa\n'), ((461, 482), 'numpy.divide', 'np.divide', (['audio', 'max'], {}), '(audio, max)\n', (470, 482), True, 'import numpy as np\n'), ((490, 533), 'scipy.io.wavfile.write', 'wavfile.write', (['norm_path', '(16000)', 'norm_audio'], {}), '(norm_path, 16000, norm_audio)\n', (503, 533), True, 'import scipy.io.wavfile as wavfile\n'), ((425, 438), 'numpy.abs', 'np.abs', (['audio'], {}), '(audio)\n', (431, 438), True, 'import numpy as np\n')] |
from __future__ import print_function
import baker
import logging
import core.io
from core.cascade import group_offsets
def truncate_data(x, y, qid, docno, k):
    """Keep only the first k documents of every query's ranked list."""
    import numpy as np
    kept = []
    for start, end in group_offsets(qid):
        kept.append(np.arange(start, min(start + k, end)))
    idx = np.concatenate(kept)
    if docno is None:
        new_docno = None
    else:
        new_docno = docno[idx]
    return x[idx], y[idx], qid[idx], new_docno
@baker.command
def make_npz(input_file, npz_file, k=0):
    """Convert input data (SVMLight or .npz) into .npz format"""
    # Pick the loader from the file extension; both return (x, y, qid, docno).
    if input_file.endswith('.npz'):
        loader = core.io.load_npz
    else:
        loader = core.io.load_svmlight_file
    x, y, qid, docno = loader(input_file)
    # Drop explicitly stored zero entries from the sparse feature matrix.
    x.eliminate_zeros()
    # Optionally keep only the top-k documents per query.
    if k:
        x, y, qid, docno = truncate_data(x, y, qid, docno, k)
    core.io.save_npz(npz_file, x, y, qid, docno)
@baker.command
def merge_npz(*npz_file):
    """Merge multiple npz files (*** EXPERIMENTAL ***)

    All arguments but the last are inputs; the last is the output path.
    Feature matrices are padded with implicit zero columns to the widest
    input, stacked vertically, and the label/qid/docno arrays concatenated.
    """
    import numpy as np
    import scipy.sparse
    dest_npz_file = npz_file[-1]  # the last filename is the destination
    npz_file = npz_file[:-1]
    x_list, y_list, qid_list, docno_list = [], [], [], []
    for fname in npz_file:
        x, y, qid, docno = core.io.load_npz(fname)
        x.eliminate_zeros()  # eliminate explicit zeros
        print(fname, x.shape, y.shape, qid.shape, docno.shape,
              'fid:[{}, {}]'.format(x.indices.min(), x.indices.max()))
        x_list.append(x)
        y_list.append(y)
        qid_list.append(qid)
        docno_list.append(docno)
    # Widen every matrix to the largest feature count by rebuilding the CSR
    # triple with a larger shape (adds implicit all-zero columns).
    n_features = max(x.shape[1] for x in x_list)
    for i in range(len(x_list)):
        if x_list[i].shape[1] == n_features:
            continue
        new_shape = (x_list[i].shape[0], n_features)
        x_list[i] = scipy.sparse.csr_matrix((x_list[i].data, x_list[i].indices, x_list[i].indptr),
                                        shape=new_shape)
    # Stack rows of all inputs and concatenate the aligned metadata arrays.
    x_new = scipy.sparse.vstack(x_list)
    print('x', type(x_new), x_new.shape, 'fid:[{}, {}]'.format(x_new.indices.min(), x_new.indices.max()))
    y_new = np.concatenate(y_list)
    print('y', type(y_new), y_new.shape)
    qid_new = np.concatenate(qid_list)
    print('qid', type(qid_new), qid_new.shape)
    docno_new = np.concatenate(docno_list)
    print('docno', type(docno_new), docno_new.shape)
    core.io.save_npz(dest_npz_file, x_new, y_new, qid_new, docno_new)
@baker.command
def show_npz_info(*npz_file):
    # Print shapes, label counts, and unique qids for each npz file.
    import numpy as np
    for fname in npz_file:
        print('filename', fname)
        x, y, qid, docno = core.io.load_npz(fname)
        docno_shape = docno.shape if docno is not None else None
        print('x', x.shape, 'y', y.shape, 'qid', qid.shape, 'docno', docno_shape)
        labels, counts = map(list, np.unique(y, return_counts=True))
        print('labels:', {int(lab): cnt for lab, cnt in zip(labels, counts)})
        unique_qid = np.unique(qid)
        print('qid (unique):', unique_qid.size)
        print(unique_qid)
        print()
@baker.command
def make_qrels(data_file, qrels_file):
    """Create qrels from an svmlight or npz file."""
    # NOTE(review): the file is opened in binary mode yet receives str writes,
    # which only works on Python 2 — confirm the intended interpreter.
    with open(qrels_file, 'wb') as out:
        if data_file.endswith('.npz'):
            _, y, qid, docno = core.io.load_npz(data_file)
            for a, b in group_offsets(qid):
                # When no docnos are stored, synthesize "<qid>.<rank>" names.
                if docno is None:
                    docno_string = ['%s.%d' % (qid[a], i) for i in range(1, b - a + 1)]
                else:
                    docno_string = docno[a:b]
                for d, rel in zip(docno_string, y[a:b]):
                    out.write('%s 0 %s %d\n' % (qid[a], d, rel))
        else:
            # SVMLight input: the parser yields (qid, docno, rel) triples.
            for qid, docno, rel in core.io.parse_svmlight_into_qrels(data_file):
                out.write('%s 0 %s %d\n' % (qid, docno, rel))
@baker.command
def make_svm(*csv_file):
    """Convert CSV files into SVMLight format
    Format: <label>,<query id>,<docno>,f1,f2,...,fn
    """
    import itertools
    import pandas as pd
    # BUG FIX: `reduce` is not a builtin on Python 3; functools.reduce exists
    # on both Python 2.6+ and 3, so this keeps the function working everywhere.
    from functools import reduce

    fid = itertools.count(1)
    frames = []
    for fname in csv_file:
        df = pd.read_csv(fname, sep=',', header=None)
        # First three columns are fixed; remaining ones get global feature ids
        # drawn from the shared counter so ids continue across files.
        names = (['rel', 'qid', 'docno'] +
                 ['f{}'.format(next(fid)) for _ in range(df.columns.size - 3)])
        df.columns = names
        frames.append(df)
    fid_end = next(fid)
    fields = ['f{}'.format(i) for i in range(1, fid_end)]
    fids = ['{}'.format(i) for i in range(1, fid_end)]

    # merge data frames on (qid, docno) so each row carries every feature set
    df_all = reduce(lambda l, r: pd.merge(l, r, how='inner', on=['qid', 'docno']), frames)
    df_all['rel'] = df_all['rel_x'].astype(int)
    df_all['qid'] = df_all['qid'].astype(str)
    df_all['docno'] = df_all['docno'].astype(str)
    print(df_all.head())

    for index, row in df_all.iterrows():
        vector = ' '.join(['{}:{:.6f}'.format(k, v) for k, v in zip(fids, row[fields])])
        print('{rel} qid:{qid} {vector} # {docno}'.format(
            rel=row['rel_x'], qid=row['qid'], vector=vector, docno=row['docno']))
@baker.command
def make_run(data_file, scores_file, generate_docno=True):
    """Create run file from an svmlight/npz file and a scores file"""
    scores = core.io.load_scores(scores_file)
    if data_file.endswith('.npz'):
        # BUG FIX: core.io.load_npz returns (x, y, qid, docno) everywhere else
        # in this module; the original 3-way unpack here raised a ValueError.
        _, y, qid, _docno = core.io.load_npz(data_file)
        for a, b in group_offsets(qid):
            # Synthesize "<qid>.<rank>" docnos, one line per ranked document.
            for i in range(1, b - a + 1):
                docno = '%s.%d' % (qid[a], i)
                print('%s Q0 %s 0 %f %s' % (qid[a], docno, scores[a + i - 1], 'eval.py'))
    else:
        qrels = core.io.parse_svmlight_into_qrels(data_file, generate_docno=generate_docno)
        for (qid, docno, _), score in zip(qrels, scores):
            print('%s Q0 %s 0 %f %s' % (qid, docno, score, 'eval.py'))
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
baker.run()
| [
"numpy.concatenate",
"logging.basicConfig",
"pandas.read_csv",
"pandas.merge",
"itertools.count",
"baker.run",
"core.cascade.group_offsets",
"numpy.unique"
] | [((2157, 2179), 'numpy.concatenate', 'np.concatenate', (['y_list'], {}), '(y_list)\n', (2171, 2179), True, 'import numpy as np\n'), ((2235, 2259), 'numpy.concatenate', 'np.concatenate', (['qid_list'], {}), '(qid_list)\n', (2249, 2259), True, 'import numpy as np\n'), ((2323, 2349), 'numpy.concatenate', 'np.concatenate', (['docno_list'], {}), '(docno_list)\n', (2337, 2349), True, 'import numpy as np\n'), ((4058, 4076), 'itertools.count', 'itertools.count', (['(1)'], {}), '(1)\n', (4073, 4076), False, 'import itertools\n'), ((5803, 5898), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (5822, 5898), False, 'import logging\n'), ((5908, 5919), 'baker.run', 'baker.run', ([], {}), '()\n', (5917, 5919), False, 'import baker\n'), ((2988, 3002), 'numpy.unique', 'np.unique', (['qid'], {}), '(qid)\n', (2997, 3002), True, 'import numpy as np\n'), ((4133, 4173), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'sep': '""","""', 'header': 'None'}), "(fname, sep=',', header=None)\n", (4144, 4173), True, 'import pandas as pd\n'), ((5341, 5359), 'core.cascade.group_offsets', 'group_offsets', (['qid'], {}), '(qid)\n', (5354, 5359), False, 'from core.cascade import group_offsets\n'), ((3364, 3382), 'core.cascade.group_offsets', 'group_offsets', (['qid'], {}), '(qid)\n', (3377, 3382), False, 'from core.cascade import group_offsets\n'), ((4547, 4595), 'pandas.merge', 'pd.merge', (['l', 'r'], {'how': '"""inner"""', 'on': "['qid', 'docno']"}), "(l, r, how='inner', on=['qid', 'docno'])\n", (4555, 4595), True, 'import pandas as pd\n'), ((318, 336), 'core.cascade.group_offsets', 'group_offsets', (['qid'], {}), '(qid)\n', (331, 336), False, 'from core.cascade import group_offsets\n'), ((2929, 2961), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (2938, 2961), True, 'import 
numpy as np\n')] |
import glob
import os
import random
import sys
import argparse
import numpy as np
from config import BabiConfig, BabiConfigJoint
from train_test import train, train_linear_start, test
from util import parse_babi_task, build_model
# Seed both the stdlib and NumPy RNGs so experiment runs are reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)   # for reproducing
def run_task(data_dir, task_id):
    """Train a memory network on one bAbI task and evaluate it on that task."""
    print("Train and test for task %d ..." % task_id)

    # Locate the raw text files belonging to this task.
    train_files = glob.glob('%s/qa%d_*_train.txt' % (data_dir, task_id))
    test_files = glob.glob('%s/qa%d_*_test.txt' % (data_dir, task_id))

    # One shared dictionary so train and test use a single vocabulary.
    dictionary = {"nil": 0}
    train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)
    test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)

    general_config = BabiConfig(train_story, train_questions, dictionary)
    memory, model, loss = build_model(general_config)

    # Choose the training routine based on the linear-start setting.
    trainer = train_linear_start if general_config.linear_start else train
    trainer(train_story, train_questions, train_qstory, memory, model, loss, general_config)

    test(test_story, test_questions, test_qstory, memory, model, loss, general_config)
def run_all_tasks(data_dir):
    """Train and test each of the 20 bAbI tasks independently, one at a time."""
    print("Training and testing for all tasks ...")
    for task_id in range(1, 21):
        run_task(data_dir, task_id=task_id)
def run_joint_tasks(data_dir):
    """
    Train and test for all tasks but the trained model is built using training data from all tasks.
    """
    print("Jointly train and test for all tasks ...")
    tasks = range(20)

    # Parse training data from every task into one combined corpus.
    train_data_path = []
    for t in tasks:
        train_data_path += glob.glob('%s/qa%d_*_train.txt' % (data_dir, t + 1))

    dictionary = {"nil": 0}
    train_story, train_questions, train_qstory = parse_babi_task(train_data_path, dictionary, False)

    # Parse test data for each task so that the dictionary covers all words before training
    for t in tasks:
        test_data_path = glob.glob('%s/qa%d_*_test.txt' % (data_dir, t + 1))
        parse_babi_task(test_data_path, dictionary, False) # ignore output for now

    general_config = BabiConfigJoint(train_story, train_questions, dictionary)
    memory, model, loss = build_model(general_config)

    if general_config.linear_start:
        train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)
    else:
        train(train_story, train_questions, train_qstory, memory, model, loss, general_config)

    # Test on each task with the jointly trained model.
    for t in tasks:
        print("Testing for task %d ..." % (t + 1))
        test_data_path = glob.glob('%s/qa%d_*_test.txt' % (data_dir, t + 1))

        # The dictionary size must not grow here: every test word was already
        # added in the pre-training pass above.
        dc = len(dictionary)
        test_story, test_questions, test_qstory = parse_babi_task(test_data_path, dictionary, False)
        assert dc == len(dictionary)  # make sure that the dictionary already covers all words

        test(test_story, test_questions, test_qstory, memory, model, loss, general_config)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data-dir", default="data/tasks_1-20_v1-2/en",
help="path to dataset directory (default: %(default)s)")
group = parser.add_mutually_exclusive_group()
group.add_argument("-t", "--task", default="1", type=int,
help="train and test for a single task (default: %(default)s)")
group.add_argument("-a", "--all-tasks", action="store_true",
help="train and test for all tasks (one by one) (default: %(default)s)")
group.add_argument("-j", "--joint-tasks", action="store_true",
help="train and test for all tasks (all together) (default: %(default)s)")
args = parser.parse_args()
# Check if data is available
data_dir = args.data_dir
if not os.path.exists(data_dir):
print("The data directory '%s' does not exist. Please download it first." % data_dir)
sys.exit(1)
print("Using data from %s" % args.data_dir)
if args.all_tasks:
run_all_tasks(data_dir)
elif args.joint_tasks:
run_joint_tasks(data_dir)
else:
run_task(data_dir, task_id=args.task)
| [
"util.parse_babi_task",
"config.BabiConfig",
"numpy.random.seed",
"argparse.ArgumentParser",
"train_test.train",
"config.BabiConfigJoint",
"os.path.exists",
"util.build_model",
"random.seed",
"train_test.test",
"glob.glob",
"train_test.train_linear_start",
"sys.exit"
] | [((247, 268), 'random.seed', 'random.seed', (['seed_val'], {}), '(seed_val)\n', (258, 268), False, 'import random\n'), ((269, 293), 'numpy.random.seed', 'np.random.seed', (['seed_val'], {}), '(seed_val)\n', (283, 293), True, 'import numpy as np\n'), ((487, 541), 'glob.glob', 'glob.glob', (["('%s/qa%d_*_train.txt' % (data_dir, task_id))"], {}), "('%s/qa%d_*_train.txt' % (data_dir, task_id))\n", (496, 541), False, 'import glob\n'), ((560, 613), 'glob.glob', 'glob.glob', (["('%s/qa%d_*_test.txt' % (data_dir, task_id))"], {}), "('%s/qa%d_*_test.txt' % (data_dir, task_id))\n", (569, 613), False, 'import glob\n'), ((692, 739), 'util.parse_babi_task', 'parse_babi_task', (['train_files', 'dictionary', '(False)'], {}), '(train_files, dictionary, False)\n', (707, 739), False, 'from util import parse_babi_task, build_model\n'), ((789, 835), 'util.parse_babi_task', 'parse_babi_task', (['test_files', 'dictionary', '(False)'], {}), '(test_files, dictionary, False)\n', (804, 835), False, 'from util import parse_babi_task, build_model\n'), ((858, 910), 'config.BabiConfig', 'BabiConfig', (['train_story', 'train_questions', 'dictionary'], {}), '(train_story, train_questions, dictionary)\n', (868, 910), False, 'from config import BabiConfig, BabiConfigJoint\n'), ((938, 965), 'util.build_model', 'build_model', (['general_config'], {}), '(general_config)\n', (949, 965), False, 'from util import parse_babi_task, build_model\n'), ((1221, 1307), 'train_test.test', 'test', (['test_story', 'test_questions', 'test_qstory', 'memory', 'model', 'loss', 'general_config'], {}), '(test_story, test_questions, test_qstory, memory, model, loss,\n general_config)\n', (1225, 1307), False, 'from train_test import train, train_linear_start, test\n'), ((1957, 2008), 'util.parse_babi_task', 'parse_babi_task', (['train_data_path', 'dictionary', '(False)'], {}), '(train_data_path, dictionary, False)\n', (1972, 2008), False, 'from util import parse_babi_task, build_model\n'), ((2304, 2361), 
'config.BabiConfigJoint', 'BabiConfigJoint', (['train_story', 'train_questions', 'dictionary'], {}), '(train_story, train_questions, dictionary)\n', (2319, 2361), False, 'from config import BabiConfig, BabiConfigJoint\n'), ((2388, 2415), 'util.build_model', 'build_model', (['general_config'], {}), '(general_config)\n', (2399, 2415), False, 'from util import parse_babi_task, build_model\n'), ((3197, 3222), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3220, 3222), False, 'import argparse\n'), ((1011, 1114), 'train_test.train_linear_start', 'train_linear_start', (['train_story', 'train_questions', 'train_qstory', 'memory', 'model', 'loss', 'general_config'], {}), '(train_story, train_questions, train_qstory, memory,\n model, loss, general_config)\n', (1029, 1114), False, 'from train_test import train, train_linear_start, test\n'), ((1129, 1219), 'train_test.train', 'train', (['train_story', 'train_questions', 'train_qstory', 'memory', 'model', 'loss', 'general_config'], {}), '(train_story, train_questions, train_qstory, memory, model, loss,\n general_config)\n', (1134, 1219), False, 'from train_test import train, train_linear_start, test\n'), ((1826, 1878), 'glob.glob', 'glob.glob', (["('%s/qa%d_*_train.txt' % (data_dir, t + 1))"], {}), "('%s/qa%d_*_train.txt' % (data_dir, t + 1))\n", (1835, 1878), False, 'import glob\n'), ((2147, 2198), 'glob.glob', 'glob.glob', (["('%s/qa%d_*_test.txt' % (data_dir, t + 1))"], {}), "('%s/qa%d_*_test.txt' % (data_dir, t + 1))\n", (2156, 2198), False, 'import glob\n'), ((2207, 2257), 'util.parse_babi_task', 'parse_babi_task', (['test_data_path', 'dictionary', '(False)'], {}), '(test_data_path, dictionary, False)\n', (2222, 2257), False, 'from util import parse_babi_task, build_model\n'), ((2461, 2564), 'train_test.train_linear_start', 'train_linear_start', (['train_story', 'train_questions', 'train_qstory', 'memory', 'model', 'loss', 'general_config'], {}), '(train_story, train_questions, train_qstory, 
memory,\n model, loss, general_config)\n', (2479, 2564), False, 'from train_test import train, train_linear_start, test\n'), ((2579, 2669), 'train_test.train', 'train', (['train_story', 'train_questions', 'train_qstory', 'memory', 'model', 'loss', 'general_config'], {}), '(train_story, train_questions, train_qstory, memory, model, loss,\n general_config)\n', (2584, 2669), False, 'from train_test import train, train_linear_start, test\n'), ((2787, 2838), 'glob.glob', 'glob.glob', (["('%s/qa%d_*_test.txt' % (data_dir, t + 1))"], {}), "('%s/qa%d_*_test.txt' % (data_dir, t + 1))\n", (2796, 2838), False, 'import glob\n'), ((2918, 2968), 'util.parse_babi_task', 'parse_babi_task', (['test_data_path', 'dictionary', '(False)'], {}), '(test_data_path, dictionary, False)\n', (2933, 2968), False, 'from util import parse_babi_task, build_model\n'), ((3073, 3159), 'train_test.test', 'test', (['test_story', 'test_questions', 'test_qstory', 'memory', 'model', 'loss', 'general_config'], {}), '(test_story, test_questions, test_qstory, memory, model, loss,\n general_config)\n', (3077, 3159), False, 'from train_test import train, train_linear_start, test\n'), ((4013, 4037), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (4027, 4037), False, 'import os\n'), ((4141, 4152), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4149, 4152), False, 'import sys\n')] |
import os
import cv2
import numpy as np
# Augment up to N_IMAGES grayscale gesture images: crop, denoise, then apply
# a random translation, a random rotation and (with ~50% probability)
# additive Gaussian noise, writing the results under TO.
FROM = "/home/pallab/gestures-cnn/raw-data/thumb"
TO = "/home/pallab/gestures-cnn/images/resized/"
N_IMAGES = 300  # expected number of source images (was hard-coded twice)
i = 0
os.chdir(FROM)
for image in os.listdir(".")[:N_IMAGES]:
    im = cv2.imread(image, 0)  # load as single-channel grayscale
    crop = im[200:920, 0:720]  # region of interest containing the hand
    rows, cols = crop.shape
    # Denoise: erode -> dilate (morphological opening), then median filter.
    # (The original also computed an unused GaussianBlur; removed.)
    erosion = cv2.erode(crop, None, iterations=1)
    dilated = cv2.dilate(erosion, None, iterations=1)
    median = cv2.medianBlur(dilated, 7)
    # Random translation of up to 100 px along each axis.
    tranx = 100 * np.random.uniform()
    trany = 100 * np.random.uniform()
    M = np.float32([[1, 0, tranx], [0, 1, trany]])
    dst = cv2.warpAffine(median, M, (cols, rows))
    # Random rotation of +/-10 degrees about a random center.
    cx = 100 * np.random.uniform()
    cy = 100 * np.random.uniform()
    angle = np.random.uniform(low=-10.0, high=10.0)
    M = cv2.getRotationMatrix2D((cx / 2, cy / 2), angle, 1)
    dst = cv2.warpAffine(dst, M, (cols, rows))
    # With probability ~0.5, add zero-mean Gaussian noise (variance 50).
    if round(np.random.uniform()):
        mean, sd = 0, 50 ** 0.5
        noise = np.random.normal(mean, sd, (rows, cols))
        dst = dst + noise
    cv2.imwrite(TO + "t_6" + str(i) + ".jpg", dst)
    i += 1
print(str(i) + " files were processed")
if i != N_IMAGES:
    print("Some files were not processed")
else:
    print("Done")
"cv2.GaussianBlur",
"numpy.random.uniform",
"cv2.getRotationMatrix2D",
"cv2.dilate",
"cv2.medianBlur",
"numpy.float32",
"cv2.imread",
"cv2.warpAffine",
"os.chdir",
"numpy.random.normal",
"cv2.erode",
"os.listdir"
] | [((146, 160), 'os.chdir', 'os.chdir', (['FROM'], {}), '(FROM)\n', (154, 160), False, 'import os\n'), ((174, 189), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (184, 189), False, 'import os\n'), ((206, 226), 'cv2.imread', 'cv2.imread', (['image', '(0)'], {}), '(image, 0)\n', (216, 226), False, 'import cv2\n'), ((296, 331), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['crop', '(33, 33)', '(0)'], {}), '(crop, (33, 33), 0)\n', (312, 331), False, 'import cv2\n'), ((346, 381), 'cv2.erode', 'cv2.erode', (['crop', 'None'], {'iterations': '(1)'}), '(crop, None, iterations=1)\n', (355, 381), False, 'import cv2\n'), ((398, 437), 'cv2.dilate', 'cv2.dilate', (['erosion', 'None'], {'iterations': '(1)'}), '(erosion, None, iterations=1)\n', (408, 437), False, 'import cv2\n'), ((451, 477), 'cv2.medianBlur', 'cv2.medianBlur', (['dilated', '(7)'], {}), '(dilated, 7)\n', (465, 477), False, 'import cv2\n'), ((591, 633), 'numpy.float32', 'np.float32', (['[[1, 0, tranx], [0, 1, trany]]'], {}), '([[1, 0, tranx], [0, 1, trany]])\n', (601, 633), True, 'import numpy as np\n'), ((639, 678), 'cv2.warpAffine', 'cv2.warpAffine', (['median', 'M', '(cols, rows)'], {}), '(median, M, (cols, rows))\n', (653, 678), False, 'import cv2\n'), ((755, 794), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)'}), '(low=-10.0, high=10.0)\n', (772, 794), True, 'import numpy as np\n'), ((807, 858), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cx / 2, cy / 2)', 'angle', '(1)'], {}), '((cx / 2, cy / 2), angle, 1)\n', (830, 858), False, 'import cv2\n'), ((862, 898), 'cv2.warpAffine', 'cv2.warpAffine', (['dst', 'M', '(cols, rows)'], {}), '(dst, M, (cols, rows))\n', (876, 898), False, 'import cv2\n'), ((495, 514), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (512, 514), True, 'import numpy as np\n'), ((547, 566), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (564, 566), True, 'import numpy as np\n'), ((690, 709), 
'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (707, 709), True, 'import numpy as np\n'), ((723, 742), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (740, 742), True, 'import numpy as np\n'), ((916, 935), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (933, 935), True, 'import numpy as np\n'), ((998, 1038), 'numpy.random.normal', 'np.random.normal', (['mean', 'sd', '(rows, cols)'], {}), '(mean, sd, (rows, cols))\n', (1014, 1038), True, 'import numpy as np\n')] |
from sklearn.base import BaseEstimator
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import KBinsDiscretizer
np.random.seed(7)  # fix the global RNG so model fitting is reproducible
EPSILON = 1e-10  # smoothing constant: keeps every mixture weight non-zero
# This regressor builds a discretized conditional distribution from
# non-overlapping uniform components (one per histogram bin).
class GenerativeRegressor(BaseEstimator):
    """Conditional density estimator built from non-overlapping uniforms.

    Targets are discretized into ``max_dists`` ordinal bins; a random
    forest predicts a probability for each bin, and those probabilities
    become the weights of a mixture of uniform distributions over the
    bin edges.
    """

    def __init__(self, max_dists, current_dim):
        self.nb_bins = max_dists
        self.enc = KBinsDiscretizer(n_bins=self.nb_bins, encode='ordinal')
        self.clf = RandomForestClassifier(
            n_estimators=40, max_leaf_nodes=150, random_state=61)

    def fit(self, X, y):
        """Discretize the targets into ordinal bins and fit the classifier."""
        self.enc.fit(y)
        binned = self.enc.transform(y)
        self.clf.fit(X, binned.ravel())

    def predict(self, X):
        """Construct a conditional mixture distribution.

        Return
        ------
        weights : np.array of float
            discrete probabilities of each component of the mixture
        types : np.array of int
            integer codes referring to component types
            (see rampwf.utils.distributions_dict; code 1 = uniform)
        params : np.array of float tuples
            (a, b) bounds for each uniform component, interleaved per row
        """
        n_rows = len(X)
        # Every component of the mixture is a uniform distribution (code 1).
        component_types = np.ones((n_rows, self.nb_bins))
        # Class probabilities from the forest become the mixture weights.
        mixture_weights = np.array(self.clf.predict_proba(X))
        # Copy the edges so the padding below does not mutate the encoder.
        edges = self.enc.bin_edges_[0].copy()
        lower_edges = edges[:-1]
        upper_edges = edges[1:]
        # Pad the outermost bins so no target value falls outside coverage.
        lower_edges[0] -= 2
        upper_edges[-1] += 2
        # Smooth the weights and renormalize each row to sum to one.
        mixture_weights = mixture_weights + EPSILON
        mixture_weights = mixture_weights / np.sum(mixture_weights, axis=1)[:, None]
        # Interleave (a, b) pairs: even columns are lower bounds, odd upper.
        params = np.empty((n_rows, self.nb_bins * 2))
        params[:, 0::2] = lower_edges
        params[:, 1::2] = upper_edges
        return mixture_weights, component_types, params
| [
"sklearn.ensemble.RandomForestClassifier",
"numpy.random.seed",
"numpy.sum",
"numpy.array",
"sklearn.preprocessing.KBinsDiscretizer"
] | [((162, 179), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (176, 179), True, 'import numpy as np\n'), ((447, 502), 'sklearn.preprocessing.KBinsDiscretizer', 'KBinsDiscretizer', ([], {'n_bins': 'self.nb_bins', 'encode': '"""ordinal"""'}), "(n_bins=self.nb_bins, encode='ordinal')\n", (463, 502), False, 'from sklearn.preprocessing import KBinsDiscretizer\n'), ((522, 598), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(40)', 'max_leaf_nodes': '(150)', 'random_state': '(61)'}), '(n_estimators=40, max_leaf_nodes=150, random_state=61)\n', (544, 598), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1471, 1492), 'numpy.array', 'np.array', (['preds_proba'], {}), '(preds_proba)\n', (1479, 1492), True, 'import numpy as np\n'), ((1854, 1877), 'numpy.sum', 'np.sum', (['weights'], {'axis': '(1)'}), '(weights, axis=1)\n', (1860, 1877), True, 'import numpy as np\n')] |
"""Implementations of the non-parametric bootstrap and the Bayesian bootstrap
for sampling distribution estimation.
References
----------
<NAME>. "Bootstrap Methods: Another Look at the Jackknife". The Annals of
Statistics, Volume 7, Number 1 (1979), 1--26. doi:10.1214/aos/1176344552
<NAME>. "The Bayesian bootstrap". The Annals of Statistics, Volume 9,
Number 1 (1981), 130--134. doi:10.1214/aos/1176345338
"""
import numpy as np
import scipy.stats as st
from ..utils.validation import validate_float
from ..utils.validation import validate_func
from ..utils.validation import validate_int
from ..utils.validation import validate_samples
# Non-parametric bootstrap
def bootstrap(*samples, n_boot, random_state=None, ret_list=False):
    """Draw bootstrap resamples by sampling rows with replacement.

    Parameters
    ----------
    samples : sequence of arrays
        Original samples; all must have equal length along axis 0.
    n_boot : int
        Number of bootstrap resamples to draw (keyword-only).
    random_state : numpy.random.RandomState, int, or array-like, optional
        Either a ready-made RandomState or a seed from which one is built
        (keyword-only).
    ret_list : bool, optional
        If True, always return a list, even when only one original sample
        was given (keyword-only).

    Returns
    -------
    boots : list or numpy.ndarray
        One array of shape ``(n_boot,) + sample.shape`` per input sample.
    """
    samples = validate_samples(*samples, equal_lengths=True, ret_list=True)
    n_samples = len(samples)
    n_obs = len(samples[0])
    n_boot = validate_int(n_boot, "n_boot", minimum=1)
    # Accept either an existing generator or any valid seed.
    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)
    # One output array per input sample, stacked along a new leading axis.
    boots = [np.empty((n_boot,) + s.shape) for s in samples]
    for b in range(n_boot):
        # A single index draw is shared by all samples so rows stay paired.
        row_idx = random_state.randint(0, n_obs, n_obs)
        for k, s in enumerate(samples):
            boots[k][b] = s.take(row_idx, axis=0)
    return boots if (ret_list or n_samples > 1) else boots[0]
class Bootstrap(object):
    """Generic sampling-distribution estimation with the bootstrap.

    Properties
    ----------
    n_boot : int
        Number of bootstrap samples.
    samples_boot : list
        The i-th element holds the bootstrap resamples drawn from the
        i-th original sample.
    dist : numpy.ndarray
        Bootstrap sampling distribution of the statistic.
    observed : object
        Statistic evaluated on the original samples.
    """
    n_boot: int = None
    samples_boot: list = None
    dist: np.ndarray = None
    observed = None

    def __init__(self, *samples, stat, n_boot, random_state=None, **kwargs):
        """Resample with replacement and re-compute the statistic each time.

        Parameters
        ----------
        samples: sequence of arrays
            Samples to resample from. All arrays must share the same
            length (sample size); their other dimensions are free.
        stat: callable or str
            Statistic to compute. A string names a NumPy array method
            (e.g. "mean"); a callable must accept as many arrays as there
            are in `samples`. May be vector-valued. Keyword-only.
        n_boot : int
            Number of bootstrap samples to draw. Keyword-only.
        random_state : numpy.random.RandomState, int, or array-like, optional
            RandomState instance or seed used for the resampling.
            Keyword-only.
        kwargs: dict, optional
            Extra keyword arguments forwarded to `stat`.
        """
        stat = validate_func(stat, **kwargs)
        self.samples_boot = bootstrap(*samples, n_boot=n_boot,
                                      random_state=random_state, ret_list=True)
        # Evaluate the statistic on every joint resample.
        self.dist = np.asarray(
            [stat(*resampled) for resampled in zip(*self.samples_boot)])
        self.observed = stat(*samples)
        self.n_boot = n_boot

    def var(self):
        """Bootstrap estimate for the variance of the statistic."""
        return self.dist.var(axis=0, ddof=0)

    def se(self):
        """Bootstrap standard error estimate."""
        return self.dist.std(axis=0, ddof=0)

    def ci(self, alpha=0.05, kind="normal"):
        """Two-sided bootstrap confidence interval.

        Parameter
        ---------
        alpha : float in (0, 1), optional
            1 - alpha is the coverage probability of the interval.
        kind : "normal" or "pivotal", optional
            Type of bootstrap confidence interval to compute.

        Returns
        -------
        lower : float or numpy.ndarray
            Lower endpoint of the confidence interval.
        upper : float or numpy.ndarray
            Upper endpoint of the confidence interval.
        """
        alpha = validate_float(alpha, "alpha", minimum=0.0, maximum=1.0)
        if kind == "normal":
            # ppf(alpha/2) is negative, so adding it shrinks the estimate.
            z_crit = st.norm(0, 1).ppf(alpha / 2)
            half_width = z_crit * self.se()
            return self.observed + half_width, self.observed - half_width
        if kind == "pivotal":
            upper_quantile = np.percentile(self.dist, 100 * (1 - alpha / 2), axis=0)
            lower_quantile = np.percentile(self.dist, 100 * alpha / 2, axis=0)
            return (2 * self.observed - upper_quantile,
                    2 * self.observed - lower_quantile)
        raise ValueError(f"Invalid parameter 'kind': {kind}")
# Bayesian bootstrap
def bayesian_bootstrap(*samples, n_boot, random_state=None):
    """Generate Bayesian bootstrap posterior weight vectors.

    Parameters
    ----------
    samples : sequence of arrays
        Original samples; all must have equal length along axis 0.
    n_boot : int
        Number of weight vectors to draw (keyword-only).
    random_state : numpy.random.RandomState, int, or array-like, optional
        RandomState instance or seed for the draws (keyword-only).

    Returns
    -------
    weights : numpy.ndarray of shape (n_boot, n_observations)
        One Dirichlet-distributed weight vector per bootstrap replicate.
    """
    samples = validate_samples(*samples, equal_lengths=True, ret_list=True)
    n_obs = len(samples[0])
    n_boot = validate_int(n_boot, "n_boot", minimum=1)
    # Accept either an existing generator or any valid seed.
    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)
    # Dirichlet(1, ..., 1) draws are the Bayesian bootstrap posteriors.
    concentration = np.repeat(1.0, n_obs)
    return random_state.dirichlet(concentration, size=n_boot)
class BayesianBootstrap(object):
    """Generic sampling-distribution estimation with the Bayesian bootstrap.

    Properties
    ----------
    n_boot : int
        Number of bootstrap replicates.
    weights : numpy.ndarray
        Dirichlet posterior weight vectors, one row per replicate.
    dist : numpy.ndarray
        Bayesian bootstrap sampling distribution of the statistic.
    observed : object
        Statistic evaluated on the original (unweighted) samples.
    """
    n_boot: int
    weights: np.ndarray
    dist: np.ndarray
    observed: object

    def __init__(self, *samples, stat, n_boot, random_state=None, **kwargs):
        """Build a Bayesian bootstrap sampling distribution.

        Parameters
        ----------
        samples: sequence of arrays
            Samples to work on. All arrays must share the same length
            (sample size); their other dimensions are free.
        stat: callable or str
            Statistic with signature ``stat(*samples, weights, **kwargs)``
            where `weights` selects the posterior (uniform if omitted).
            Keyword-only.
        n_boot : int
            Number of bootstrap replicates. Keyword-only.
        random_state : numpy.random.RandomState, int, or array-like, optional
            RandomState instance or seed for the draws. Keyword-only.
        kwargs: dict, optional
            Extra keyword arguments forwarded to `stat`.
        """
        stat = validate_func(stat, **kwargs)
        # One Dirichlet weight vector per bootstrap replicate.
        self.weights = bayesian_bootstrap(*samples, n_boot=n_boot,
                                           random_state=random_state)
        # Re-evaluate the statistic under every posterior weighting.
        self.dist = np.asarray(
            [stat(*samples, weights=w) for w in self.weights])
        self.observed = stat(*samples)
        self.n_boot = n_boot

    def var(self):
        """Bayesian bootstrap estimate for the variance of the statistic."""
        return self.dist.var(axis=0, ddof=0)

    def se(self):
        """Bayesian bootstrap standard error estimate."""
        return self.dist.std(axis=0, ddof=0)
| [
"scipy.stats.norm",
"numpy.empty",
"numpy.asarray",
"numpy.random.RandomState",
"numpy.percentile",
"numpy.repeat"
] | [((2145, 2180), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (2166, 2180), True, 'import numpy as np\n'), ((2245, 2283), 'numpy.empty', 'np.empty', (['((n_boot,) + samples[i].shape)'], {}), '((n_boot,) + samples[i].shape)\n', (2253, 2283), True, 'import numpy as np\n'), ((5408, 5424), 'numpy.asarray', 'np.asarray', (['dist'], {}), '(dist)\n', (5418, 5424), True, 'import numpy as np\n'), ((8253, 8288), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (8274, 8288), True, 'import numpy as np\n'), ((8389, 8410), 'numpy.repeat', 'np.repeat', (['(1.0)', 'n_obs'], {}), '(1.0, n_obs)\n', (8398, 8410), True, 'import numpy as np\n'), ((10924, 10940), 'numpy.asarray', 'np.asarray', (['dist'], {}), '(dist)\n', (10934, 10940), True, 'import numpy as np\n'), ((6703, 6758), 'numpy.percentile', 'np.percentile', (['self.dist', '(100 * (1 - alpha / 2))'], {'axis': '(0)'}), '(self.dist, 100 * (1 - alpha / 2), axis=0)\n', (6716, 6758), True, 'import numpy as np\n'), ((6783, 6832), 'numpy.percentile', 'np.percentile', (['self.dist', '(100 * alpha / 2)'], {'axis': '(0)'}), '(self.dist, 100 * alpha / 2, axis=0)\n', (6796, 6832), True, 'import numpy as np\n'), ((6507, 6520), 'scipy.stats.norm', 'st.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (6514, 6520), True, 'import scipy.stats as st\n')] |
# When run as a script, make the parent directory importable so the local
# dezero package is found without installation.
if '__file__' in globals():
    import os
    import sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable, Model
from dezero import setup_variable
from dezero.utils import plot_dot_graph
import dezero.functions as F
from dezero import optimizers
from dezero.models import MLP
setup_variable()  # dezero init hook (presumably wires Variable operators) -- TODO confirm
np.random.seed(0)  # deterministic synthetic data across test runs
lr = 0.2  # learning rate shared by both optimizer tests
max_iters = 100  # training iterations per test
class TestOptimizer:
    """Smoke tests: training a small MLP on noisy sine data must run end to
    end with each optimizer (no numerical assertions are made)."""

    def _run_training(self, optimizer):
        """Fit a (10, 1) MLP to y = sin(2*pi*x) + noise with *optimizer*
        for `max_iters` gradient steps. Shared by both tests below."""
        x = np.random.rand(100, 1)
        y = np.sin(2 * np.pi * x) + np.random.rand(100, 1)
        hidden_size = 10
        model = MLP((hidden_size, 1))
        optimizer.setup(model)
        for i in range(max_iters):
            y_pred = model(x)
            loss = F.mean_squared_error(y, y_pred)
            model.cleargrads()
            loss.backward()
            optimizer.update()

    def test_SDG(self):
        self._run_training(optimizers.SGD(lr))
        assert True

    def test_MomentumSDG(self):
        self._run_training(optimizers.MomentumSGD(lr))
        assert True
| [
"dezero.optimizers.MomentumSGD",
"numpy.random.seed",
"dezero.models.MLP",
"os.path.dirname",
"numpy.sin",
"dezero.optimizers.SGD",
"numpy.random.rand",
"dezero.functions.mean_squared_error",
"dezero.setup_variable"
] | [((371, 387), 'dezero.setup_variable', 'setup_variable', ([], {}), '()\n', (385, 387), False, 'from dezero import setup_variable\n'), ((388, 405), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (402, 405), True, 'import numpy as np\n'), ((490, 512), 'numpy.random.rand', 'np.random.rand', (['(100)', '(1)'], {}), '(100, 1)\n', (504, 512), True, 'import numpy as np\n'), ((615, 636), 'dezero.models.MLP', 'MLP', (['(hidden_size, 1)'], {}), '((hidden_size, 1))\n', (618, 636), False, 'from dezero.models import MLP\n'), ((657, 675), 'dezero.optimizers.SGD', 'optimizers.SGD', (['lr'], {}), '(lr)\n', (671, 675), False, 'from dezero import optimizers\n'), ((979, 1001), 'numpy.random.rand', 'np.random.rand', (['(100)', '(1)'], {}), '(100, 1)\n', (993, 1001), True, 'import numpy as np\n'), ((1104, 1125), 'dezero.models.MLP', 'MLP', (['(hidden_size, 1)'], {}), '((hidden_size, 1))\n', (1107, 1125), False, 'from dezero.models import MLP\n'), ((1146, 1172), 'dezero.optimizers.MomentumSGD', 'optimizers.MomentumSGD', (['lr'], {}), '(lr)\n', (1168, 1172), False, 'from dezero import optimizers\n'), ((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n'), ((525, 546), 'numpy.sin', 'np.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (531, 546), True, 'import numpy as np\n'), ((549, 571), 'numpy.random.rand', 'np.random.rand', (['(100)', '(1)'], {}), '(100, 1)\n', (563, 571), True, 'import numpy as np\n'), ((791, 822), 'dezero.functions.mean_squared_error', 'F.mean_squared_error', (['y', 'y_pred'], {}), '(y, y_pred)\n', (811, 822), True, 'import dezero.functions as F\n'), ((1014, 1035), 'numpy.sin', 'np.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (1020, 1035), True, 'import numpy as np\n'), ((1038, 1060), 'numpy.random.rand', 'np.random.rand', (['(100)', '(1)'], {}), '(100, 1)\n', (1052, 1060), True, 'import numpy as np\n'), ((1288, 1319), 'dezero.functions.mean_squared_error', 
'F.mean_squared_error', (['y', 'y_pred'], {}), '(y, y_pred)\n', (1308, 1319), True, 'import dezero.functions as F\n')] |
import cv2
import numpy as np
def nothing(x):
    """No-op trackbar callback: OpenCV requires a callable, but the loop
    below polls trackbar positions instead of reacting to events."""
    return None
# Interactive RGB color picker: three trackbars choose a color and a switch
# trackbar toggles whether the canvas shows it (OFF paints the canvas black).
canvas = np.zeros((512, 512, 3), dtype=np.uint8) + 255
cv2.namedWindow("image")
cv2.createTrackbar("R", "image", 0, 255, nothing)
cv2.createTrackbar("G", "image", 0, 255, nothing)
cv2.createTrackbar("B", "image", 0, 255, nothing)
switch = "0:OFF, 1:ON"
cv2.createTrackbar(switch, "image", 0, 1, nothing)
while True:
    cv2.imshow("image", canvas)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
    r = cv2.getTrackbarPos("R", "image")
    g = cv2.getTrackbarPos("G", "image")
    b = cv2.getTrackbarPos("B", "image")
    s = cv2.getTrackbarPos(switch, "image")
    # BUG FIX: the original tested `switch == 0`, comparing the label
    # *string* to 0 (always False). The switch *position* `s` decides.
    if s == 0:
        canvas[:] = [0, 0, 0]
    else:
        # OpenCV stores pixels in BGR order.
        canvas[:] = [b, g, r]
cv2.destroyAllWindows()
| [
"cv2.createTrackbar",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.getTrackbarPos",
"cv2.imshow",
"cv2.namedWindow"
] | [((114, 138), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (129, 138), False, 'import cv2\n'), ((140, 189), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""R"""', '"""image"""', '(0)', '(255)', 'nothing'], {}), "('R', 'image', 0, 255, nothing)\n", (158, 189), False, 'import cv2\n'), ((190, 239), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""G"""', '"""image"""', '(0)', '(255)', 'nothing'], {}), "('G', 'image', 0, 255, nothing)\n", (208, 239), False, 'import cv2\n'), ((240, 289), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""B"""', '"""image"""', '(0)', '(255)', 'nothing'], {}), "('B', 'image', 0, 255, nothing)\n", (258, 289), False, 'import cv2\n'), ((313, 363), 'cv2.createTrackbar', 'cv2.createTrackbar', (['switch', '"""image"""', '(0)', '(1)', 'nothing'], {}), "(switch, 'image', 0, 1, nothing)\n", (331, 363), False, 'import cv2\n'), ((729, 752), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (750, 752), False, 'import cv2\n'), ((68, 107), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)'], {'dtype': 'np.uint8'}), '((512, 512, 3), dtype=np.uint8)\n', (76, 107), True, 'import numpy as np\n'), ((381, 408), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'canvas'], {}), "('image', canvas)\n", (391, 408), False, 'import cv2\n'), ((474, 506), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""R"""', '"""image"""'], {}), "('R', 'image')\n", (492, 506), False, 'import cv2\n'), ((515, 547), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""G"""', '"""image"""'], {}), "('G', 'image')\n", (533, 547), False, 'import cv2\n'), ((556, 588), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""B"""', '"""image"""'], {}), "('B', 'image')\n", (574, 588), False, 'import cv2\n'), ((597, 632), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['switch', '"""image"""'], {}), "(switch, 'image')\n", (615, 632), False, 'import cv2\n'), ((416, 430), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (427, 430), False, 'import cv2\n')] |
"""
Module responsible for importing raw RNA-seq per-gene counts and pulling out statistically
significantly different genes. Ideally calls of hits should be done by other methods,
but in their absence raw call (RpMbp) can be used to perform intergoupt calls here.
"""
from collections import defaultdict
from csv import reader
from pprint import PrettyPrinter
import numpy as np
from scipy.stats import t
from bioflow.utils.io_routines import dump_object
from bioflow.utils.log_behavior import get_logger
from bioflow.configs.main_configs import Dumps
from bioflow.neo4j_db import db_io_routines
log = get_logger(__name__)
# Small-sample correction factors for the variance estimator, keyed by
# replicate count; used in significantly_different_genes.
pre_dict = {1: 0.80,
            2: 0.89,
            3: 0.92,
            4: 0.94,
            5: 0.95,
            6: 0.96,
            7: 0.965,
            8: 0.97, }  # t-score corrector
# Any group size not listed above defaults to 1 (no correction).
estimator_dilatation_table = defaultdict(lambda: 1)
estimator_dilatation_table.update(pre_dict)
def load_rna_counts_table(rna_source, experiments_to_load):
    """Load a per-gene RNA-seq counts table from a tab-separated file.

    The file layout is: gene identifier, uxon length, then one counts
    column per experiment (genes are rows). A header row is expected and
    skipped. Each gene name is paired with an empty slot that is later
    filled with its database inner ID.

    :param rna_source: path of the file holding the raw RNA-seq counts
    :param experiments_to_load: how many experiment columns to read
    :return: (gene_names, uxon_lengths, counts_table) as numpy arrays
    """
    gene_names = []
    uxon_lengths = []
    table = []
    log.info('loading RNA counts table from %s', rna_source)
    with open(rna_source, 'rt') as source_file:
        tsv_rows = reader(source_file, 'excel-tab')
        next(tsv_rows)  # drop the header line
        for row in tsv_rows:
            # Second slot reserved for the DB inner ID, resolved later.
            gene_names.append(np.array([row[0], '']))
            counts = [float(cell) for cell in row[2: experiments_to_load + 2]]
            table.append(np.array(counts, dtype=np.float64))
            uxon_lengths.append(np.array([float(row[1])]))
    return np.array(gene_names), np.array(uxon_lengths), np.array(table)
def counts_filter(table, experiment_groups, filter_level):
    """Build a boolean mask of genes whose counts clear the detection floor.

    A gene passes when at least one experiment group has *every* replicate
    at or above `filter_level` counts.

    :param table: counts matrix; rows are genes, columns are experiments
    :param experiment_groups: lists of column indexes, one per replicate group
    :param filter_level: minimum count every replicate of some group must reach
    :return: boolean array, True for genes that pass the filter
    """
    # Worst (smallest) replicate count within each group, per gene.
    per_group_minima = np.column_stack(
        [np.min(table[:, group], axis=1) for group in experiment_groups])
    # Keep the gene if its best group still clears the threshold.
    return np.max(per_group_minima, axis=1) > filter_level - 1
def convert_to_rpkm(uxon_length, table):
    """Normalize raw counts to RPKM-style values.

    Each count is divided by its column's total reads and by the gene's
    uxon length, then rescaled by 10e12 (the scale factor used throughout
    this module).

    :param uxon_length: column vector of gene (uxon) lengths
    :param table: counts matrix; rows are genes, columns are experiments
    :return: matrix of normalized expression values, same shape as `table`
    """
    library_sizes = np.sum(table, axis=0)  # total reads per experiment
    gene_lengths = uxon_length[:, 0][:, np.newaxis]
    return table / library_sizes[np.newaxis, :] / gene_lengths * 10e12
def significantly_different_genes(
        rpkm_table,
        experiment_groups,
        intergroups,
        target_p_value=0.05):
    """
    Performs a test that uses the error function to determine if we can reject the hypothesis that
    all the genes are sampled from the same distribution.
    :param rpkm_table: table of the rpkm values
    :param experiment_groups: groups on indexes
    :param intergroups: the groups between which we want to do the comparisons
    :param target_p_value: p_value with which we want to be able to reject the null hypothesis
    :return: list of (p_values, filter_mask) tuples, one per intergroup pair
    """
    # Per-group mean and variance for every gene (rows = genes).
    groups_means = np.zeros((rpkm_table.shape[0], len(experiment_groups)))
    groups_var = np.zeros((rpkm_table.shape[0], len(experiment_groups)))
    for i, group in enumerate(experiment_groups):
        groups_means[:, i] = np.mean(rpkm_table[:, group], axis=1)
        # Variance is deflated by a small-sample correction factor looked
        # up by replicate count (defaults to 1 for unlisted group sizes).
        groups_var[:, i] = np.var(rpkm_table[:, group], axis=1) / \
            estimator_dilatation_table[len(group)] ** 2
    group_comparison = []
    for bi_group in intergroups:
        groups_mean_difference = np.fabs(
            groups_means[
                :,
                bi_group[0]] -
            groups_means[
                :,
                bi_group[1]])
        groups_combined_std = np.sqrt(
            groups_var[
                :,
                bi_group[0]] +
            groups_var[
                :,
                bi_group[1]])
        # One-sided p-value from the t survival function; degrees of freedom
        # are approximated by the mean of the two groups' replicate counts.
        p_val = t.sf(groups_mean_difference /
                     groups_combined_std, (len(experiment_groups[bi_group[0]]) +
                                           len(experiment_groups[bi_group[1]])) /
                     2)
        # Benjamini-Hochberg-style step-up FDR control at target_p_value.
        # NOTE(review): rank starts at 0 rather than 1, which is more
        # conservative than textbook BH -- confirm this is intended.
        sorted_p_vals = np.sort(p_val, axis=0)
        lower_index = np.array(list(range(0, sorted_p_vals.shape[0]))) *\
            target_p_value / sorted_p_vals.shape[0]
        pre_filter_mask = sorted_p_vals <= lower_index
        filter_mask = pre_filter_mask
        if np.any(pre_filter_mask):
            # Accept every gene whose p-value falls below the largest
            # p-value that passed the step-up criterion.
            refined_threshold = np.max(sorted_p_vals[pre_filter_mask])
            filter_mask = p_val < refined_threshold
        group_comparison.append((p_val, filter_mask))
    return group_comparison
def run_analysis_suite(
        rna_source,
        no_of_experiments,
        experimental_groups,
        groups_to_compare,
        count_filter_level=5,
        false_discovery_rate=0.05):
    """Load a counts table, run the statistical pipeline, and persist masks.

    The pipeline: load counts, resolve DB inner IDs for the gene names,
    drop low-count genes, convert to RPKM, then test each requested group
    pair for significant differences. The resulting boolean masks are
    dumped to `Dumps.RNA_seq_counts_compare` and also returned.

    :param rna_source: file from which the raw counts are read
    :param no_of_experiments: number of experiment columns to load
    :param experimental_groups: lists of column indexes, one per replicate group
    :param groups_to_compare: pairs of group indexes to test against each other
    :param count_filter_level: minimum counts required to keep a gene
    :param false_discovery_rate: desired false discovery rate
    :return: list of boolean masks, one per compared group pair
    """
    names, lengths, counts = load_rna_counts_table(
        rna_source, no_of_experiments)
    # Fill the reserved second column with database inner IDs.
    _, _, names[:, 1] = db_io_routines.look_up_annotation_set(names[
        :, 0].tolist())
    keep = counts_filter(
        counts,
        experimental_groups,
        filter_level=count_filter_level)
    names = names[keep, :]
    lengths = lengths[keep, :]
    counts = counts[keep, :]
    rpkms = convert_to_rpkm(lengths, counts)
    comparisons = significantly_different_genes(
        rpkms,
        experimental_groups,
        groups_to_compare,
        false_discovery_rate)
    filter_masks = [mask for _p_values, mask in comparisons]
    dump_object(Dumps.RNA_seq_counts_compare, filter_masks)
    return filter_masks
if __name__ == "__main__":
    # Ad-hoc manual run: 9 experiments in 3 replicate groups, comparing
    # group 0 against groups 1 and 2.
    pp = PrettyPrinter(indent=4)
    exp_groups = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    test_groups_to_compare = [[0, 1], [0, 2]]
    rna_source = "/home/ank/Documents/External_Predictions/Ben_RNA_seq/counts.tsv"
    run_analysis_suite(
        rna_source,
        9,
        exp_groups,
        test_groups_to_compare,
        10,
        0.05)
| [
"numpy.sum",
"csv.reader",
"bioflow.utils.log_behavior.get_logger",
"collections.defaultdict",
"pprint.PrettyPrinter",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.mean",
"numpy.fabs",
"numpy.sort",
"bioflow.utils.io_routines.dump_object",
"numpy.any",
"numpy.var",
"numpy.sqrt"
] | [((606, 626), 'bioflow.utils.log_behavior.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (616, 626), False, 'from bioflow.utils.log_behavior import get_logger\n'), ((851, 874), 'collections.defaultdict', 'defaultdict', (['(lambda : 1)'], {}), '(lambda : 1)\n', (862, 874), False, 'from collections import defaultdict\n'), ((2069, 2089), 'numpy.array', 'np.array', (['gene_names'], {}), '(gene_names)\n', (2077, 2089), True, 'import numpy as np\n'), ((2102, 2117), 'numpy.array', 'np.array', (['table'], {}), '(table)\n', (2110, 2117), True, 'import numpy as np\n'), ((2137, 2159), 'numpy.array', 'np.array', (['uxon_lengths'], {}), '(uxon_lengths)\n', (2145, 2159), True, 'import numpy as np\n'), ((3172, 3193), 'numpy.sum', 'np.sum', (['table'], {'axis': '(0)'}), '(table, axis=0)\n', (3178, 3193), True, 'import numpy as np\n'), ((6919, 6974), 'bioflow.utils.io_routines.dump_object', 'dump_object', (['Dumps.RNA_seq_counts_compare', 'filter_masks'], {}), '(Dumps.RNA_seq_counts_compare, filter_masks)\n', (6930, 6974), False, 'from bioflow.utils.io_routines import dump_object\n'), ((7038, 7061), 'pprint.PrettyPrinter', 'PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (7051, 7061), False, 'from pprint import PrettyPrinter\n'), ((1657, 1689), 'csv.reader', 'reader', (['source_file', '"""excel-tab"""'], {}), "(source_file, 'excel-tab')\n", (1663, 1689), False, 'from csv import reader\n'), ((2827, 2858), 'numpy.min', 'np.min', (['table[:, group]'], {'axis': '(1)'}), '(table[:, group], axis=1)\n', (2833, 2858), True, 'import numpy as np\n'), ((2877, 2899), 'numpy.max', 'np.max', (['minima'], {'axis': '(1)'}), '(minima, axis=1)\n', (2883, 2899), True, 'import numpy as np\n'), ((4131, 4168), 'numpy.mean', 'np.mean', (['rpkm_table[:, group]'], {'axis': '(1)'}), '(rpkm_table[:, group], axis=1)\n', (4138, 4168), True, 'import numpy as np\n'), ((4386, 4454), 'numpy.fabs', 'np.fabs', (['(groups_means[:, bi_group[0]] - groups_means[:, bi_group[1]])'], {}), 
'(groups_means[:, bi_group[0]] - groups_means[:, bi_group[1]])\n', (4393, 4454), True, 'import numpy as np\n'), ((4576, 4640), 'numpy.sqrt', 'np.sqrt', (['(groups_var[:, bi_group[0]] + groups_var[:, bi_group[1]])'], {}), '(groups_var[:, bi_group[0]] + groups_var[:, bi_group[1]])\n', (4583, 4640), True, 'import numpy as np\n'), ((4989, 5011), 'numpy.sort', 'np.sort', (['p_val'], {'axis': '(0)'}), '(p_val, axis=0)\n', (4996, 5011), True, 'import numpy as np\n'), ((5242, 5265), 'numpy.any', 'np.any', (['pre_filter_mask'], {}), '(pre_filter_mask)\n', (5248, 5265), True, 'import numpy as np\n'), ((4196, 4232), 'numpy.var', 'np.var', (['rpkm_table[:, group]'], {'axis': '(1)'}), '(rpkm_table[:, group], axis=1)\n', (4202, 4232), True, 'import numpy as np\n'), ((5299, 5337), 'numpy.max', 'np.max', (['sorted_p_vals[pre_filter_mask]'], {}), '(sorted_p_vals[pre_filter_mask])\n', (5305, 5337), True, 'import numpy as np\n'), ((1786, 1808), 'numpy.array', 'np.array', (["[row[0], '']"], {}), "([row[0], ''])\n", (1794, 1808), True, 'import numpy as np\n'), ((1948, 1991), 'numpy.array', 'np.array', (['counts_for_gene'], {'dtype': 'np.float64'}), '(counts_for_gene, dtype=np.float64)\n', (1956, 1991), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
import numpy as np
import random
import cv2
# render a go board into a matrix.
# state should be a string of 9x9, 13x13, 19x19.
# size is the dimensions of the resulting image.
def render_board(state, size=500):
    """Render a Go position as a BGR image.

    Parameters
    ----------
    state : str
        Flat board string: length 81 -> 9x9, length 169 -> 13x13, anything
        else -> 19x19. 'b' is a black stone, 'w' a white stone, any other
        character an empty point.
    size : int
        Width/height of the square output image in pixels.

    Returns
    -------
    numpy.ndarray
        size x size x 3 uint8 image.
    """
    margin = size // 10
    board = np.zeros((size, size, 3), np.uint8)
    board[:] = (163, 218, 255)  # wood-coloured background (BGR)
    # infer the board dimension from the state length; default to 19x19
    dims = {81: 9, 169: 13}.get(len(state), 19)
    grid_size = (size - 2 * margin) / (dims - 1)

    def px(g):
        """Convert a grid coordinate to a pixel coordinate."""
        return int(margin + g * grid_size)

    # grid lines: verticals first, then horizontals (same colour, so the
    # draw order does not affect the final image)
    for i in range(dims):
        board = cv2.line(board, (px(i), margin), (px(i), size - margin), (0, 0, 0), 1)
    for i in range(dims):
        board = cv2.line(board, (margin, px(i)), (size - margin, px(i)), (0, 0, 0), 1)
    # star (hoshi) points per board size -- replaces the original block of
    # fifteen near-identical cv2.circle calls
    hoshi = {
        9: [(4, 4)],
        13: [(a, b) for a in (3, 9) for b in (3, 9)] + [(6, 6)],
        19: [(a, b) for a in (3, 9, 15) for b in (3, 9, 15)],
    }
    for gx, gy in hoshi[dims]:
        board = cv2.circle(board, (px(gx), px(gy)), int(grid_size / 8), (0, 0, 0), -1)
    # stones: 'w' is white with a 1px black outline, 'b' is solid black
    for i, c in enumerate(state):
        gx = i % dims
        gy = i // dims
        center = (px(gx), px(gy))
        radius = int(grid_size * 0.45)
        if c == 'w':
            board = cv2.circle(board, center, radius, (255, 255, 255), -1)
            board = cv2.circle(board, center, radius, (0, 0, 0), 1)
        elif c == 'b':
            board = cv2.circle(board, center, radius, (0, 0, 0), -1)
    return board
if __name__ == "__main__":
    # Demo loop: show random positions; press 'n' for the next board,
    # 'q' to quit.
    while True:
        # pick a random board size, then fill it with random stones
        cells = random.choice((9 * 9, 13 * 13, 19 * 19))
        inp = "".join(random.choice(("b", "w", "_")) for _ in range(cells))
        x = render_board(inp, size=1000)
        cv2.imshow("board", x)
        while True:
            # BUGFIX: read ONE key per iteration. The original called
            # cv2.waitKey(0) twice, so every other keypress was silently
            # consumed (e.g. pressing 'q' first required a second press).
            key = cv2.waitKey(0) & 0xFF
            if key == ord('n'):
                break
            if key == ord('q'):
                cv2.destroyAllWindows()
                quit()
| [
"cv2.waitKey",
"cv2.imshow",
"numpy.zeros",
"random.choice",
"cv2.destroyAllWindows"
] | [((270, 305), 'numpy.zeros', 'np.zeros', (['(size, size, 3)', 'np.uint8'], {}), '((size, size, 3), np.uint8)\n', (278, 305), True, 'import numpy as np\n'), ((3523, 3545), 'cv2.imshow', 'cv2.imshow', (['"""board"""', 'x'], {}), "('board', x)\n", (3533, 3545), False, 'import cv2\n'), ((3387, 3427), 'random.choice', 'random.choice', (['(9 * 9, 13 * 13, 19 * 19)'], {}), '((9 * 9, 13 * 13, 19 * 19))\n', (3400, 3427), False, 'import random\n'), ((3443, 3473), 'random.choice', 'random.choice', (["('b', 'w', '_')"], {}), "(('b', 'w', '_'))\n", (3456, 3473), False, 'import random\n'), ((3704, 3727), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3725, 3727), False, 'import cv2\n'), ((3581, 3595), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3592, 3595), False, 'import cv2\n'), ((3653, 3667), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3664, 3667), False, 'import cv2\n')] |
#!/usr/bin/env python
import glob
import os
import subprocess
import click
import numpy as np
import sh
from loguru import logger
from mpi4py import MPI
comm = MPI.COMM_WORLD  # global MPI communicator
size = comm.Get_size()  # total number of MPI ranks; used to split the workload
rank = comm.Get_rank()  # this process's rank; appears in every log message
# per-process log file in addition to loguru's default stderr sink
logger.add("process_seed.log", format="{time} {level} {message}",
           filter="process_seed", level="INFO")
# suppress SAC's copyright banner so it does not pollute the command stream
os.putenv("SAC_DISPLAY_COPYRIGHT", '0')
def process_seeds(event_paths):
    """Run the full SEED-to-SAC pipeline on this rank's share of the events.

    Stages, in order: rdseed extraction, segment merging, file renaming,
    instrument-response removal, and rotation to R/T/Z components.
    """
    event_path_this_rank = generate_paths(event_paths)
    pipeline = (
        ("rdseed", rdseed),
        ("merge", merge),
        ("rename", rename),
        ("transfer", transfer),
        ("rotate", rotate),
    )
    for label, stage in pipeline:
        logger.info(f"[rank:{rank}] start to {label}")
        stage(event_path_this_rank)
    logger.success(f"[rank:{rank}] finished!")
def generate_paths(event_paths):
    """Partition *event_paths* into `size` near-equal chunks; return this rank's chunk."""
    chunks = np.array_split(event_paths, size)
    return chunks[rank]
def rdseed(event_path_this_rank):
    """Run `rdseed -pdf` on every SEED volume in this rank's event directories."""
    start_dir = str(sh.pwd())[:-1]  # `pwd` output carries a trailing newline
    for thedir in event_path_this_rank:
        sh.cd(thedir)
        seed_volumes = glob.glob("*SEED")
        for seed in seed_volumes:
            logger.info(f"[rank:{rank},dir:{thedir}] rdseed {seed}")
            sh.rdseed('-pdf', seed)
        sh.cd(start_dir)
def merge(event_path_this_rank):
    """Merge SAC traces that rdseed split into several segments.

    Traces sharing the same NET.STA.LOC.CHN key are merged in one SAC
    session (gaps zeroed, overlaps averaged) into the alphabetically first
    segment file; the remaining segment files are deleted afterwards.
    """
    root_path = str(sh.pwd())[:-1]  # sh.pwd() output ends with a newline
    for thedir in event_path_this_rank:
        sh.cd(thedir)
        # count traces per NET.STA.LOC.CHN key (fields 6:10 of rdseed names)
        sets = {}
        for fname in glob.glob("*.SAC"):
            key = '.'.join(fname.split('.')[6:10])
            if key not in sets:
                sets[key] = 1
            else:
                sets[key] += 1
        # prepare sac command
        stdin_list = []
        stdin_list.append(f"wild echo off \n")
        to_del = []
        for key, value in sets.items():
            if(value == 1):
                # only one segment: nothing to merge
                continue
            logger.info(
                f"[rank:{rank},dir:{thedir}] merge {key}: {value} traces")
            # all segments of this key, sorted so traces[0] is the keeper
            traces = sorted(glob.glob('.'.join(['*', key, '?', 'SAC'])))
            stdin_list.append(f"r *.{key}.?.SAC \n")
            stdin_list.append(f"merge gap zero overlap average \n")
            stdin_list.append(f"w {traces[0]} \n")
            to_del.extend(traces[1:])
        # single 'q' AFTER the loop: SAC exits at the first 'q' it reads
        stdin_list.append(f"q\n")
        sh.sac(_in=stdin_list)
        logger.info(f"[rank:{rank},dir:{thedir}] keep only the master seed")
        for file in to_del:
            sh.rm(file)
        sh.cd(root_path)
def rename(event_path_this_rank):
    """Rename rdseed's verbose SAC names to the short NET.STA.LOC.CHN.SAC form."""
    start_dir = str(sh.pwd())[:-1]  # strip the trailing newline from `pwd`
    for thedir in event_path_this_rank:
        sh.cd(thedir)
        for fname in glob.glob("*.SAC"):
            pieces = fname.split('.')
            net, sta, loc, chn = pieces[6:10]
            short_name = f"{net}.{sta}.{loc}.{chn}.SAC"
            sh.mv(fname, short_name)
        sh.cd(start_dir)
def transfer(event_path_this_rank):
    """Remove the instrument response from every SAC file via its PZ file.

    For each event directory, builds one SAC command stream that, per file,
    removes mean/trend, tapers, deconvolves the pole-zero response
    (band-limited 0.001-0.005 / 5-10 Hz) and scales to nanometres, then
    runs the whole stream in a single `sac` session.
    """
    root_path = str(sh.pwd())[:-1]  # sh.pwd() output ends with a newline
    for thedir in event_path_this_rank:
        sh.cd(thedir)
        stdin_list = []
        for sacfile in glob.glob("*.SAC"):
            net, sta, loc, chn = sacfile.split('.')[0:4]
            # exactly one pole-zero file must match this channel
            pz = glob.glob(f"SAC_PZs_{net}_{sta}_{chn}_{loc}_*_*")
            if(len(pz) != 1):
                logger.error(
                    f"[rank:{rank},dir:{thedir}] error in transfering for {sacfile} in seeking {pz}")
                continue
            stdin_list.append(f"r {sacfile}\n")
            stdin_list.append(f"rmean; rtr; taper \n")
            stdin_list.append(
                f"trans from pol s {pz[0]} to none freq 0.001 0.005 5 10\n")
            stdin_list.append(f"mul 1.0e9 \n")
            stdin_list.append("w over\n")
        # BUGFIX: issue 'q' once, AFTER all files. SAC exits at the first
        # 'q' it reads, so the per-file 'q' of the original meant only the
        # first file was ever transferred (merge() already uses this pattern).
        stdin_list.append(f"q\n")
        sh.sac(_in=stdin_list)
        sh.cd(root_path)
def rotate(event_path_this_rank):
    """Rotate horizontal components to the great-circle path per station.

    For every NET.STA.LOC.CH component set: requires a vertical (Z) trace
    and an orthogonal horizontal pair (E/N or 1/2 naming), checks that all
    three traces share the same sampling interval, cuts to the common time
    window, rotates to R/T and rewrites Z, producing NET.STA.LOC.[RTZ]
    outputs. The original *.SAC files are deleted afterwards.
    """
    root_path = str(sh.pwd())[:-1]  # sh.pwd() output ends with a newline
    for thedir in event_path_this_rank:
        sh.cd(thedir)
        # build the collection of NET.STA.LOC.CH (channel minus orientation)
        sets = set()
        for fname in glob.glob("*.SAC"):
            net, sta, loc, chn = fname.split('.')[0:4]
            key = '.'.join([net, sta, loc, chn[0:2]])
            sets.add(key)
        stdin_list = []
        for key in sets:
            # the vertical component must exist
            Z = f"{key}Z.SAC"
            if not os.path.exists(Z):
                logger.error(
                    f"[rank:{rank},dir:{thedir}] vertical component missing for {key}")
                continue
            # horizontal pair: prefer E/N, fall back to 1/2 naming
            if os.path.exists(key + "E.SAC") and os.path.exists(key + "N.SAC"):
                E = key + "E.SAC"
                N = key + "N.SAC"
            elif os.path.exists(key + "1.SAC") and os.path.exists(key + "2.SAC"):
                E = key + "1.SAC"
                N = key + "2.SAC"
            else:
                logger.error(
                    f"[rank:{rank},dir:{thedir}] horizontal component missing for {key}")
                continue
            # the two horizontals must be orthogonal (azimuths 90 or 270 apart)
            cmd = 'saclst cmpaz f {}'.format(E).split()
            Ecmpaz = subprocess.check_output(cmd).decode().split()[1]
            cmd = 'saclst cmpaz f {}'.format(N).split()
            Ncmpaz = subprocess.check_output(cmd).decode().split()[1]
            cmpaz_delta = abs(float(Ecmpaz) - float(Ncmpaz))
            if not (abs(cmpaz_delta - 90) <= 0.01 or abs(cmpaz_delta - 270) <= 0.01):
                logger.error(
                    f"[rank:{rank},dir:{thedir}] {key}: cmpaz1={Ecmpaz}, cmpaz2={Ncmpaz} are not orthogonal!")
                continue
            # begin/end/delta headers of all three traces
            cmd = 'saclst b e delta f {}'.format(Z).split()
            Zb, Ze, Zdelta = subprocess.check_output(cmd).decode().split()[1:]
            cmd = 'saclst b e delta f {}'.format(E).split()
            Eb, Ee, Edelta = subprocess.check_output(cmd).decode().split()[1:]
            cmd = 'saclst b e delta f {}'.format(N).split()
            Nb, Ne, Ndelta = subprocess.check_output(cmd).decode().split()[1:]
            if not (float(Zdelta) == float(Edelta) and float(Zdelta) == float(Ndelta)):
                logger.error(
                    f"[rank:{rank},dir:{thedir}] {key}: {key} delta not equal! ")
                continue
            # largest data window common to the three components
            begin = max(float(Zb), float(Eb), float(Nb))
            end = min(float(Ze), float(Ee), float(Ne))
            # output with the form NET.STA.LOC.[RTZ]
            prefix = key[:-2]
            R, T, Z0 = prefix + '.R', prefix + '.T', prefix + '.Z'
            stdin_list.append(f"cut {begin} {end} \n")
            stdin_list.append(f"r {E} {N} \n")
            stdin_list.append(f"rotate to gcp \n")
            stdin_list.append(f"w {R} {T} \n")
            stdin_list.append(f"r {Z} \n")
            stdin_list.append(f"w {Z0} \n")
        # BUGFIX: issue 'q' once, AFTER all component sets. SAC exits at the
        # first 'q', so the original per-key 'q' rotated only the first set.
        stdin_list.append(f"q \n")
        sh.sac(_in=stdin_list)
        # delete initial files
        for fname in glob.glob("*.SAC"):
            # BUGFIX: log the file actually being deleted; the original
            # interpolated the stale loop variable `key` from the loop above.
            logger.info(
                f"[rank:{rank},dir:{thedir}] delete SAC file for {fname}")
            sh.rm(fname)
        sh.cd(root_path)
@click.command()
@click.option('--main_path', required=True, help="the data directory", type=str)
def main(main_path):
    """CLI entry point: process every event directory under --main_path."""
    # one subdirectory per event; each holds the SEED volumes to process
    paths = glob.glob(f"{main_path}/*")
    process_seeds(paths)
if __name__ == "__main__":
    main()
| [
"os.putenv",
"sh.mv",
"sh.cd",
"loguru.logger.add",
"sh.sac",
"sh.rdseed",
"loguru.logger.error",
"subprocess.check_output",
"click.option",
"sh.pwd",
"os.path.exists",
"click.command",
"loguru.logger.info",
"sh.rm",
"loguru.logger.success",
"glob.glob",
"numpy.array_split"
] | [((225, 332), 'loguru.logger.add', 'logger.add', (['"""process_seed.log"""'], {'format': '"""{time} {level} {message}"""', 'filter': '"""process_seed"""', 'level': '"""INFO"""'}), "('process_seed.log', format='{time} {level} {message}', filter=\n 'process_seed', level='INFO')\n", (235, 332), False, 'from loguru import logger\n'), ((339, 378), 'os.putenv', 'os.putenv', (['"""SAC_DISPLAY_COPYRIGHT"""', '"""0"""'], {}), "('SAC_DISPLAY_COPYRIGHT', '0')\n", (348, 378), False, 'import os\n'), ((7388, 7403), 'click.command', 'click.command', ([], {}), '()\n', (7401, 7403), False, 'import click\n'), ((7405, 7484), 'click.option', 'click.option', (['"""--main_path"""'], {'required': '(True)', 'help': '"""the data directory"""', 'type': 'str'}), "('--main_path', required=True, help='the data directory', type=str)\n", (7417, 7484), False, 'import click\n'), ((517, 562), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank}] start to rdseed"""'], {}), "(f'[rank:{rank}] start to rdseed')\n", (528, 562), False, 'from loguru import logger\n'), ((601, 645), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank}] start to merge"""'], {}), "(f'[rank:{rank}] start to merge')\n", (612, 645), False, 'from loguru import logger\n'), ((683, 728), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank}] start to rename"""'], {}), "(f'[rank:{rank}] start to rename')\n", (694, 728), False, 'from loguru import logger\n'), ((767, 814), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank}] start to transfer"""'], {}), "(f'[rank:{rank}] start to transfer')\n", (778, 814), False, 'from loguru import logger\n'), ((855, 900), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank}] start to rotate"""'], {}), "(f'[rank:{rank}] start to rotate')\n", (866, 900), False, 'from loguru import logger\n'), ((939, 981), 'loguru.logger.success', 'logger.success', (['f"""[rank:{rank}] finished!"""'], {}), "(f'[rank:{rank}] finished!')\n", (953, 981), False, 'from loguru import logger\n'), 
((7518, 7545), 'glob.glob', 'glob.glob', (['f"""{main_path}/*"""'], {}), "(f'{main_path}/*')\n", (7527, 7545), False, 'import glob\n'), ((1028, 1061), 'numpy.array_split', 'np.array_split', (['event_paths', 'size'], {}), '(event_paths, size)\n', (1042, 1061), True, 'import numpy as np\n'), ((1187, 1200), 'sh.cd', 'sh.cd', (['thedir'], {}), '(thedir)\n', (1192, 1200), False, 'import sh\n'), ((1221, 1239), 'glob.glob', 'glob.glob', (['"""*SEED"""'], {}), "('*SEED')\n", (1230, 1239), False, 'import glob\n'), ((1354, 1370), 'sh.cd', 'sh.cd', (['root_path'], {}), '(root_path)\n', (1359, 1370), False, 'import sh\n'), ((1489, 1502), 'sh.cd', 'sh.cd', (['thedir'], {}), '(thedir)\n', (1494, 1502), False, 'import sh\n'), ((1543, 1561), 'glob.glob', 'glob.glob', (['"""*.SAC"""'], {}), "('*.SAC')\n", (1552, 1561), False, 'import glob\n'), ((2369, 2391), 'sh.sac', 'sh.sac', ([], {'_in': 'stdin_list'}), '(_in=stdin_list)\n', (2375, 2391), False, 'import sh\n'), ((2401, 2469), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank},dir:{thedir}] keep only the master seed"""'], {}), "(f'[rank:{rank},dir:{thedir}] keep only the master seed')\n", (2412, 2469), False, 'from loguru import logger\n'), ((2531, 2547), 'sh.cd', 'sh.cd', (['root_path'], {}), '(root_path)\n', (2536, 2547), False, 'import sh\n'), ((2667, 2680), 'sh.cd', 'sh.cd', (['thedir'], {}), '(thedir)\n', (2672, 2680), False, 'import sh\n'), ((2702, 2720), 'glob.glob', 'glob.glob', (['"""*.SAC"""'], {}), "('*.SAC')\n", (2711, 2720), False, 'import glob\n'), ((2965, 2981), 'sh.cd', 'sh.cd', (['root_path'], {}), '(root_path)\n', (2970, 2981), False, 'import sh\n'), ((3103, 3116), 'sh.cd', 'sh.cd', (['thedir'], {}), '(thedir)\n', (3108, 3116), False, 'import sh\n'), ((3165, 3183), 'glob.glob', 'glob.glob', (['"""*.SAC"""'], {}), "('*.SAC')\n", (3174, 3183), False, 'import glob\n'), ((3945, 3967), 'sh.sac', 'sh.sac', ([], {'_in': 'stdin_list'}), '(_in=stdin_list)\n', (3951, 3967), False, 'import sh\n'), ((3976, 3992), 
'sh.cd', 'sh.cd', (['root_path'], {}), '(root_path)\n', (3981, 3992), False, 'import sh\n'), ((4112, 4125), 'sh.cd', 'sh.cd', (['thedir'], {}), '(thedir)\n', (4117, 4125), False, 'import sh\n'), ((4218, 4236), 'glob.glob', 'glob.glob', (['"""*.SAC"""'], {}), "('*.SAC')\n", (4227, 4236), False, 'import glob\n'), ((7140, 7162), 'sh.sac', 'sh.sac', ([], {'_in': 'stdin_list'}), '(_in=stdin_list)\n', (7146, 7162), False, 'import sh\n'), ((7216, 7234), 'glob.glob', 'glob.glob', (['"""*.SAC"""'], {}), "('*.SAC')\n", (7225, 7234), False, 'import glob\n'), ((7368, 7384), 'sh.cd', 'sh.cd', (['root_path'], {}), '(root_path)\n', (7373, 7384), False, 'import sh\n'), ((1124, 1132), 'sh.pwd', 'sh.pwd', ([], {}), '()\n', (1130, 1132), False, 'import sh\n'), ((1253, 1309), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank},dir:{thedir}] rdseed {seed}"""'], {}), "(f'[rank:{rank},dir:{thedir}] rdseed {seed}')\n", (1264, 1309), False, 'from loguru import logger\n'), ((1322, 1345), 'sh.rdseed', 'sh.rdseed', (['"""-pdf"""', 'seed'], {}), "('-pdf', seed)\n", (1331, 1345), False, 'import sh\n'), ((1426, 1434), 'sh.pwd', 'sh.pwd', ([], {}), '()\n', (1432, 1434), False, 'import sh\n'), ((1954, 2024), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank},dir:{thedir}] merge {key}: {value} traces"""'], {}), "(f'[rank:{rank},dir:{thedir}] merge {key}: {value} traces')\n", (1965, 2024), False, 'from loguru import logger\n'), ((2510, 2521), 'sh.rm', 'sh.rm', (['file'], {}), '(file)\n', (2515, 2521), False, 'import sh\n'), ((2604, 2612), 'sh.pwd', 'sh.pwd', ([], {}), '()\n', (2610, 2612), False, 'import sh\n'), ((2912, 2956), 'sh.mv', 'sh.mv', (['fname', 'f"""{net}.{sta}.{loc}.{chn}.SAC"""'], {}), "(fname, f'{net}.{sta}.{loc}.{chn}.SAC')\n", (2917, 2956), False, 'import sh\n'), ((3040, 3048), 'sh.pwd', 'sh.pwd', ([], {}), '()\n', (3046, 3048), False, 'import sh\n'), ((3259, 3308), 'glob.glob', 'glob.glob', (['f"""SAC_PZs_{net}_{sta}_{chn}_{loc}_*_*"""'], {}), 
"(f'SAC_PZs_{net}_{sta}_{chn}_{loc}_*_*')\n", (3268, 3308), False, 'import glob\n'), ((4049, 4057), 'sh.pwd', 'sh.pwd', ([], {}), '()\n', (4055, 4057), False, 'import sh\n'), ((7248, 7316), 'loguru.logger.info', 'logger.info', (['f"""[rank:{rank},dir:{thedir}] delete SAC file for {key}"""'], {}), "(f'[rank:{rank},dir:{thedir}] delete SAC file for {key}')\n", (7259, 7316), False, 'from loguru import logger\n'), ((7346, 7358), 'sh.rm', 'sh.rm', (['fname'], {}), '(fname)\n', (7351, 7358), False, 'import sh\n'), ((3356, 3460), 'loguru.logger.error', 'logger.error', (['f"""[rank:{rank},dir:{thedir}] error in transfering for {sacfile} in seeking {pz}"""'], {}), "(\n f'[rank:{rank},dir:{thedir}] error in transfering for {sacfile} in seeking {pz}'\n )\n", (3368, 3460), False, 'from loguru import logger\n'), ((4508, 4525), 'os.path.exists', 'os.path.exists', (['Z'], {}), '(Z)\n', (4522, 4525), False, 'import os\n'), ((4543, 4628), 'loguru.logger.error', 'logger.error', (['f"""[rank:{rank},dir:{thedir}] vertical component missing for {key}"""'], {}), "(f'[rank:{rank},dir:{thedir}] vertical component missing for {key}'\n )\n", (4555, 4628), False, 'from loguru import logger\n'), ((4721, 4750), 'os.path.exists', 'os.path.exists', (["(key + 'E.SAC')"], {}), "(key + 'E.SAC')\n", (4735, 4750), False, 'import os\n'), ((4755, 4784), 'os.path.exists', 'os.path.exists', (["(key + 'N.SAC')"], {}), "(key + 'N.SAC')\n", (4769, 4784), False, 'import os\n'), ((5613, 5726), 'loguru.logger.error', 'logger.error', (['f"""[rank:{rank},dir:{thedir}] {key}: cmpaz1={Ecmpaz}, cmpaz2={Ncmpaz} are not orthogonal!"""'], {}), "(\n f'[rank:{rank},dir:{thedir}] {key}: cmpaz1={Ecmpaz}, cmpaz2={Ncmpaz} are not orthogonal!'\n )\n", (5625, 5726), False, 'from loguru import logger\n'), ((6316, 6390), 'loguru.logger.error', 'logger.error', (['f"""[rank:{rank},dir:{thedir}] {key}: {key} delta not equal! """'], {}), "(f'[rank:{rank},dir:{thedir}] {key}: {key} delta not equal! 
')\n", (6328, 6390), False, 'from loguru import logger\n'), ((4871, 4900), 'os.path.exists', 'os.path.exists', (["(key + '1.SAC')"], {}), "(key + '1.SAC')\n", (4885, 4900), False, 'import os\n'), ((4905, 4934), 'os.path.exists', 'os.path.exists', (["(key + '2.SAC')"], {}), "(key + '2.SAC')\n", (4919, 4934), False, 'import os\n'), ((5038, 5125), 'loguru.logger.error', 'logger.error', (['f"""[rank:{rank},dir:{thedir}] horizontal component missing for {key}"""'], {}), "(\n f'[rank:{rank},dir:{thedir}] horizontal component missing for {key}')\n", (5050, 5125), False, 'from loguru import logger\n'), ((5279, 5307), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (5302, 5307), False, 'import subprocess\n'), ((5405, 5433), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (5428, 5433), False, 'import subprocess\n'), ((5883, 5911), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (5906, 5911), False, 'import subprocess\n'), ((6022, 6050), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (6045, 6050), False, 'import subprocess\n'), ((6161, 6189), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (6184, 6189), False, 'import subprocess\n')] |
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class BanditEnv(gym.Env):
    """
    Bandit environment base

    Attributes
    ----------
    arms: int
        Number of arms
    """
    def __init__(self, arms: int):
        self.arms = arms
        # one discrete action per arm
        self.action_space = spaces.Discrete(self.arms)
        # bandits are stateless: a single dummy observation
        self.observation_space = spaces.Discrete(1)
        self.np_random = None  # set by seed() below
        self.seed()
        # subclasses draw their arm parameters in reset()
        self.reset()
    def seed(self, seed=None):
        """Seed the environment RNG; returns the list of seeds used (Gym convention)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def reset(self):
        # subclasses must (re)draw arm parameters here
        raise NotImplementedError
    def step(self, action: int):
        # subclasses must return (observation, reward, done, info)
        raise NotImplementedError
    def render(self, mode="human", close=False):
        raise NotImplementedError
class BanditKArmedGaussianEnv(BanditEnv):
    """
    K-armed bandit environment.
    This env is mentioned on page 30 of Sutton and Barto's with K=10
    [Reinforcement Learning: An Introduction]
    (https://www.dropbox.com/s/b3psxv2r0ccmf80/book2015oct.pdf?dl=0)
    Actions always pay out
    Mean of payout is pulled from a normal distribution (0, 1) (called q*(a))
    Actual reward is drawn from a normal distribution (q*(a), 1)
    """
    def __init__(self, arms=10):
        # means must exist before BanditEnv.__init__ calls reset()
        self.means = []
        BanditEnv.__init__(self, arms)
    def reset(self):
        """Redraw q*(a) for every arm and return the initial observation.

        FIX: the original returned None, but the Gym API expects reset()
        to return the initial observation (step() already reports
        observation 0; bandits have a single dummy state).
        """
        # q*(a) ~ N(0, 1), drawn independently per arm
        self.means = [self.np_random.normal(0, 1) for _ in range(self.arms)]
        self.best_arm = int(np.argmax(self.means))
        return 0
    def step(self, action: int):
        """Pull arm *action*; reward ~ N(q*(action), 1).

        Returns (observation, reward, done, info); info records whether
        the optimal arm was chosen and the per-step expected regret.
        """
        assert self.action_space.contains(action)
        reward = self.np_random.normal(self.means[action], 1)
        info = {
            "Optimal": action == self.best_arm,
            "Regret": self.means[self.best_arm] - self.means[action],
        }
        return 0, reward, False, info
| [
"gym.spaces.Discrete",
"numpy.argmax",
"gym.utils.seeding.np_random"
] | [((312, 338), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.arms'], {}), '(self.arms)\n', (327, 338), False, 'from gym import spaces\n'), ((372, 390), 'gym.spaces.Discrete', 'spaces.Discrete', (['(1)'], {}), '(1)\n', (387, 390), False, 'from gym import spaces\n'), ((525, 548), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (542, 548), False, 'from gym.utils import seeding\n'), ((1488, 1509), 'numpy.argmax', 'np.argmax', (['self.means'], {}), '(self.means)\n', (1497, 1509), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.