body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
d3843909a320f625092614066cfb6626708939e0ab4a6c759db1b64f87252b16 | def write_hdf5_dict(filepath, data_dict, chunks=None):
'\n Stores data_dict\n Parameters\n ----------\n filepath : str\n file path where data will be stored. (Do not include extension- .h5)\n data_dict : dictionary\n data should be stored as data_dict[key]= data_arrays\n\n Returns\n -------\n\n '
filedir = os.path.split(filepath)[0]
if (not os.path.exists(filedir)):
os.makedirs(filedir)
ext = '.h5'
filename = (filepath + ext)
hf = h5py.File(filename, 'w')
for key in data_dict:
if ((chunks is None) or ((key == 'x') or (key == 'y') or (key == 'z'))):
hf.create_dataset(key, data=data_dict[key])
else:
hf.create_dataset(key, data=data_dict[key], chunks=chunks)
hf.close()
print(('Data was successfully saved as ' + filename)) | Stores data_dict
Parameters
----------
filepath : str
file path where data will be stored. (Do not include extension- .h5)
data_dict : dictionary
data should be stored as data_dict[key]= data_arrays
Returns
------- | davis2hdf5.py | write_hdf5_dict | tmatsuzawa/tflow | 1 | python | def write_hdf5_dict(filepath, data_dict, chunks=None):
'\n Stores data_dict\n Parameters\n ----------\n filepath : str\n file path where data will be stored. (Do not include extension- .h5)\n data_dict : dictionary\n data should be stored as data_dict[key]= data_arrays\n\n Returns\n -------\n\n '
filedir = os.path.split(filepath)[0]
if (not os.path.exists(filedir)):
os.makedirs(filedir)
ext = '.h5'
filename = (filepath + ext)
hf = h5py.File(filename, 'w')
for key in data_dict:
if ((chunks is None) or ((key == 'x') or (key == 'y') or (key == 'z'))):
hf.create_dataset(key, data=data_dict[key])
else:
hf.create_dataset(key, data=data_dict[key], chunks=chunks)
hf.close()
print(('Data was successfully saved as ' + filename)) | def write_hdf5_dict(filepath, data_dict, chunks=None):
'\n Stores data_dict\n Parameters\n ----------\n filepath : str\n file path where data will be stored. (Do not include extension- .h5)\n data_dict : dictionary\n data should be stored as data_dict[key]= data_arrays\n\n Returns\n -------\n\n '
filedir = os.path.split(filepath)[0]
if (not os.path.exists(filedir)):
os.makedirs(filedir)
ext = '.h5'
filename = (filepath + ext)
hf = h5py.File(filename, 'w')
for key in data_dict:
if ((chunks is None) or ((key == 'x') or (key == 'y') or (key == 'z'))):
hf.create_dataset(key, data=data_dict[key])
else:
hf.create_dataset(key, data=data_dict[key], chunks=chunks)
hf.close()
print(('Data was successfully saved as ' + filename))<|docstring|>Stores data_dict
Parameters
----------
filepath : str
file path where data will be stored. (Do not include extension- .h5)
data_dict : dictionary
data should be stored as data_dict[key]= data_arrays
Returns
-------<|endoftext|> |
5651826e0c81bbfd2b0ee2cd647becdc6460b4c963e3f2cf47795340ead78559 | def davis2hdf5_dirbase(dirbase, use_chunks, savedir=None, header='B', scale=1000.0, fps=1.0, mode='piv', start=0, end=None):
'\n Convert multiple davis outputs into hdf5 files\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
if (savedir is None):
if (dirbase[(- 1)] == '/'):
savedir = os.path.split(dirbase[:(- 1)])[0]
else:
savedir = os.path.split(dirbase)[0]
datadirs = glob.glob((dirbase + '/*'))
for datadir in tqdm(datadirs, desc='datadir'):
davis2hdf5(datadir, use_chunks, savedir=savedir, header=header, scale=scale, fps=fps, mode=mode, start=start, end=end)
print('... Done') | Convert multiple davis outputs into hdf5 files
Parameters
----------
dirbase
savedir
Returns
------- | davis2hdf5.py | davis2hdf5_dirbase | tmatsuzawa/tflow | 1 | python | def davis2hdf5_dirbase(dirbase, use_chunks, savedir=None, header='B', scale=1000.0, fps=1.0, mode='piv', start=0, end=None):
'\n Convert multiple davis outputs into hdf5 files\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
if (savedir is None):
if (dirbase[(- 1)] == '/'):
savedir = os.path.split(dirbase[:(- 1)])[0]
else:
savedir = os.path.split(dirbase)[0]
datadirs = glob.glob((dirbase + '/*'))
for datadir in tqdm(datadirs, desc='datadir'):
davis2hdf5(datadir, use_chunks, savedir=savedir, header=header, scale=scale, fps=fps, mode=mode, start=start, end=end)
print('... Done') | def davis2hdf5_dirbase(dirbase, use_chunks, savedir=None, header='B', scale=1000.0, fps=1.0, mode='piv', start=0, end=None):
'\n Convert multiple davis outputs into hdf5 files\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
if (savedir is None):
if (dirbase[(- 1)] == '/'):
savedir = os.path.split(dirbase[:(- 1)])[0]
else:
savedir = os.path.split(dirbase)[0]
datadirs = glob.glob((dirbase + '/*'))
for datadir in tqdm(datadirs, desc='datadir'):
davis2hdf5(datadir, use_chunks, savedir=savedir, header=header, scale=scale, fps=fps, mode=mode, start=start, end=end)
print('... Done')<|docstring|>Convert multiple davis outputs into hdf5 files
Parameters
----------
dirbase
savedir
Returns
-------<|endoftext|> |
2ed6fe5b206df528fcb1855e232d36c9d9935a3be7b61d71b4e07dbc6713b4ea | def davis2hdf5_piv(datadir, use_chunks, savedir=None, savepath=None, header='B', scale=1000.0, chunks=None, fps=1.0, start=0, end=None):
'\n Convert multiple DaVis output (PIV) into a hdf5 file\n\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
davis_dpaths = glob.glob((datadir + ('/%s*' % header)))
davis_dpaths = natural_sort(davis_dpaths)
davis_dpaths = davis_dpaths[start:end]
duration = len(davis_dpaths)
for (t, dpath) in enumerate(tqdm(davis_dpaths)):
with open(dpath, 'r') as fyle:
(xlist, ylist, ulist, vlist) = ([], [], [], [])
lines = fyle.readlines()
if lines[0].__contains__('DaVis;'):
delimitter = ';'
else:
delimitter = ' '
if (delimitter == ' '):
(height, width) = (int(lines[0].split(delimitter)[4]), int(lines[0].split(delimitter)[5]))
else:
(height, width) = (int(lines[0].split(delimitter)[3]), int(lines[0].split(delimitter)[4]))
shape = (height, width)
for (i, line) in enumerate(lines):
if (i == 0):
if line.__contains__(('"Position"%s"mm"' % delimitter)):
scale = 1.0
pos_unit = 'mm'
else:
pos_unit = 'px'
if line.__contains__(('"velocity"%s"m/s"' % delimitter)):
vscale = 1000.0
vel_unit = 'm/s'
elif line.__contains__(('"displacement"%s"pixel"' % delimitter)):
vscale = (scale * fps)
vel_unit = 'px/frame'
else:
vscale = 1.0
vel_unit = '????'
if (t == 0):
print(('\n Units of Position and Velocity: ' + pos_unit), vel_unit)
if (vel_unit == 'px/frame'):
print(('scale (mm/px), frame rate(fps): %.5f, %.1f' % (scale, fps)))
elif (vel_unit == 'm/s'):
print('... velocity gets converted to mm/s')
if (i > 0):
if (delimitter == ' '):
line = line.replace(',', '.').split()
else:
line = line.split(delimitter)
(x, y, u, v) = [float(i) for i in line]
if (u == 0):
u = np.nan
if (v == 0):
v = np.nan
xlist.append(x)
ylist.append(y)
ulist.append(u)
vlist.append(v)
x_arr = (np.asarray(xlist).reshape(shape) * scale)
y_arr = (np.asarray(ylist).reshape(shape) * scale)
u_arr = (np.asarray(ulist).reshape(shape) * vscale)
v_arr = (np.asarray(vlist).reshape(shape) * vscale)
if (t == 0):
shape_d = (shape[0], shape[1], duration)
(uxdata_d, uydata_d) = (np.empty(shape_d), np.empty(shape_d))
uxdata_d[(..., t)] = u_arr
uydata_d[(..., t)] = v_arr
udata_d = np.stack((uxdata_d, uydata_d))
if (datadir[(- 1)] == '/'):
(savedir_default, dirname) = os.path.split(datadir[:(- 1)])
else:
(savedir_default, dirname) = os.path.split(datadir)
if (savedir is None):
savedir = savedir_default
savepath = ((savedir + '/davis_piv_outputs/') + dirname)
data2write = {}
data2write['ux'] = udata_d[(0, ...)]
data2write['uy'] = udata_d[(1, ...)]
data2write['x'] = x_arr
data2write['y'] = y_arr
if use_chunks:
chunks = (udata_d.shape[1:(- 1)] + (1,))
else:
chunks = None
write_hdf5_dict(savepath, data2write, chunks=chunks)
print('... Done') | Convert multiple DaVis output (PIV) into a hdf5 file
Parameters
----------
dirbase
savedir
Returns
------- | davis2hdf5.py | davis2hdf5_piv | tmatsuzawa/tflow | 1 | python | def davis2hdf5_piv(datadir, use_chunks, savedir=None, savepath=None, header='B', scale=1000.0, chunks=None, fps=1.0, start=0, end=None):
'\n Convert multiple DaVis output (PIV) into a hdf5 file\n\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
davis_dpaths = glob.glob((datadir + ('/%s*' % header)))
davis_dpaths = natural_sort(davis_dpaths)
davis_dpaths = davis_dpaths[start:end]
duration = len(davis_dpaths)
for (t, dpath) in enumerate(tqdm(davis_dpaths)):
with open(dpath, 'r') as fyle:
(xlist, ylist, ulist, vlist) = ([], [], [], [])
lines = fyle.readlines()
if lines[0].__contains__('DaVis;'):
delimitter = ';'
else:
delimitter = ' '
if (delimitter == ' '):
(height, width) = (int(lines[0].split(delimitter)[4]), int(lines[0].split(delimitter)[5]))
else:
(height, width) = (int(lines[0].split(delimitter)[3]), int(lines[0].split(delimitter)[4]))
shape = (height, width)
for (i, line) in enumerate(lines):
if (i == 0):
if line.__contains__(('"Position"%s"mm"' % delimitter)):
scale = 1.0
pos_unit = 'mm'
else:
pos_unit = 'px'
if line.__contains__(('"velocity"%s"m/s"' % delimitter)):
vscale = 1000.0
vel_unit = 'm/s'
elif line.__contains__(('"displacement"%s"pixel"' % delimitter)):
vscale = (scale * fps)
vel_unit = 'px/frame'
else:
vscale = 1.0
vel_unit = '????'
if (t == 0):
print(('\n Units of Position and Velocity: ' + pos_unit), vel_unit)
if (vel_unit == 'px/frame'):
print(('scale (mm/px), frame rate(fps): %.5f, %.1f' % (scale, fps)))
elif (vel_unit == 'm/s'):
print('... velocity gets converted to mm/s')
if (i > 0):
if (delimitter == ' '):
line = line.replace(',', '.').split()
else:
line = line.split(delimitter)
(x, y, u, v) = [float(i) for i in line]
if (u == 0):
u = np.nan
if (v == 0):
v = np.nan
xlist.append(x)
ylist.append(y)
ulist.append(u)
vlist.append(v)
x_arr = (np.asarray(xlist).reshape(shape) * scale)
y_arr = (np.asarray(ylist).reshape(shape) * scale)
u_arr = (np.asarray(ulist).reshape(shape) * vscale)
v_arr = (np.asarray(vlist).reshape(shape) * vscale)
if (t == 0):
shape_d = (shape[0], shape[1], duration)
(uxdata_d, uydata_d) = (np.empty(shape_d), np.empty(shape_d))
uxdata_d[(..., t)] = u_arr
uydata_d[(..., t)] = v_arr
udata_d = np.stack((uxdata_d, uydata_d))
if (datadir[(- 1)] == '/'):
(savedir_default, dirname) = os.path.split(datadir[:(- 1)])
else:
(savedir_default, dirname) = os.path.split(datadir)
if (savedir is None):
savedir = savedir_default
savepath = ((savedir + '/davis_piv_outputs/') + dirname)
data2write = {}
data2write['ux'] = udata_d[(0, ...)]
data2write['uy'] = udata_d[(1, ...)]
data2write['x'] = x_arr
data2write['y'] = y_arr
if use_chunks:
chunks = (udata_d.shape[1:(- 1)] + (1,))
else:
chunks = None
write_hdf5_dict(savepath, data2write, chunks=chunks)
print('... Done') | def davis2hdf5_piv(datadir, use_chunks, savedir=None, savepath=None, header='B', scale=1000.0, chunks=None, fps=1.0, start=0, end=None):
'\n Convert multiple DaVis output (PIV) into a hdf5 file\n\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
davis_dpaths = glob.glob((datadir + ('/%s*' % header)))
davis_dpaths = natural_sort(davis_dpaths)
davis_dpaths = davis_dpaths[start:end]
duration = len(davis_dpaths)
for (t, dpath) in enumerate(tqdm(davis_dpaths)):
with open(dpath, 'r') as fyle:
(xlist, ylist, ulist, vlist) = ([], [], [], [])
lines = fyle.readlines()
if lines[0].__contains__('DaVis;'):
delimitter = ';'
else:
delimitter = ' '
if (delimitter == ' '):
(height, width) = (int(lines[0].split(delimitter)[4]), int(lines[0].split(delimitter)[5]))
else:
(height, width) = (int(lines[0].split(delimitter)[3]), int(lines[0].split(delimitter)[4]))
shape = (height, width)
for (i, line) in enumerate(lines):
if (i == 0):
if line.__contains__(('"Position"%s"mm"' % delimitter)):
scale = 1.0
pos_unit = 'mm'
else:
pos_unit = 'px'
if line.__contains__(('"velocity"%s"m/s"' % delimitter)):
vscale = 1000.0
vel_unit = 'm/s'
elif line.__contains__(('"displacement"%s"pixel"' % delimitter)):
vscale = (scale * fps)
vel_unit = 'px/frame'
else:
vscale = 1.0
vel_unit = '????'
if (t == 0):
print(('\n Units of Position and Velocity: ' + pos_unit), vel_unit)
if (vel_unit == 'px/frame'):
print(('scale (mm/px), frame rate(fps): %.5f, %.1f' % (scale, fps)))
elif (vel_unit == 'm/s'):
print('... velocity gets converted to mm/s')
if (i > 0):
if (delimitter == ' '):
line = line.replace(',', '.').split()
else:
line = line.split(delimitter)
(x, y, u, v) = [float(i) for i in line]
if (u == 0):
u = np.nan
if (v == 0):
v = np.nan
xlist.append(x)
ylist.append(y)
ulist.append(u)
vlist.append(v)
x_arr = (np.asarray(xlist).reshape(shape) * scale)
y_arr = (np.asarray(ylist).reshape(shape) * scale)
u_arr = (np.asarray(ulist).reshape(shape) * vscale)
v_arr = (np.asarray(vlist).reshape(shape) * vscale)
if (t == 0):
shape_d = (shape[0], shape[1], duration)
(uxdata_d, uydata_d) = (np.empty(shape_d), np.empty(shape_d))
uxdata_d[(..., t)] = u_arr
uydata_d[(..., t)] = v_arr
udata_d = np.stack((uxdata_d, uydata_d))
if (datadir[(- 1)] == '/'):
(savedir_default, dirname) = os.path.split(datadir[:(- 1)])
else:
(savedir_default, dirname) = os.path.split(datadir)
if (savedir is None):
savedir = savedir_default
savepath = ((savedir + '/davis_piv_outputs/') + dirname)
data2write = {}
data2write['ux'] = udata_d[(0, ...)]
data2write['uy'] = udata_d[(1, ...)]
data2write['x'] = x_arr
data2write['y'] = y_arr
if use_chunks:
chunks = (udata_d.shape[1:(- 1)] + (1,))
else:
chunks = None
write_hdf5_dict(savepath, data2write, chunks=chunks)
print('... Done')<|docstring|>Convert multiple DaVis output (PIV) into a hdf5 file
Parameters
----------
dirbase
savedir
Returns
-------<|endoftext|> |
2f5aa5f752b870f236bd21e2417988655ad82a4b061eca105721ebc99d59d31f | def davis2hdf5_stb(datadir, use_chunks, savedir=None, savepath=None, header='B', scale=1000.0, chunks=None, fps=1.0, start=0, end=None):
'\n Convert multiple DaVis output (PIV) into a hdf5 file\n\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
def format_array(arr1d, shape):
'\n Formats a 1d array output by the DaVis STB export feature into the convention used by udata\n ... Convention is (y, x, z). The array must have a shape (height, width, depth).\n\n Parameters\n ----------\n arr1d: 1d array-like\n shape: tuple,\n ... size of the 3D array (height, width, depth)\n\n Returns\n -------\n arr: array\n ...\n '
newshape = (shape[2], shape[0], shape[1])
arr1d = np.asarray(arr1d)
arr = arr1d.reshape(newshape)
arr = np.swapaxes(arr, 0, 2)
arr = np.swapaxes(arr, 0, 1)
return arr
davis_dpaths = glob.glob((datadir + ('/%s*' % header)))
davis_dpaths = natural_sort(davis_dpaths)
davis_dpaths = davis_dpaths[start:end]
duration = len(davis_dpaths)
for (t, dpath) in enumerate(tqdm(davis_dpaths)):
with open(dpath, 'r') as fyle:
(xlist, ylist, zlist, ulist, vlist, wlist) = ([], [], [], [], [], [])
lines = fyle.readlines()
line = lines[0].replace(';', ' ').split()
(height, width, depth) = (int(line[5]), int(line[6]), int(line[7][:(- 3)]))
shape = (height, width, depth)
for (i, line) in enumerate(lines):
if (i <= ((height * width) * depth)):
if (i == 0):
if line.__contains__('"X";"mm"'):
scale = 1.0
pos_unit = 'mm'
else:
pos_unit = 'px'
if line.__contains__('"velocity";"m/s"'):
vscale = 1000.0
vel_unit = 'm/s'
elif line.__contains__('"displacement" "pixel"'):
vscale = (scale * fps)
vel_unit = 'px/frame'
else:
vscale = 1.0
vel_unit = '????'
if (t == 0):
print(('\n Units of Position and Velocity: ' + pos_unit), vel_unit)
if (vel_unit == 'px/frame'):
print(('scale (mm/px), frame rate(fps): %.5f, %.1f' % (scale, fps)))
if (i > 0):
line = line.replace(';', ' ').split()
(x, y, z, u, v, w) = [float(i) for i in line]
if (u == 0):
u = np.nan
if (v == 0):
v = np.nan
if (w == 0):
w = np.nan
xlist.append(x)
ylist.append(y)
zlist.append(z)
ulist.append(u)
vlist.append(v)
wlist.append(w)
x_arr = (format_array(xlist, shape) * scale)
y_arr = (format_array(ylist, shape) * scale)
z_arr = (format_array(zlist, shape) * scale)
u_arr = (format_array(ulist, shape) * vscale)
v_arr = (format_array(vlist, shape) * vscale)
w_arr = (format_array(wlist, shape) * vscale)
if (t == 0):
shape_d = (shape + (duration,))
(uxdata_d, uydata_d, uzdata_d) = (np.empty(shape_d), np.empty(shape_d), np.empty(shape_d))
uxdata_d[(..., t)] = u_arr
uydata_d[(..., t)] = v_arr
uzdata_d[(..., t)] = w_arr
udata_d = np.stack((uxdata_d, uydata_d, uzdata_d))
if (datadir[(- 1)] == '/'):
(savedir_default, dirname) = os.path.split(datadir[:(- 1)])
else:
(savedir_default, dirname) = os.path.split(datadir)
if (savedir is None):
savedir = savedir_default
savepath = ((savedir + '/davis_stb_outputs/') + dirname)
if ((start != 0) or (end is not None)):
if (end is None):
end = (len(davis_dpaths) - 1)
savepath += ('%05d_%05d' % (start, end))
data2write = {}
data2write['ux'] = udata_d[(0, ...)]
data2write['uy'] = udata_d[(1, ...)]
data2write['uz'] = udata_d[(2, ...)]
data2write['x'] = x_arr
data2write['y'] = y_arr
data2write['z'] = z_arr
if use_chunks:
chunks = (udata_d.shape[1:(- 1)] + (1,))
else:
chunks = None
write_hdf5_dict(savepath, data2write, chunks=chunks)
print('... Done') | Convert multiple DaVis output (PIV) into a hdf5 file
Parameters
----------
dirbase
savedir
Returns
------- | davis2hdf5.py | davis2hdf5_stb | tmatsuzawa/tflow | 1 | python | def davis2hdf5_stb(datadir, use_chunks, savedir=None, savepath=None, header='B', scale=1000.0, chunks=None, fps=1.0, start=0, end=None):
'\n Convert multiple DaVis output (PIV) into a hdf5 file\n\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
def format_array(arr1d, shape):
'\n Formats a 1d array output by the DaVis STB export feature into the convention used by udata\n ... Convention is (y, x, z). The array must have a shape (height, width, depth).\n\n Parameters\n ----------\n arr1d: 1d array-like\n shape: tuple,\n ... size of the 3D array (height, width, depth)\n\n Returns\n -------\n arr: array\n ...\n '
newshape = (shape[2], shape[0], shape[1])
arr1d = np.asarray(arr1d)
arr = arr1d.reshape(newshape)
arr = np.swapaxes(arr, 0, 2)
arr = np.swapaxes(arr, 0, 1)
return arr
davis_dpaths = glob.glob((datadir + ('/%s*' % header)))
davis_dpaths = natural_sort(davis_dpaths)
davis_dpaths = davis_dpaths[start:end]
duration = len(davis_dpaths)
for (t, dpath) in enumerate(tqdm(davis_dpaths)):
with open(dpath, 'r') as fyle:
(xlist, ylist, zlist, ulist, vlist, wlist) = ([], [], [], [], [], [])
lines = fyle.readlines()
line = lines[0].replace(';', ' ').split()
(height, width, depth) = (int(line[5]), int(line[6]), int(line[7][:(- 3)]))
shape = (height, width, depth)
for (i, line) in enumerate(lines):
if (i <= ((height * width) * depth)):
if (i == 0):
if line.__contains__('"X";"mm"'):
scale = 1.0
pos_unit = 'mm'
else:
pos_unit = 'px'
if line.__contains__('"velocity";"m/s"'):
vscale = 1000.0
vel_unit = 'm/s'
elif line.__contains__('"displacement" "pixel"'):
vscale = (scale * fps)
vel_unit = 'px/frame'
else:
vscale = 1.0
vel_unit = '????'
if (t == 0):
print(('\n Units of Position and Velocity: ' + pos_unit), vel_unit)
if (vel_unit == 'px/frame'):
print(('scale (mm/px), frame rate(fps): %.5f, %.1f' % (scale, fps)))
if (i > 0):
line = line.replace(';', ' ').split()
(x, y, z, u, v, w) = [float(i) for i in line]
if (u == 0):
u = np.nan
if (v == 0):
v = np.nan
if (w == 0):
w = np.nan
xlist.append(x)
ylist.append(y)
zlist.append(z)
ulist.append(u)
vlist.append(v)
wlist.append(w)
x_arr = (format_array(xlist, shape) * scale)
y_arr = (format_array(ylist, shape) * scale)
z_arr = (format_array(zlist, shape) * scale)
u_arr = (format_array(ulist, shape) * vscale)
v_arr = (format_array(vlist, shape) * vscale)
w_arr = (format_array(wlist, shape) * vscale)
if (t == 0):
shape_d = (shape + (duration,))
(uxdata_d, uydata_d, uzdata_d) = (np.empty(shape_d), np.empty(shape_d), np.empty(shape_d))
uxdata_d[(..., t)] = u_arr
uydata_d[(..., t)] = v_arr
uzdata_d[(..., t)] = w_arr
udata_d = np.stack((uxdata_d, uydata_d, uzdata_d))
if (datadir[(- 1)] == '/'):
(savedir_default, dirname) = os.path.split(datadir[:(- 1)])
else:
(savedir_default, dirname) = os.path.split(datadir)
if (savedir is None):
savedir = savedir_default
savepath = ((savedir + '/davis_stb_outputs/') + dirname)
if ((start != 0) or (end is not None)):
if (end is None):
end = (len(davis_dpaths) - 1)
savepath += ('%05d_%05d' % (start, end))
data2write = {}
data2write['ux'] = udata_d[(0, ...)]
data2write['uy'] = udata_d[(1, ...)]
data2write['uz'] = udata_d[(2, ...)]
data2write['x'] = x_arr
data2write['y'] = y_arr
data2write['z'] = z_arr
if use_chunks:
chunks = (udata_d.shape[1:(- 1)] + (1,))
else:
chunks = None
write_hdf5_dict(savepath, data2write, chunks=chunks)
print('... Done') | def davis2hdf5_stb(datadir, use_chunks, savedir=None, savepath=None, header='B', scale=1000.0, chunks=None, fps=1.0, start=0, end=None):
'\n Convert multiple DaVis output (PIV) into a hdf5 file\n\n\n Parameters\n ----------\n dirbase\n savedir\n\n Returns\n -------\n\n '
def format_array(arr1d, shape):
'\n Formats a 1d array output by the DaVis STB export feature into the convention used by udata\n ... Convention is (y, x, z). The array must have a shape (height, width, depth).\n\n Parameters\n ----------\n arr1d: 1d array-like\n shape: tuple,\n ... size of the 3D array (height, width, depth)\n\n Returns\n -------\n arr: array\n ...\n '
newshape = (shape[2], shape[0], shape[1])
arr1d = np.asarray(arr1d)
arr = arr1d.reshape(newshape)
arr = np.swapaxes(arr, 0, 2)
arr = np.swapaxes(arr, 0, 1)
return arr
davis_dpaths = glob.glob((datadir + ('/%s*' % header)))
davis_dpaths = natural_sort(davis_dpaths)
davis_dpaths = davis_dpaths[start:end]
duration = len(davis_dpaths)
for (t, dpath) in enumerate(tqdm(davis_dpaths)):
with open(dpath, 'r') as fyle:
(xlist, ylist, zlist, ulist, vlist, wlist) = ([], [], [], [], [], [])
lines = fyle.readlines()
line = lines[0].replace(';', ' ').split()
(height, width, depth) = (int(line[5]), int(line[6]), int(line[7][:(- 3)]))
shape = (height, width, depth)
for (i, line) in enumerate(lines):
if (i <= ((height * width) * depth)):
if (i == 0):
if line.__contains__('"X";"mm"'):
scale = 1.0
pos_unit = 'mm'
else:
pos_unit = 'px'
if line.__contains__('"velocity";"m/s"'):
vscale = 1000.0
vel_unit = 'm/s'
elif line.__contains__('"displacement" "pixel"'):
vscale = (scale * fps)
vel_unit = 'px/frame'
else:
vscale = 1.0
vel_unit = '????'
if (t == 0):
print(('\n Units of Position and Velocity: ' + pos_unit), vel_unit)
if (vel_unit == 'px/frame'):
print(('scale (mm/px), frame rate(fps): %.5f, %.1f' % (scale, fps)))
if (i > 0):
line = line.replace(';', ' ').split()
(x, y, z, u, v, w) = [float(i) for i in line]
if (u == 0):
u = np.nan
if (v == 0):
v = np.nan
if (w == 0):
w = np.nan
xlist.append(x)
ylist.append(y)
zlist.append(z)
ulist.append(u)
vlist.append(v)
wlist.append(w)
x_arr = (format_array(xlist, shape) * scale)
y_arr = (format_array(ylist, shape) * scale)
z_arr = (format_array(zlist, shape) * scale)
u_arr = (format_array(ulist, shape) * vscale)
v_arr = (format_array(vlist, shape) * vscale)
w_arr = (format_array(wlist, shape) * vscale)
if (t == 0):
shape_d = (shape + (duration,))
(uxdata_d, uydata_d, uzdata_d) = (np.empty(shape_d), np.empty(shape_d), np.empty(shape_d))
uxdata_d[(..., t)] = u_arr
uydata_d[(..., t)] = v_arr
uzdata_d[(..., t)] = w_arr
udata_d = np.stack((uxdata_d, uydata_d, uzdata_d))
if (datadir[(- 1)] == '/'):
(savedir_default, dirname) = os.path.split(datadir[:(- 1)])
else:
(savedir_default, dirname) = os.path.split(datadir)
if (savedir is None):
savedir = savedir_default
savepath = ((savedir + '/davis_stb_outputs/') + dirname)
if ((start != 0) or (end is not None)):
if (end is None):
end = (len(davis_dpaths) - 1)
savepath += ('%05d_%05d' % (start, end))
data2write = {}
data2write['ux'] = udata_d[(0, ...)]
data2write['uy'] = udata_d[(1, ...)]
data2write['uz'] = udata_d[(2, ...)]
data2write['x'] = x_arr
data2write['y'] = y_arr
data2write['z'] = z_arr
if use_chunks:
chunks = (udata_d.shape[1:(- 1)] + (1,))
else:
chunks = None
write_hdf5_dict(savepath, data2write, chunks=chunks)
print('... Done')<|docstring|>Convert multiple DaVis output (PIV) into a hdf5 file
Parameters
----------
dirbase
savedir
Returns
-------<|endoftext|> |
177d70f55976650a81f1d2360d4a6edec694b150a051d893330215d5471207e0 | def format_array(arr1d, shape):
'\n Formats a 1d array output by the DaVis STB export feature into the convention used by udata\n ... Convention is (y, x, z). The array must have a shape (height, width, depth).\n\n Parameters\n ----------\n arr1d: 1d array-like\n shape: tuple,\n ... size of the 3D array (height, width, depth)\n\n Returns\n -------\n arr: array\n ...\n '
newshape = (shape[2], shape[0], shape[1])
arr1d = np.asarray(arr1d)
arr = arr1d.reshape(newshape)
arr = np.swapaxes(arr, 0, 2)
arr = np.swapaxes(arr, 0, 1)
return arr | Formats a 1d array output by the DaVis STB export feature into the convention used by udata
... Convention is (y, x, z). The array must have a shape (height, width, depth).
Parameters
----------
arr1d: 1d array-like
shape: tuple,
... size of the 3D array (height, width, depth)
Returns
-------
arr: array
... | davis2hdf5.py | format_array | tmatsuzawa/tflow | 1 | python | def format_array(arr1d, shape):
'\n Formats a 1d array output by the DaVis STB export feature into the convention used by udata\n ... Convention is (y, x, z). The array must have a shape (height, width, depth).\n\n Parameters\n ----------\n arr1d: 1d array-like\n shape: tuple,\n ... size of the 3D array (height, width, depth)\n\n Returns\n -------\n arr: array\n ...\n '
newshape = (shape[2], shape[0], shape[1])
arr1d = np.asarray(arr1d)
arr = arr1d.reshape(newshape)
arr = np.swapaxes(arr, 0, 2)
arr = np.swapaxes(arr, 0, 1)
return arr | def format_array(arr1d, shape):
'\n Formats a 1d array output by the DaVis STB export feature into the convention used by udata\n ... Convention is (y, x, z). The array must have a shape (height, width, depth).\n\n Parameters\n ----------\n arr1d: 1d array-like\n shape: tuple,\n ... size of the 3D array (height, width, depth)\n\n Returns\n -------\n arr: array\n ...\n '
newshape = (shape[2], shape[0], shape[1])
arr1d = np.asarray(arr1d)
arr = arr1d.reshape(newshape)
arr = np.swapaxes(arr, 0, 2)
arr = np.swapaxes(arr, 0, 1)
return arr<|docstring|>Formats a 1d array output by the DaVis STB export feature into the convention used by udata
... Convention is (y, x, z). The array must have a shape (height, width, depth).
Parameters
----------
arr1d: 1d array-like
shape: tuple,
... size of the 3D array (height, width, depth)
Returns
-------
arr: array
...<|endoftext|> |
9d88a6f50886a95ca4b813f28835976921ae8da65d9bd09aecff6a52be500945 | def atoi(text):
'natural sorting'
return (int(text) if text.isdigit() else text) | natural sorting | davis2hdf5.py | atoi | tmatsuzawa/tflow | 1 | python | def atoi(text):
return (int(text) if text.isdigit() else text) | def atoi(text):
return (int(text) if text.isdigit() else text)<|docstring|>natural sorting<|endoftext|> |
94a339a2a99b64ed683a34d0b4b7a95c9cd7d122b0237fbc680ae93cd36cbfd2 | def natural_keys(text):
"\n natural sorting\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n "
return [atoi(c) for c in re.split('(\\d+)', text)] | natural sorting
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments) | davis2hdf5.py | natural_keys | tmatsuzawa/tflow | 1 | python | def natural_keys(text):
"\n natural sorting\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n "
return [atoi(c) for c in re.split('(\\d+)', text)] | def natural_keys(text):
"\n natural sorting\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n "
return [atoi(c) for c in re.split('(\\d+)', text)]<|docstring|>natural sorting
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)<|endoftext|> |
ec7b710897a185c03ad51843e16e26ccb11e0179b1848605de61e9ef6ab66618 | @timeout(200)
@timeout_decorator
def get_page(url, user_verify=True, need_login=True):
'\n :param url: 待出现\n :param user_verify: 是否为可能出现验证码的页面(ajax连接不会出现验证码,如果是请求微博或者用户信息可能出现验证码),否为抓取转发的ajax连接\n :param need_login: 抓取页面是否需要登录,这样做可以减小一些账号的压力\n :return: 返回请求的数据,如果出现404或者403,或者是别的异常,都返回空字符串\n '
crawler.info('本次抓取的url为{url}'.format(url=url))
count = 0
latest_name_cookies = None
while (count < max_retries):
if need_login:
name_cookies = Cookies.fetch_cookies()
if (name_cookies is None):
crawler.warning('cookie池中不存在cookie,正在检查是否有可用账号')
rs = get_login_info()
if (len(rs) == 0):
crawler.error('账号均不可用,请检查账号健康状况')
if ('win32' in sys.platform):
os.popen('taskkill /F /IM "celery*"')
else:
os.popen('pkill -f "celery"')
else:
crawler.info('重新获取cookie中...')
login.excute_login_task()
time.sleep(10)
if (name_cookies == latest_name_cookies):
continue
latest_name_cookies = name_cookies
try:
if need_login:
resp = requests.get(url, headers=headers, cookies=name_cookies[1], timeout=time_out, verify=False)
if ("$CONFIG['islogin'] = '0'" in resp.text):
crawler.warning('账号{}出现异常'.format(name_cookies[0]))
freeze_account(name_cookies[0])
Cookies.delete_cookies(name_cookies[0])
continue
else:
resp = requests.get(url, headers=headers, timeout=time_out, verify=False)
page = resp.text
if page:
page = page.encode('utf-8', 'ignore').decode('utf-8')
else:
continue
time.sleep(interal)
if user_verify:
if (('unfreeze' in resp.url) or is_403(page)):
crawler.warning('账号{}已经被冻结'.format(name_cookies[0]))
freeze_account(name_cookies[0])
Cookies.delete_cookies(name_cookies[0])
count += 1
continue
if (not is_complete(page)):
count += 1
continue
if is_404(page):
crawler.warning('url为{url}的连接不存在'.format(url=url))
return ''
except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError, AttributeError) as e:
crawler.warning('抓取{}出现异常,具体信息是{}'.format(url, e))
count += 1
time.sleep(excp_interal)
else:
Urls.store_crawl_url(url, 1)
return page
crawler.warning('抓取{}已达到最大重试次数,请在redis的失败队列中查看该url并检查原因'.format(url))
Urls.store_crawl_url(url, 0)
return '' | :param url: 待出现
:param user_verify: 是否为可能出现验证码的页面(ajax连接不会出现验证码,如果是请求微博或者用户信息可能出现验证码),否为抓取转发的ajax连接
:param need_login: 抓取页面是否需要登录,这样做可以减小一些账号的压力
:return: 返回请求的数据,如果出现404或者403,或者是别的异常,都返回空字符串 | page_get/basic.py | get_page | vfhky/WeiboSpider | 5 | python | @timeout(200)
@timeout_decorator
def get_page(url, user_verify=True, need_login=True):
'\n :param url: 待出现\n :param user_verify: 是否为可能出现验证码的页面(ajax连接不会出现验证码,如果是请求微博或者用户信息可能出现验证码),否为抓取转发的ajax连接\n :param need_login: 抓取页面是否需要登录,这样做可以减小一些账号的压力\n :return: 返回请求的数据,如果出现404或者403,或者是别的异常,都返回空字符串\n '
crawler.info('本次抓取的url为{url}'.format(url=url))
count = 0
latest_name_cookies = None
while (count < max_retries):
if need_login:
name_cookies = Cookies.fetch_cookies()
if (name_cookies is None):
crawler.warning('cookie池中不存在cookie,正在检查是否有可用账号')
rs = get_login_info()
if (len(rs) == 0):
crawler.error('账号均不可用,请检查账号健康状况')
if ('win32' in sys.platform):
os.popen('taskkill /F /IM "celery*"')
else:
os.popen('pkill -f "celery"')
else:
crawler.info('重新获取cookie中...')
login.excute_login_task()
time.sleep(10)
if (name_cookies == latest_name_cookies):
continue
latest_name_cookies = name_cookies
try:
if need_login:
resp = requests.get(url, headers=headers, cookies=name_cookies[1], timeout=time_out, verify=False)
if ("$CONFIG['islogin'] = '0'" in resp.text):
crawler.warning('账号{}出现异常'.format(name_cookies[0]))
freeze_account(name_cookies[0])
Cookies.delete_cookies(name_cookies[0])
continue
else:
resp = requests.get(url, headers=headers, timeout=time_out, verify=False)
page = resp.text
if page:
page = page.encode('utf-8', 'ignore').decode('utf-8')
else:
continue
time.sleep(interal)
if user_verify:
if (('unfreeze' in resp.url) or is_403(page)):
crawler.warning('账号{}已经被冻结'.format(name_cookies[0]))
freeze_account(name_cookies[0])
Cookies.delete_cookies(name_cookies[0])
count += 1
continue
if (not is_complete(page)):
count += 1
continue
if is_404(page):
crawler.warning('url为{url}的连接不存在'.format(url=url))
return
except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError, AttributeError) as e:
crawler.warning('抓取{}出现异常,具体信息是{}'.format(url, e))
count += 1
time.sleep(excp_interal)
else:
Urls.store_crawl_url(url, 1)
return page
crawler.warning('抓取{}已达到最大重试次数,请在redis的失败队列中查看该url并检查原因'.format(url))
Urls.store_crawl_url(url, 0)
return | @timeout(200)
@timeout_decorator
def get_page(url, user_verify=True, need_login=True):
'\n :param url: 待出现\n :param user_verify: 是否为可能出现验证码的页面(ajax连接不会出现验证码,如果是请求微博或者用户信息可能出现验证码),否为抓取转发的ajax连接\n :param need_login: 抓取页面是否需要登录,这样做可以减小一些账号的压力\n :return: 返回请求的数据,如果出现404或者403,或者是别的异常,都返回空字符串\n '
crawler.info('本次抓取的url为{url}'.format(url=url))
count = 0
latest_name_cookies = None
while (count < max_retries):
if need_login:
name_cookies = Cookies.fetch_cookies()
if (name_cookies is None):
crawler.warning('cookie池中不存在cookie,正在检查是否有可用账号')
rs = get_login_info()
if (len(rs) == 0):
crawler.error('账号均不可用,请检查账号健康状况')
if ('win32' in sys.platform):
os.popen('taskkill /F /IM "celery*"')
else:
os.popen('pkill -f "celery"')
else:
crawler.info('重新获取cookie中...')
login.excute_login_task()
time.sleep(10)
if (name_cookies == latest_name_cookies):
continue
latest_name_cookies = name_cookies
try:
if need_login:
resp = requests.get(url, headers=headers, cookies=name_cookies[1], timeout=time_out, verify=False)
if ("$CONFIG['islogin'] = '0'" in resp.text):
crawler.warning('账号{}出现异常'.format(name_cookies[0]))
freeze_account(name_cookies[0])
Cookies.delete_cookies(name_cookies[0])
continue
else:
resp = requests.get(url, headers=headers, timeout=time_out, verify=False)
page = resp.text
if page:
page = page.encode('utf-8', 'ignore').decode('utf-8')
else:
continue
time.sleep(interal)
if user_verify:
if (('unfreeze' in resp.url) or is_403(page)):
crawler.warning('账号{}已经被冻结'.format(name_cookies[0]))
freeze_account(name_cookies[0])
Cookies.delete_cookies(name_cookies[0])
count += 1
continue
if (not is_complete(page)):
count += 1
continue
if is_404(page):
crawler.warning('url为{url}的连接不存在'.format(url=url))
return
except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError, AttributeError) as e:
crawler.warning('抓取{}出现异常,具体信息是{}'.format(url, e))
count += 1
time.sleep(excp_interal)
else:
Urls.store_crawl_url(url, 1)
return page
crawler.warning('抓取{}已达到最大重试次数,请在redis的失败队列中查看该url并检查原因'.format(url))
Urls.store_crawl_url(url, 0)
return <|docstring|>:param url: 待出现
:param user_verify: 是否为可能出现验证码的页面(ajax连接不会出现验证码,如果是请求微博或者用户信息可能出现验证码),否为抓取转发的ajax连接
:param need_login: 抓取页面是否需要登录,这样做可以减小一些账号的压力
:return: 返回请求的数据,如果出现404或者403,或者是别的异常,都返回空字符串<|endoftext|> |
da2b523caccbbc5ddc2fa00cd4d0ac1cc089e2a45b926d87d5d9f7e3fab84341 | def strong_set(glasso, lagrange_cur, lagrange_new, grad, slope_estimate=1):
'\n Guess at active variables at \n lagrange value lagrange_new based on gradient\n at lagrange_cur.\n '
p = grad.shape[0]
value = strong_set_mixed_lasso(grad, lagrange_new, lagrange_cur, slope_estimate, glasso._l1_penalty, glasso._unpenalized, glasso._positive_part, glasso._nonnegative, glasso._groups, glasso._weight_array)
value = value.astype(np.bool)
return (value, selector(value, (p,))) | Guess at active variables at
lagrange value lagrange_new based on gradient
at lagrange_cur. | regreg/atoms/mixed_lasso.py | strong_set | vishalbelsare/regreg | 11 | python | def strong_set(glasso, lagrange_cur, lagrange_new, grad, slope_estimate=1):
'\n Guess at active variables at \n lagrange value lagrange_new based on gradient\n at lagrange_cur.\n '
p = grad.shape[0]
value = strong_set_mixed_lasso(grad, lagrange_new, lagrange_cur, slope_estimate, glasso._l1_penalty, glasso._unpenalized, glasso._positive_part, glasso._nonnegative, glasso._groups, glasso._weight_array)
value = value.astype(np.bool)
return (value, selector(value, (p,))) | def strong_set(glasso, lagrange_cur, lagrange_new, grad, slope_estimate=1):
'\n Guess at active variables at \n lagrange value lagrange_new based on gradient\n at lagrange_cur.\n '
p = grad.shape[0]
value = strong_set_mixed_lasso(grad, lagrange_new, lagrange_cur, slope_estimate, glasso._l1_penalty, glasso._unpenalized, glasso._positive_part, glasso._nonnegative, glasso._groups, glasso._weight_array)
value = value.astype(np.bool)
return (value, selector(value, (p,)))<|docstring|>Guess at active variables at
lagrange value lagrange_new based on gradient
at lagrange_cur.<|endoftext|> |
6e3f77607acdbd3713cfc4ff721e850b57f02508112a92d93758a011540d9fb3 | def check_KKT(glasso, grad, solution, lagrange, tol=0.01):
'\n Check whether (grad, solution) satisfy\n KKT conditions at a given tolerance.\n '
failing = check_KKT_mixed_lasso(grad, solution, lagrange, glasso._l1_penalty, glasso._unpenalized, glasso._positive_part, glasso._nonnegative, glasso._groups, glasso._weight_array, tol=tol)
return (failing > 0) | Check whether (grad, solution) satisfy
KKT conditions at a given tolerance. | regreg/atoms/mixed_lasso.py | check_KKT | vishalbelsare/regreg | 11 | python | def check_KKT(glasso, grad, solution, lagrange, tol=0.01):
'\n Check whether (grad, solution) satisfy\n KKT conditions at a given tolerance.\n '
failing = check_KKT_mixed_lasso(grad, solution, lagrange, glasso._l1_penalty, glasso._unpenalized, glasso._positive_part, glasso._nonnegative, glasso._groups, glasso._weight_array, tol=tol)
return (failing > 0) | def check_KKT(glasso, grad, solution, lagrange, tol=0.01):
'\n Check whether (grad, solution) satisfy\n KKT conditions at a given tolerance.\n '
failing = check_KKT_mixed_lasso(grad, solution, lagrange, glasso._l1_penalty, glasso._unpenalized, glasso._positive_part, glasso._nonnegative, glasso._groups, glasso._weight_array, tol=tol)
return (failing > 0)<|docstring|>Check whether (grad, solution) satisfy
KKT conditions at a given tolerance.<|endoftext|> |
95c9c7b97153c08961b3241d04001c3f88cb833f39340b22a9e2f10935afdffe | @doc_template_provider
def constraint(self, x, bound=None):
"\n Verify :math:`\\cdot %(objective)s \\leq \\lambda`, where :math:`\\lambda`\n is bound, :math:`\\alpha` is self.offset (if any).\n\n If True, returns 0, else returns np.inf.\n\n The class atom's constraint just returns the appropriate bound\n parameter for use by the subclasses.\n "
if (bound is None):
raise ValueError('bound must be suppled')
x_offset = self.apply_offset(x)
return (self.seminorm(x_offset) <= bound) | Verify :math:`\cdot %(objective)s \leq \lambda`, where :math:`\lambda`
is bound, :math:`\alpha` is self.offset (if any).
If True, returns 0, else returns np.inf.
The class atom's constraint just returns the appropriate bound
parameter for use by the subclasses. | regreg/atoms/mixed_lasso.py | constraint | vishalbelsare/regreg | 11 | python | @doc_template_provider
def constraint(self, x, bound=None):
"\n Verify :math:`\\cdot %(objective)s \\leq \\lambda`, where :math:`\\lambda`\n is bound, :math:`\\alpha` is self.offset (if any).\n\n If True, returns 0, else returns np.inf.\n\n The class atom's constraint just returns the appropriate bound\n parameter for use by the subclasses.\n "
if (bound is None):
raise ValueError('bound must be suppled')
x_offset = self.apply_offset(x)
return (self.seminorm(x_offset) <= bound) | @doc_template_provider
def constraint(self, x, bound=None):
"\n Verify :math:`\\cdot %(objective)s \\leq \\lambda`, where :math:`\\lambda`\n is bound, :math:`\\alpha` is self.offset (if any).\n\n If True, returns 0, else returns np.inf.\n\n The class atom's constraint just returns the appropriate bound\n parameter for use by the subclasses.\n "
if (bound is None):
raise ValueError('bound must be suppled')
x_offset = self.apply_offset(x)
return (self.seminorm(x_offset) <= bound)<|docstring|>Verify :math:`\cdot %(objective)s \leq \lambda`, where :math:`\lambda`
is bound, :math:`\alpha` is self.offset (if any).
If True, returns 0, else returns np.inf.
The class atom's constraint just returns the appropriate bound
parameter for use by the subclasses.<|endoftext|> |
8c3148972375d1c5f58ef077cedc5f9d9540ac9635d52328bdd7ad465e1a6a01 | def proximal(self, proxq, prox_control=None):
'\n The proximal operator. If the atom is in\n Lagrange mode, this has the form\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\lambda h(v+\\alpha) + \\langle v, \\eta \\rangle\n\n where :math:`\\alpha` is the offset of self.affine_transform and\n :math:`\\eta` is self.linear_term.\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\langle v, \\eta \\rangle \\text{s.t.} \\ h(v+\\alpha) \\leq \\lambda\n\n '
(offset, totalq) = (self.quadratic + proxq).recenter(self.offset)
if (totalq.coef == 0):
raise ValueError('lipschitz + quadratic coef must be positive')
prox_arg = ((- totalq.linear_term) / totalq.coef)
self._norms_cython[:] = 0
self._factors_cython[:] = 0
self._projection_cython[:] = 0
eta = mixed_lasso_lagrange_prox(prox_arg, self.lagrange, totalq.coef, self._l1_penalty, self._unpenalized, self._positive_part, self._nonnegative, self._groups, self._weight_array, self._norms_cython, self._factors_cython, self._projection_cython)
if (offset is None):
return eta
else:
return (eta + offset) | The proximal operator. If the atom is in
Lagrange mode, this has the form
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^p} \frac{L}{2}
\|x-v\|^2_2 + \lambda h(v+\alpha) + \langle v, \eta \rangle
where :math:`\alpha` is the offset of self.affine_transform and
:math:`\eta` is self.linear_term.
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^p} \frac{L}{2}
\|x-v\|^2_2 + \langle v, \eta \rangle \text{s.t.} \ h(v+\alpha) \leq \lambda | regreg/atoms/mixed_lasso.py | proximal | vishalbelsare/regreg | 11 | python | def proximal(self, proxq, prox_control=None):
'\n The proximal operator. If the atom is in\n Lagrange mode, this has the form\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\lambda h(v+\\alpha) + \\langle v, \\eta \\rangle\n\n where :math:`\\alpha` is the offset of self.affine_transform and\n :math:`\\eta` is self.linear_term.\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\langle v, \\eta \\rangle \\text{s.t.} \\ h(v+\\alpha) \\leq \\lambda\n\n '
(offset, totalq) = (self.quadratic + proxq).recenter(self.offset)
if (totalq.coef == 0):
raise ValueError('lipschitz + quadratic coef must be positive')
prox_arg = ((- totalq.linear_term) / totalq.coef)
self._norms_cython[:] = 0
self._factors_cython[:] = 0
self._projection_cython[:] = 0
eta = mixed_lasso_lagrange_prox(prox_arg, self.lagrange, totalq.coef, self._l1_penalty, self._unpenalized, self._positive_part, self._nonnegative, self._groups, self._weight_array, self._norms_cython, self._factors_cython, self._projection_cython)
if (offset is None):
return eta
else:
return (eta + offset) | def proximal(self, proxq, prox_control=None):
'\n The proximal operator. If the atom is in\n Lagrange mode, this has the form\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\lambda h(v+\\alpha) + \\langle v, \\eta \\rangle\n\n where :math:`\\alpha` is the offset of self.affine_transform and\n :math:`\\eta` is self.linear_term.\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\langle v, \\eta \\rangle \\text{s.t.} \\ h(v+\\alpha) \\leq \\lambda\n\n '
(offset, totalq) = (self.quadratic + proxq).recenter(self.offset)
if (totalq.coef == 0):
raise ValueError('lipschitz + quadratic coef must be positive')
prox_arg = ((- totalq.linear_term) / totalq.coef)
self._norms_cython[:] = 0
self._factors_cython[:] = 0
self._projection_cython[:] = 0
eta = mixed_lasso_lagrange_prox(prox_arg, self.lagrange, totalq.coef, self._l1_penalty, self._unpenalized, self._positive_part, self._nonnegative, self._groups, self._weight_array, self._norms_cython, self._factors_cython, self._projection_cython)
if (offset is None):
return eta
else:
return (eta + offset)<|docstring|>The proximal operator. If the atom is in
Lagrange mode, this has the form
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^p} \frac{L}{2}
\|x-v\|^2_2 + \lambda h(v+\alpha) + \langle v, \eta \rangle
where :math:`\alpha` is the offset of self.affine_transform and
:math:`\eta` is self.linear_term.
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^p} \frac{L}{2}
\|x-v\|^2_2 + \langle v, \eta \rangle \text{s.t.} \ h(v+\alpha) \leq \lambda<|endoftext|> |
148999401b8988ff170fd6a3e2e764b65dfe59d6ad8eed407d79a81d5468b49a | def proximal(self, proxq, prox_control=None):
'\n The proximal operator. If the atom is in\n Bound mode, this has the form\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\lambda h(v+\\alpha) + \\langle v, \\eta \\rangle\n\n where :math:`\\alpha` is the offset of self.affine_transform and\n :math:`\\eta` is self.linear_term.\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\langle v, \\eta \\rangle \\text{s.t.} \\ h(v+\\alpha) \\leq \\lambda\n\n '
(offset, totalq) = (self.quadratic + proxq).recenter(self.offset)
if (totalq.coef == 0):
raise ValueError('lipschitz + quadratic coef must be positive')
prox_arg = ((- totalq.linear_term) / totalq.coef)
self._norms_cython[:] = 0
self._factors_cython[:] = 0
self._projection_cython[:] = 0
eta = mixed_lasso_dual_bound_prox(prox_arg, self.bound, self._l1_penalty, self._unpenalized, self._positive_part, self._nonnegative, self._groups, self._weight_array, self._norms_cython, self._factors_cython, self._projection_cython)
if (offset is None):
return eta
else:
return (eta + offset) | The proximal operator. If the atom is in
Bound mode, this has the form
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^p} \frac{L}{2}
\|x-v\|^2_2 + \lambda h(v+\alpha) + \langle v, \eta \rangle
where :math:`\alpha` is the offset of self.affine_transform and
:math:`\eta` is self.linear_term.
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^p} \frac{L}{2}
\|x-v\|^2_2 + \langle v, \eta \rangle \text{s.t.} \ h(v+\alpha) \leq \lambda | regreg/atoms/mixed_lasso.py | proximal | vishalbelsare/regreg | 11 | python | def proximal(self, proxq, prox_control=None):
'\n The proximal operator. If the atom is in\n Bound mode, this has the form\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\lambda h(v+\\alpha) + \\langle v, \\eta \\rangle\n\n where :math:`\\alpha` is the offset of self.affine_transform and\n :math:`\\eta` is self.linear_term.\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\langle v, \\eta \\rangle \\text{s.t.} \\ h(v+\\alpha) \\leq \\lambda\n\n '
(offset, totalq) = (self.quadratic + proxq).recenter(self.offset)
if (totalq.coef == 0):
raise ValueError('lipschitz + quadratic coef must be positive')
prox_arg = ((- totalq.linear_term) / totalq.coef)
self._norms_cython[:] = 0
self._factors_cython[:] = 0
self._projection_cython[:] = 0
eta = mixed_lasso_dual_bound_prox(prox_arg, self.bound, self._l1_penalty, self._unpenalized, self._positive_part, self._nonnegative, self._groups, self._weight_array, self._norms_cython, self._factors_cython, self._projection_cython)
if (offset is None):
return eta
else:
return (eta + offset) | def proximal(self, proxq, prox_control=None):
'\n The proximal operator. If the atom is in\n Bound mode, this has the form\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\lambda h(v+\\alpha) + \\langle v, \\eta \\rangle\n\n where :math:`\\alpha` is the offset of self.affine_transform and\n :math:`\\eta` is self.linear_term.\n\n .. math::\n\n v^{\\lambda}(x) = \\text{argmin}_{v \\in \\mathbb{R}^p} \\frac{L}{2}\n \\|x-v\\|^2_2 + \\langle v, \\eta \\rangle \\text{s.t.} \\ h(v+\\alpha) \\leq \\lambda\n\n '
(offset, totalq) = (self.quadratic + proxq).recenter(self.offset)
if (totalq.coef == 0):
raise ValueError('lipschitz + quadratic coef must be positive')
prox_arg = ((- totalq.linear_term) / totalq.coef)
self._norms_cython[:] = 0
self._factors_cython[:] = 0
self._projection_cython[:] = 0
eta = mixed_lasso_dual_bound_prox(prox_arg, self.bound, self._l1_penalty, self._unpenalized, self._positive_part, self._nonnegative, self._groups, self._weight_array, self._norms_cython, self._factors_cython, self._projection_cython)
if (offset is None):
return eta
else:
return (eta + offset)<|docstring|>The proximal operator. If the atom is in
Bound mode, this has the form
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^p} \frac{L}{2}
\|x-v\|^2_2 + \lambda h(v+\alpha) + \langle v, \eta \rangle
where :math:`\alpha` is the offset of self.affine_transform and
:math:`\eta` is self.linear_term.
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^p} \frac{L}{2}
\|x-v\|^2_2 + \langle v, \eta \rangle \text{s.t.} \ h(v+\alpha) \leq \lambda<|endoftext|> |
5a1f637891bc1c68042a91a91cd518aa7149aa6ee18115df839a7d41beee7d56 | @assert_auth
@export_as_api
def all_instruments(type: str=None, date: Union[(str, datetime.datetime, datetime.date)]=None) -> pd.DataFrame:
'\n 获取simons目前支持的所有合约信息\n \n :param type: 需要查询合约类型,例如:type=\'CS\'代表股票。默认是所有类型\n :param date: 查询时间点\n \n 其中type参数传入的合约类型和对应的解释如下 \n \n \n ========================= ====================================================\n 合约类型 说明\n ========================= ====================================================\n CS Common Stock, 即股票\n ETF Exchange Traded Fund, 即交易所交易基金\n LOF Listed Open-Ended Fund,即上市型开放式基金\n INDX Index, 即指数\n Future Futures,即期货,包含股指、国债和商品期货\n ========================= ==================================================== \n :example:\n \n .. code-block:: python3\n \n >>> instrument_df = all_instruments(type="CS")\n >>> instrument_df.head()\n order_book_id symbol industry_code exchange status type listed_date\n 0 000001.XSHE 平安银行 J66 XSHE Active CS 1991-04-03\n 1 000002.XSHE 万科A K70 XSHE Active CS 1991-01-29\n 2 000004.XSHE 国农科技 I65 XSHE Active CS 1991-01-14\n 3 000005.XSHE 世纪星源 N77 XSHE Active CS 1990-12-10\n 4 000006.XSHE 深振业A K70 XSHE Active CS 1992-04-27\n '
date = convert_datetime_to_str(date)
return SimonsClient.instance().all_instruments(**locals()) | 获取simons目前支持的所有合约信息
:param type: 需要查询合约类型,例如:type='CS'代表股票。默认是所有类型
:param date: 查询时间点
其中type参数传入的合约类型和对应的解释如下
========================= ====================================================
合约类型 说明
========================= ====================================================
CS Common Stock, 即股票
ETF Exchange Traded Fund, 即交易所交易基金
LOF Listed Open-Ended Fund,即上市型开放式基金
INDX Index, 即指数
Future Futures,即期货,包含股指、国债和商品期货
========================= ====================================================
:example:
.. code-block:: python3
>>> instrument_df = all_instruments(type="CS")
>>> instrument_df.head()
order_book_id symbol industry_code exchange status type listed_date
0 000001.XSHE 平安银行 J66 XSHE Active CS 1991-04-03
1 000002.XSHE 万科A K70 XSHE Active CS 1991-01-29
2 000004.XSHE 国农科技 I65 XSHE Active CS 1991-01-14
3 000005.XSHE 世纪星源 N77 XSHE Active CS 1990-12-10
4 000006.XSHE 深振业A K70 XSHE Active CS 1992-04-27 | simonsc/api/base_api.py | all_instruments | jzkj-luolinh/simonsc | 0 | python | @assert_auth
@export_as_api
def all_instruments(type: str=None, date: Union[(str, datetime.datetime, datetime.date)]=None) -> pd.DataFrame:
'\n 获取simons目前支持的所有合约信息\n \n :param type: 需要查询合约类型,例如:type=\'CS\'代表股票。默认是所有类型\n :param date: 查询时间点\n \n 其中type参数传入的合约类型和对应的解释如下 \n \n \n ========================= ====================================================\n 合约类型 说明\n ========================= ====================================================\n CS Common Stock, 即股票\n ETF Exchange Traded Fund, 即交易所交易基金\n LOF Listed Open-Ended Fund,即上市型开放式基金\n INDX Index, 即指数\n Future Futures,即期货,包含股指、国债和商品期货\n ========================= ==================================================== \n :example:\n \n .. code-block:: python3\n \n >>> instrument_df = all_instruments(type="CS")\n >>> instrument_df.head()\n order_book_id symbol industry_code exchange status type listed_date\n 0 000001.XSHE 平安银行 J66 XSHE Active CS 1991-04-03\n 1 000002.XSHE 万科A K70 XSHE Active CS 1991-01-29\n 2 000004.XSHE 国农科技 I65 XSHE Active CS 1991-01-14\n 3 000005.XSHE 世纪星源 N77 XSHE Active CS 1990-12-10\n 4 000006.XSHE 深振业A K70 XSHE Active CS 1992-04-27\n '
date = convert_datetime_to_str(date)
return SimonsClient.instance().all_instruments(**locals()) | @assert_auth
@export_as_api
def all_instruments(type: str=None, date: Union[(str, datetime.datetime, datetime.date)]=None) -> pd.DataFrame:
'\n 获取simons目前支持的所有合约信息\n \n :param type: 需要查询合约类型,例如:type=\'CS\'代表股票。默认是所有类型\n :param date: 查询时间点\n \n 其中type参数传入的合约类型和对应的解释如下 \n \n \n ========================= ====================================================\n 合约类型 说明\n ========================= ====================================================\n CS Common Stock, 即股票\n ETF Exchange Traded Fund, 即交易所交易基金\n LOF Listed Open-Ended Fund,即上市型开放式基金\n INDX Index, 即指数\n Future Futures,即期货,包含股指、国债和商品期货\n ========================= ==================================================== \n :example:\n \n .. code-block:: python3\n \n >>> instrument_df = all_instruments(type="CS")\n >>> instrument_df.head()\n order_book_id symbol industry_code exchange status type listed_date\n 0 000001.XSHE 平安银行 J66 XSHE Active CS 1991-04-03\n 1 000002.XSHE 万科A K70 XSHE Active CS 1991-01-29\n 2 000004.XSHE 国农科技 I65 XSHE Active CS 1991-01-14\n 3 000005.XSHE 世纪星源 N77 XSHE Active CS 1990-12-10\n 4 000006.XSHE 深振业A K70 XSHE Active CS 1992-04-27\n '
date = convert_datetime_to_str(date)
return SimonsClient.instance().all_instruments(**locals())<|docstring|>获取simons目前支持的所有合约信息
:param type: 需要查询合约类型,例如:type='CS'代表股票。默认是所有类型
:param date: 查询时间点
其中type参数传入的合约类型和对应的解释如下
========================= ====================================================
合约类型 说明
========================= ====================================================
CS Common Stock, 即股票
ETF Exchange Traded Fund, 即交易所交易基金
LOF Listed Open-Ended Fund,即上市型开放式基金
INDX Index, 即指数
Future Futures,即期货,包含股指、国债和商品期货
========================= ====================================================
:example:
.. code-block:: python3
>>> instrument_df = all_instruments(type="CS")
>>> instrument_df.head()
order_book_id symbol industry_code exchange status type listed_date
0 000001.XSHE 平安银行 J66 XSHE Active CS 1991-04-03
1 000002.XSHE 万科A K70 XSHE Active CS 1991-01-29
2 000004.XSHE 国农科技 I65 XSHE Active CS 1991-01-14
3 000005.XSHE 世纪星源 N77 XSHE Active CS 1990-12-10
4 000006.XSHE 深振业A K70 XSHE Active CS 1992-04-27<|endoftext|> |
3cd1595a45d1e1eb207f29d88a9156b5838e45293472cd22da1b129808b8e100 | @assert_auth
@export_as_api
def history_bars(order_book_ids: str, bar_count: int, frequency: str, dt: datetime.datetime, fields: List[str]=None, skip_suspended: bool=True, include_now: bool=False, adjust_type: str='pre', adjust_orig: datetime.datetime=None) -> pd.DataFrame:
'获取指定合约的历史 k 线行情,支持任意日频率xd(1d,5d)和任意分钟频率xm(1m,3m,5m,15m)的历史数据。\n \n :param order_book_ids: 多个标的合约代码\n :param bar_count: 获取的历史数据数量,必填项\n :param frequency: 获取数据什么样的频率进行。\'1d\'或\'1m\'分别表示每日和每分钟,必填项\n :param fields: 返回数据字段。必填项。见下方列表。\n :param skip_suspended: 是否跳过停牌数据\n :param include_now: 是否包含当前数据\n :param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post\n ========================= ===================================================\n fields 字段名\n ========================= ===================================================\n datetime 时间戳\n open 开盘价\n high 最高价\n low 最低价\n close 收盘价\n volume 成交量\n total_turnover 成交额\n open_interest 持仓量(期货专用)\n basis_spread 期现差(股指期货专用)\n settlement 结算价(期货日线专用)\n prev_settlement 结算价(期货日线专用)\n ========================= ===================================================\n \n Example1::\n \n 获取中国平安和万科 2020-04-20之前10天的交易数据\n \n .. code-block:: python3\n \n import pandas as pd\n from simons.api import history_bars\n \n # \n >>> dt = pd.Timestamp("2020-04-20")\n >>> fields=["datetime","open","high","low","close","volume"]\n >>> data = history_bars(order_book_ids=["000001.XSHE", "000002.XSHE"], dt=dt, bar_count=10, frequency="1d", fields=fields)\n >>> print(data)\n \n open high low close volume\n order_book_id datetime \n 000001.XSHE 2020-04-07 12.89 12.94 12.81 12.88 87031371.0\n 2020-04-08 12.88 12.92 12.72 12.78 52871614.0\n 2020-04-09 12.88 12.89 12.72 12.74 40855377.0\n 2020-04-10 12.76 12.98 12.65 12.79 66667495.0\n 2020-04-13 12.67 12.71 12.47 12.59 44621440.0\n 2020-04-14 12.65 12.86 12.57 12.86 68608687.0\n 2020-04-15 12.86 12.93 12.78 12.87 65639640.0\n 2020-04-16 12.79 12.79 12.54 12.68 78915498.0\n 2020-04-17 12.77 13.04 12.65 12.89 133116477.0\n 2020-04-20 12.86 13.05 12.77 12.99 81845583.0\n 000002.XSHE 2020-04-07 27.34 27.42 26.80 27.07 67154006.0\n 2020-04-08 26.90 27.25 26.75 26.96 41251395.0\n 2020-04-09 27.10 27.16 26.60 26.69 38726254.0\n 2020-04-10 26.84 27.34 26.59 26.88 62460322.0\n 2020-04-13 26.74 27.13 26.61 27.04 43264902.0\n 2020-04-14 27.10 27.75 27.02 27.35 64241868.0\n 2020-04-15 27.20 27.23 26.55 26.70 70359257.0\n 2020-04-16 26.52 26.76 26.40 26.58 50238931.0\n 2020-04-17 26.78 27.03 26.55 26.72 83813322.0\n 2020-04-20 26.78 26.81 26.05 26.58 85012343.0\n \n '
dt = convert_datetime_to_str(dt)
return SimonsClient.instance().history_bars(**locals()) | 获取指定合约的历史 k 线行情,支持任意日频率xd(1d,5d)和任意分钟频率xm(1m,3m,5m,15m)的历史数据。
:param order_book_ids: 多个标的合约代码
:param bar_count: 获取的历史数据数量,必填项
:param frequency: 获取数据什么样的频率进行。'1d'或'1m'分别表示每日和每分钟,必填项
:param fields: 返回数据字段。必填项。见下方列表。
:param skip_suspended: 是否跳过停牌数据
:param include_now: 是否包含当前数据
:param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post
========================= ===================================================
fields 字段名
========================= ===================================================
datetime 时间戳
open 开盘价
high 最高价
low 最低价
close 收盘价
volume 成交量
total_turnover 成交额
open_interest 持仓量(期货专用)
basis_spread 期现差(股指期货专用)
settlement 结算价(期货日线专用)
prev_settlement 结算价(期货日线专用)
========================= ===================================================
Example1::
获取中国平安和万科 2020-04-20之前10天的交易数据
.. code-block:: python3
import pandas as pd
from simons.api import history_bars
#
>>> dt = pd.Timestamp("2020-04-20")
>>> fields=["datetime","open","high","low","close","volume"]
>>> data = history_bars(order_book_ids=["000001.XSHE", "000002.XSHE"], dt=dt, bar_count=10, frequency="1d", fields=fields)
>>> print(data)
open high low close volume
order_book_id datetime
000001.XSHE 2020-04-07 12.89 12.94 12.81 12.88 87031371.0
2020-04-08 12.88 12.92 12.72 12.78 52871614.0
2020-04-09 12.88 12.89 12.72 12.74 40855377.0
2020-04-10 12.76 12.98 12.65 12.79 66667495.0
2020-04-13 12.67 12.71 12.47 12.59 44621440.0
2020-04-14 12.65 12.86 12.57 12.86 68608687.0
2020-04-15 12.86 12.93 12.78 12.87 65639640.0
2020-04-16 12.79 12.79 12.54 12.68 78915498.0
2020-04-17 12.77 13.04 12.65 12.89 133116477.0
2020-04-20 12.86 13.05 12.77 12.99 81845583.0
000002.XSHE 2020-04-07 27.34 27.42 26.80 27.07 67154006.0
2020-04-08 26.90 27.25 26.75 26.96 41251395.0
2020-04-09 27.10 27.16 26.60 26.69 38726254.0
2020-04-10 26.84 27.34 26.59 26.88 62460322.0
2020-04-13 26.74 27.13 26.61 27.04 43264902.0
2020-04-14 27.10 27.75 27.02 27.35 64241868.0
2020-04-15 27.20 27.23 26.55 26.70 70359257.0
2020-04-16 26.52 26.76 26.40 26.58 50238931.0
2020-04-17 26.78 27.03 26.55 26.72 83813322.0
2020-04-20 26.78 26.81 26.05 26.58 85012343.0 | simonsc/api/base_api.py | history_bars | jzkj-luolinh/simonsc | 0 | python | @assert_auth
@export_as_api
def history_bars(order_book_ids: str, bar_count: int, frequency: str, dt: datetime.datetime, fields: List[str]=None, skip_suspended: bool=True, include_now: bool=False, adjust_type: str='pre', adjust_orig: datetime.datetime=None) -> pd.DataFrame:
'获取指定合约的历史 k 线行情,支持任意日频率xd(1d,5d)和任意分钟频率xm(1m,3m,5m,15m)的历史数据。\n \n :param order_book_ids: 多个标的合约代码\n :param bar_count: 获取的历史数据数量,必填项\n :param frequency: 获取数据什么样的频率进行。\'1d\'或\'1m\'分别表示每日和每分钟,必填项\n :param fields: 返回数据字段。必填项。见下方列表。\n :param skip_suspended: 是否跳过停牌数据\n :param include_now: 是否包含当前数据\n :param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post\n ========================= ===================================================\n fields 字段名\n ========================= ===================================================\n datetime 时间戳\n open 开盘价\n high 最高价\n low 最低价\n close 收盘价\n volume 成交量\n total_turnover 成交额\n open_interest 持仓量(期货专用)\n basis_spread 期现差(股指期货专用)\n settlement 结算价(期货日线专用)\n prev_settlement 结算价(期货日线专用)\n ========================= ===================================================\n \n Example1::\n \n 获取中国平安和万科 2020-04-20之前10天的交易数据\n \n .. code-block:: python3\n \n import pandas as pd\n from simons.api import history_bars\n \n # \n >>> dt = pd.Timestamp("2020-04-20")\n >>> fields=["datetime","open","high","low","close","volume"]\n >>> data = history_bars(order_book_ids=["000001.XSHE", "000002.XSHE"], dt=dt, bar_count=10, frequency="1d", fields=fields)\n >>> print(data)\n \n open high low close volume\n order_book_id datetime \n 000001.XSHE 2020-04-07 12.89 12.94 12.81 12.88 87031371.0\n 2020-04-08 12.88 12.92 12.72 12.78 52871614.0\n 2020-04-09 12.88 12.89 12.72 12.74 40855377.0\n 2020-04-10 12.76 12.98 12.65 12.79 66667495.0\n 2020-04-13 12.67 12.71 12.47 12.59 44621440.0\n 2020-04-14 12.65 12.86 12.57 12.86 68608687.0\n 2020-04-15 12.86 12.93 12.78 12.87 65639640.0\n 2020-04-16 12.79 12.79 12.54 12.68 78915498.0\n 2020-04-17 12.77 13.04 12.65 12.89 133116477.0\n 2020-04-20 12.86 13.05 12.77 12.99 81845583.0\n 000002.XSHE 2020-04-07 27.34 27.42 26.80 27.07 67154006.0\n 2020-04-08 26.90 27.25 26.75 26.96 41251395.0\n 2020-04-09 27.10 27.16 26.60 26.69 38726254.0\n 2020-04-10 26.84 27.34 26.59 26.88 62460322.0\n 2020-04-13 26.74 27.13 26.61 27.04 43264902.0\n 2020-04-14 27.10 27.75 27.02 27.35 64241868.0\n 2020-04-15 27.20 27.23 26.55 26.70 70359257.0\n 2020-04-16 26.52 26.76 26.40 26.58 50238931.0\n 2020-04-17 26.78 27.03 26.55 26.72 83813322.0\n 2020-04-20 26.78 26.81 26.05 26.58 85012343.0\n \n '
dt = convert_datetime_to_str(dt)
return SimonsClient.instance().history_bars(**locals()) | @assert_auth
@export_as_api
def history_bars(order_book_ids: str, bar_count: int, frequency: str, dt: datetime.datetime, fields: List[str]=None, skip_suspended: bool=True, include_now: bool=False, adjust_type: str='pre', adjust_orig: datetime.datetime=None) -> pd.DataFrame:
'获取指定合约的历史 k 线行情,支持任意日频率xd(1d,5d)和任意分钟频率xm(1m,3m,5m,15m)的历史数据。\n \n :param order_book_ids: 多个标的合约代码\n :param bar_count: 获取的历史数据数量,必填项\n :param frequency: 获取数据什么样的频率进行。\'1d\'或\'1m\'分别表示每日和每分钟,必填项\n :param fields: 返回数据字段。必填项。见下方列表。\n :param skip_suspended: 是否跳过停牌数据\n :param include_now: 是否包含当前数据\n :param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post\n ========================= ===================================================\n fields 字段名\n ========================= ===================================================\n datetime 时间戳\n open 开盘价\n high 最高价\n low 最低价\n close 收盘价\n volume 成交量\n total_turnover 成交额\n open_interest 持仓量(期货专用)\n basis_spread 期现差(股指期货专用)\n settlement 结算价(期货日线专用)\n prev_settlement 结算价(期货日线专用)\n ========================= ===================================================\n \n Example1::\n \n 获取中国平安和万科 2020-04-20之前10天的交易数据\n \n .. code-block:: python3\n \n import pandas as pd\n from simons.api import history_bars\n \n # \n >>> dt = pd.Timestamp("2020-04-20")\n >>> fields=["datetime","open","high","low","close","volume"]\n >>> data = history_bars(order_book_ids=["000001.XSHE", "000002.XSHE"], dt=dt, bar_count=10, frequency="1d", fields=fields)\n >>> print(data)\n \n open high low close volume\n order_book_id datetime \n 000001.XSHE 2020-04-07 12.89 12.94 12.81 12.88 87031371.0\n 2020-04-08 12.88 12.92 12.72 12.78 52871614.0\n 2020-04-09 12.88 12.89 12.72 12.74 40855377.0\n 2020-04-10 12.76 12.98 12.65 12.79 66667495.0\n 2020-04-13 12.67 12.71 12.47 12.59 44621440.0\n 2020-04-14 12.65 12.86 12.57 12.86 68608687.0\n 2020-04-15 12.86 12.93 12.78 12.87 65639640.0\n 2020-04-16 12.79 12.79 12.54 12.68 78915498.0\n 2020-04-17 12.77 13.04 12.65 12.89 133116477.0\n 2020-04-20 12.86 13.05 12.77 12.99 81845583.0\n 000002.XSHE 2020-04-07 27.34 27.42 26.80 27.07 67154006.0\n 2020-04-08 26.90 27.25 26.75 26.96 41251395.0\n 2020-04-09 27.10 27.16 26.60 26.69 38726254.0\n 2020-04-10 26.84 27.34 26.59 26.88 62460322.0\n 2020-04-13 26.74 27.13 26.61 27.04 43264902.0\n 2020-04-14 27.10 27.75 27.02 27.35 64241868.0\n 2020-04-15 27.20 27.23 26.55 26.70 70359257.0\n 2020-04-16 26.52 26.76 26.40 26.58 50238931.0\n 2020-04-17 26.78 27.03 26.55 26.72 83813322.0\n 2020-04-20 26.78 26.81 26.05 26.58 85012343.0\n \n '
dt = convert_datetime_to_str(dt)
return SimonsClient.instance().history_bars(**locals())<|docstring|>获取指定合约的历史 k 线行情,支持任意日频率xd(1d,5d)和任意分钟频率xm(1m,3m,5m,15m)的历史数据。
:param order_book_ids: 多个标的合约代码
:param bar_count: 获取的历史数据数量,必填项
:param frequency: 获取数据什么样的频率进行。'1d'或'1m'分别表示每日和每分钟,必填项
:param fields: 返回数据字段。必填项。见下方列表。
:param skip_suspended: 是否跳过停牌数据
:param include_now: 是否包含当前数据
:param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post
========================= ===================================================
fields 字段名
========================= ===================================================
datetime 时间戳
open 开盘价
high 最高价
low 最低价
close 收盘价
volume 成交量
total_turnover 成交额
open_interest 持仓量(期货专用)
basis_spread 期现差(股指期货专用)
settlement 结算价(期货日线专用)
prev_settlement 结算价(期货日线专用)
========================= ===================================================
Example1::
获取中国平安和万科 2020-04-20之前10天的交易数据
.. code-block:: python3
import pandas as pd
from simons.api import history_bars
#
>>> dt = pd.Timestamp("2020-04-20")
>>> fields=["datetime","open","high","low","close","volume"]
>>> data = history_bars(order_book_ids=["000001.XSHE", "000002.XSHE"], dt=dt, bar_count=10, frequency="1d", fields=fields)
>>> print(data)
open high low close volume
order_book_id datetime
000001.XSHE 2020-04-07 12.89 12.94 12.81 12.88 87031371.0
2020-04-08 12.88 12.92 12.72 12.78 52871614.0
2020-04-09 12.88 12.89 12.72 12.74 40855377.0
2020-04-10 12.76 12.98 12.65 12.79 66667495.0
2020-04-13 12.67 12.71 12.47 12.59 44621440.0
2020-04-14 12.65 12.86 12.57 12.86 68608687.0
2020-04-15 12.86 12.93 12.78 12.87 65639640.0
2020-04-16 12.79 12.79 12.54 12.68 78915498.0
2020-04-17 12.77 13.04 12.65 12.89 133116477.0
2020-04-20 12.86 13.05 12.77 12.99 81845583.0
000002.XSHE 2020-04-07 27.34 27.42 26.80 27.07 67154006.0
2020-04-08 26.90 27.25 26.75 26.96 41251395.0
2020-04-09 27.10 27.16 26.60 26.69 38726254.0
2020-04-10 26.84 27.34 26.59 26.88 62460322.0
2020-04-13 26.74 27.13 26.61 27.04 43264902.0
2020-04-14 27.10 27.75 27.02 27.35 64241868.0
2020-04-15 27.20 27.23 26.55 26.70 70359257.0
2020-04-16 26.52 26.76 26.40 26.58 50238931.0
2020-04-17 26.78 27.03 26.55 26.72 83813322.0
2020-04-20 26.78 26.81 26.05 26.58 85012343.0<|endoftext|> |
d3e35f88ce8228faf2bd2e4015340f4e6252ac3117ab717bc8b1ea0a4b326d0e | @assert_auth
@export_as_api
def history_snapshot(order_book_id: str, bar_count: int, dt: datetime.datetime, fields: List[str]=None, skip_suspended: bool=True, include_now: bool=False, adjust_type: str='none', adjust_orig: datetime=None) -> pd.DataFrame:
'获取指定合约的历史快照数据\n \n :param order_book_id: 合约代码\n :param bar_count: 获取的历史数据数量,必填项\n :param fields: 返回数据字段。必填项。见下方列表。\n :param skip_suspended: 是否跳过停牌数据\n :param include_now: 是否包含当前数据\n :param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post\n \n .. admonition:: 可支持的数据字段\n :class: dropdown, note\n \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | fields | 中文名 | dtype | 是否是原始字段 | 注释 | \n +========================================+==========================+=========+================+=======================================================+\n | date | 交易归属日期 | <i8 | Y | yyyymmdd | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmss,由交易日当天日期和数据生成时间(交易s |\n | | | | |(交易所直接下发的)合成。 | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | last | 最新成交价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_price_10 ~ sell_price_1 | 第10 ~ 1档委托卖出价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_price_1 ~ buy_price_10 | 第1 ~ 10档委托买入价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_volume_10 ~ sell_volume_1 | 第10 ~ 1档申卖量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_volume_1 ~ buy_volume_10 | 第1 ~ 10档申买量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | num_sell_trades_10 ~ num_sell_trades_1 | 委卖笔数10 ~ 委卖笔数1 | <f8 | Y | 委卖价1 ~ 10的委托总比数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | num_buy_trades_1 ~ num_buy_trades_10 | 委买笔数1 ~ 委买笔数10 | <f8 | Y | 委买价1 ~ 10的委托总比数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_num_trades | 成交总笔数 | <f8 | Y | 开盘至当前时刻的累计成交笔数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_num_trades | 分笔期间成交笔数 | <f8 | 上交所:N | 当前成交总比数(total_num_trades_t) - 上一记录的成 |\n | | | | 深交所:Y | 交总比数(total_num_trades_t-1);首条记录取当前成交 |\n | | | | | 总比 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_volume | 成交总量 | <f8 | Y | 开盘至当前时刻的累计成交量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_volume | 分笔期间成交量 | <f8 | 上交所:N | 当前成交总量(total_volume_t ) - 上一记录的成 |\n | | | | 深交所:Y | 交总量(total_volume_t-1);首条记录取当前成交总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_turnover | 成交总额 | <f8 | Y | 开盘至当前时刻的累计成交额 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_turnover | 分笔期间成交额 | <f8 | 上交所:N | 当前成交总额(total_turnover_t) - 上一记录的成交总额 |\n | | | | 深交所:Y | (total_turnover_t-1);首条记录取当前成交总额 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_sell_order_volume | 委托卖出总量 | <f8 | Y | 是指直接到切片时间的还存在的, 所有委托卖单总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_buy_order_volume | 委托买入总量 | <f8 | Y | 是指直接到切片时间的还存在的,所有委托买单总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | wt_avg_sell_price | 加权平均委卖价格 | <f8 | Y | 单位:元 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | wt_avg_buy_price | 加权平均委买价格 | <f8 | Y | 单位:元 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | prev_close | 昨收盘价 | <f8 | Y | 上一交易日的收盘价,上交所的收盘价格是最后一分钟的成 |\n | | | | | 交均价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | open | 开盘价 | <f8 | Y | 当日开盘价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | high | 最高价 | <f8 | Y | 开盘至当前时刻所出现的最高成交价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | low | 最低价 | <f8 | Y | 开盘至当前时刻所出现的最低成交价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | daily_close | 今日收盘价 | <f8 | Y | 该交易日的收盘价,上交所的收盘价格是最后一分钟的成交均|\n | | | | | 价(在最后一笔行情上更新,其余行值为0) | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_level_no | 申卖价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_level_no | 申买价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n\n \n bbla \n \n .. code-block:: python3\n \n from simonsc.api import history_snapshot\n \n >>> dt = pd.Timestamp("2020-07-24 14:55:00")\n >>> fields=["datetime","last","buy_price_1","buy_volume_1","sell_price_1","sell_volume_1","sell_price_10"]\n >>> data = history_snapshot(order_book_id="600446.XSHG", dt=dt, bar_count=10, fields=fields)\n >>> print(data)\n \n last buy_price_1 buy_volume_1 sell_price_1 sell_volume_1 sell_price_10\n order_book_id datetime \n 600446.XSHG 2020-07-24 14:54:32 19.12 19.12 1100. 19.13, 1500. 19.26\n 2020-07-24 14:54:35 19.12 19.11 6600. 19.12, 57900. 19.25\n 2020-07-24 14:54:38 19.12 19.11 6800. 19.12, 57800. 19.25\n 2020-07-24 14:54:41 19.12 19.11 36400. 19.12, 57200. 19.25\n 2020-07-24 14:54:44 19.11 19.11 21200. 19.12, 55900. 19.25\n 2020-07-24 14:54:47 19.11 19.11 7400. 19.12, 52200. 19.25\n 2020-07-24 14:54:50 19.12 19.11 4700. 19.12, 40800. 19.25\n 2020-07-24 14:54:53 19.12 19.12 41800. 19.13, 9700. 19.26\n 2020-07-24 14:54:56 19.12 19.12 40900. 19.13, 9700. 19.26\n 2020-07-24 14:54:59 19.13 19.12 44000. 19.13, 9600. 19.26\n '
dt = convert_datetime_to_str(dt)
return SimonsClient.instance().history_snapshot(**locals()) | 获取指定合约的历史快照数据
:param order_book_id: 合约代码
:param bar_count: 获取的历史数据数量,必填项
:param fields: 返回数据字段。必填项。见下方列表。
:param skip_suspended: 是否跳过停牌数据
:param include_now: 是否包含当前数据
:param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post
.. admonition:: 可支持的数据字段
:class: dropdown, note
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| fields | 中文名 | dtype | 是否是原始字段 | 注释 |
+========================================+==========================+=========+================+=======================================================+
| date | 交易归属日期 | <i8 | Y | yyyymmdd |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmss,由交易日当天日期和数据生成时间(交易s |
| | | | |(交易所直接下发的)合成。 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| last | 最新成交价 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| sell_price_10 ~ sell_price_1 | 第10 ~ 1档委托卖出价 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| buy_price_1 ~ buy_price_10 | 第1 ~ 10档委托买入价 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| sell_volume_10 ~ sell_volume_1 | 第10 ~ 1档申卖量 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| buy_volume_1 ~ buy_volume_10 | 第1 ~ 10档申买量 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| num_sell_trades_10 ~ num_sell_trades_1 | 委卖笔数10 ~ 委卖笔数1 | <f8 | Y | 委卖价1 ~ 10的委托总比数 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| num_buy_trades_1 ~ num_buy_trades_10 | 委买笔数1 ~ 委买笔数10 | <f8 | Y | 委买价1 ~ 10的委托总比数 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_num_trades | 成交总笔数 | <f8 | Y | 开盘至当前时刻的累计成交笔数 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| current_num_trades | 分笔期间成交笔数 | <f8 | 上交所:N | 当前成交总比数(total_num_trades_t) - 上一记录的成 |
| | | | 深交所:Y | 交总比数(total_num_trades_t-1);首条记录取当前成交 |
| | | | | 总比 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_volume | 成交总量 | <f8 | Y | 开盘至当前时刻的累计成交量 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| current_volume | 分笔期间成交量 | <f8 | 上交所:N | 当前成交总量(total_volume_t ) - 上一记录的成 |
| | | | 深交所:Y | 交总量(total_volume_t-1);首条记录取当前成交总量 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_turnover | 成交总额 | <f8 | Y | 开盘至当前时刻的累计成交额 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| current_turnover | 分笔期间成交额 | <f8 | 上交所:N | 当前成交总额(total_turnover_t) - 上一记录的成交总额 |
| | | | 深交所:Y | (total_turnover_t-1);首条记录取当前成交总额 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_sell_order_volume | 委托卖出总量 | <f8 | Y | 是指直接到切片时间的还存在的, 所有委托卖单总量 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_buy_order_volume | 委托买入总量 | <f8 | Y | 是指直接到切片时间的还存在的,所有委托买单总量 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| wt_avg_sell_price | 加权平均委卖价格 | <f8 | Y | 单位:元 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| wt_avg_buy_price | 加权平均委买价格 | <f8 | Y | 单位:元 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| prev_close | 昨收盘价 | <f8 | Y | 上一交易日的收盘价,上交所的收盘价格是最后一分钟的成 |
| | | | | 交均价 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| open | 开盘价 | <f8 | Y | 当日开盘价 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| high | 最高价 | <f8 | Y | 开盘至当前时刻所出现的最高成交价 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| low | 最低价 | <f8 | Y | 开盘至当前时刻所出现的最低成交价 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| daily_close | 今日收盘价 | <f8 | Y | 该交易日的收盘价,上交所的收盘价格是最后一分钟的成交均|
| | | | | 价(在最后一笔行情上更新,其余行值为0) |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| sell_level_no | 申卖价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| buy_level_no | 申买价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
bbla
.. code-block:: python3
from simonsc.api import history_snapshot
>>> dt = pd.Timestamp("2020-07-24 14:55:00")
>>> fields=["datetime","last","buy_price_1","buy_volume_1","sell_price_1","sell_volume_1","sell_price_10"]
>>> data = history_snapshot(order_book_id="600446.XSHG", dt=dt, bar_count=10, fields=fields)
>>> print(data)
last buy_price_1 buy_volume_1 sell_price_1 sell_volume_1 sell_price_10
order_book_id datetime
600446.XSHG 2020-07-24 14:54:32 19.12 19.12 1100. 19.13, 1500. 19.26
2020-07-24 14:54:35 19.12 19.11 6600. 19.12, 57900. 19.25
2020-07-24 14:54:38 19.12 19.11 6800. 19.12, 57800. 19.25
2020-07-24 14:54:41 19.12 19.11 36400. 19.12, 57200. 19.25
2020-07-24 14:54:44 19.11 19.11 21200. 19.12, 55900. 19.25
2020-07-24 14:54:47 19.11 19.11 7400. 19.12, 52200. 19.25
2020-07-24 14:54:50 19.12 19.11 4700. 19.12, 40800. 19.25
2020-07-24 14:54:53 19.12 19.12 41800. 19.13, 9700. 19.26
2020-07-24 14:54:56 19.12 19.12 40900. 19.13, 9700. 19.26
2020-07-24 14:54:59 19.13 19.12 44000. 19.13, 9600. 19.26 | simonsc/api/base_api.py | history_snapshot | jzkj-luolinh/simonsc | 0 | python | @assert_auth
@export_as_api
def history_snapshot(order_book_id: str, bar_count: int, dt: datetime.datetime, fields: List[str]=None, skip_suspended: bool=True, include_now: bool=False, adjust_type: str='none', adjust_orig: datetime=None) -> pd.DataFrame:
'获取指定合约的历史快照数据\n \n :param order_book_id: 合约代码\n :param bar_count: 获取的历史数据数量,必填项\n :param fields: 返回数据字段。必填项。见下方列表。\n :param skip_suspended: 是否跳过停牌数据\n :param include_now: 是否包含当前数据\n :param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post\n \n .. admonition:: 可支持的数据字段\n :class: dropdown, note\n \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | fields | 中文名 | dtype | 是否是原始字段 | 注释 | \n +========================================+==========================+=========+================+=======================================================+\n | date | 交易归属日期 | <i8 | Y | yyyymmdd | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmss,由交易日当天日期和数据生成时间(交易s |\n | | | | |(交易所直接下发的)合成。 | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | last | 最新成交价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_price_10 ~ sell_price_1 | 第10 ~ 1档委托卖出价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_price_1 ~ buy_price_10 | 第1 ~ 10档委托买入价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_volume_10 ~ sell_volume_1 | 第10 ~ 1档申卖量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_volume_1 ~ buy_volume_10 | 第1 ~ 10档申买量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | num_sell_trades_10 ~ num_sell_trades_1 | 委卖笔数10 ~ 委卖笔数1 | <f8 | Y | 委卖价1 ~ 10的委托总比数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | num_buy_trades_1 ~ num_buy_trades_10 | 委买笔数1 ~ 委买笔数10 | <f8 | Y | 委买价1 ~ 10的委托总比数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_num_trades | 成交总笔数 | <f8 | Y | 开盘至当前时刻的累计成交笔数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_num_trades | 分笔期间成交笔数 | <f8 | 上交所:N | 当前成交总比数(total_num_trades_t) - 上一记录的成 |\n | | | | 深交所:Y | 交总比数(total_num_trades_t-1);首条记录取当前成交 |\n | | | | | 总比 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_volume | 成交总量 | <f8 | Y | 开盘至当前时刻的累计成交量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_volume | 分笔期间成交量 | <f8 | 上交所:N | 当前成交总量(total_volume_t ) - 上一记录的成 |\n | | | | 深交所:Y | 交总量(total_volume_t-1);首条记录取当前成交总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_turnover | 成交总额 | <f8 | Y | 开盘至当前时刻的累计成交额 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_turnover | 分笔期间成交额 | <f8 | 上交所:N | 当前成交总额(total_turnover_t) - 上一记录的成交总额 |\n | | | | 深交所:Y | (total_turnover_t-1);首条记录取当前成交总额 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_sell_order_volume | 委托卖出总量 | <f8 | Y | 是指直接到切片时间的还存在的, 所有委托卖单总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_buy_order_volume | 委托买入总量 | <f8 | Y | 是指直接到切片时间的还存在的,所有委托买单总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | wt_avg_sell_price | 加权平均委卖价格 | <f8 | Y | 单位:元 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | wt_avg_buy_price | 加权平均委买价格 | <f8 | Y | 单位:元 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | prev_close | 昨收盘价 | <f8 | Y | 上一交易日的收盘价,上交所的收盘价格是最后一分钟的成 |\n | | | | | 交均价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | open | 开盘价 | <f8 | Y | 当日开盘价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | high | 最高价 | <f8 | Y | 开盘至当前时刻所出现的最高成交价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | low | 最低价 | <f8 | Y | 开盘至当前时刻所出现的最低成交价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | daily_close | 今日收盘价 | <f8 | Y | 该交易日的收盘价,上交所的收盘价格是最后一分钟的成交均|\n | | | | | 价(在最后一笔行情上更新,其余行值为0) | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_level_no | 申卖价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_level_no | 申买价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n\n \n bbla \n \n .. code-block:: python3\n \n from simonsc.api import history_snapshot\n \n >>> dt = pd.Timestamp("2020-07-24 14:55:00")\n >>> fields=["datetime","last","buy_price_1","buy_volume_1","sell_price_1","sell_volume_1","sell_price_10"]\n >>> data = history_snapshot(order_book_id="600446.XSHG", dt=dt, bar_count=10, fields=fields)\n >>> print(data)\n \n last buy_price_1 buy_volume_1 sell_price_1 sell_volume_1 sell_price_10\n order_book_id datetime \n 600446.XSHG 2020-07-24 14:54:32 19.12 19.12 1100. 19.13, 1500. 19.26\n 2020-07-24 14:54:35 19.12 19.11 6600. 19.12, 57900. 19.25\n 2020-07-24 14:54:38 19.12 19.11 6800. 19.12, 57800. 19.25\n 2020-07-24 14:54:41 19.12 19.11 36400. 19.12, 57200. 19.25\n 2020-07-24 14:54:44 19.11 19.11 21200. 19.12, 55900. 19.25\n 2020-07-24 14:54:47 19.11 19.11 7400. 19.12, 52200. 19.25\n 2020-07-24 14:54:50 19.12 19.11 4700. 19.12, 40800. 19.25\n 2020-07-24 14:54:53 19.12 19.12 41800. 19.13, 9700. 19.26\n 2020-07-24 14:54:56 19.12 19.12 40900. 19.13, 9700. 19.26\n 2020-07-24 14:54:59 19.13 19.12 44000. 19.13, 9600. 19.26\n '
dt = convert_datetime_to_str(dt)
return SimonsClient.instance().history_snapshot(**locals()) | @assert_auth
@export_as_api
def history_snapshot(order_book_id: str, bar_count: int, dt: datetime.datetime, fields: List[str]=None, skip_suspended: bool=True, include_now: bool=False, adjust_type: str='none', adjust_orig: datetime=None) -> pd.DataFrame:
'获取指定合约的历史快照数据\n \n :param order_book_id: 合约代码\n :param bar_count: 获取的历史数据数量,必填项\n :param fields: 返回数据字段。必填项。见下方列表。\n :param skip_suspended: 是否跳过停牌数据\n :param include_now: 是否包含当前数据\n :param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post\n \n .. admonition:: 可支持的数据字段\n :class: dropdown, note\n \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | fields | 中文名 | dtype | 是否是原始字段 | 注释 | \n +========================================+==========================+=========+================+=======================================================+\n | date | 交易归属日期 | <i8 | Y | yyyymmdd | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmss,由交易日当天日期和数据生成时间(交易s |\n | | | | |(交易所直接下发的)合成。 | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | last | 最新成交价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_price_10 ~ sell_price_1 | 第10 ~ 1档委托卖出价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_price_1 ~ buy_price_10 | 第1 ~ 10档委托买入价 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_volume_10 ~ sell_volume_1 | 第10 ~ 1档申卖量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_volume_1 ~ buy_volume_10 | 第1 ~ 10档申买量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | num_sell_trades_10 ~ num_sell_trades_1 | 委卖笔数10 ~ 委卖笔数1 | <f8 | Y | 委卖价1 ~ 10的委托总比数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | num_buy_trades_1 ~ num_buy_trades_10 | 委买笔数1 ~ 委买笔数10 | <f8 | Y | 委买价1 ~ 10的委托总比数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_num_trades | 成交总笔数 | <f8 | Y | 开盘至当前时刻的累计成交笔数 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_num_trades | 分笔期间成交笔数 | <f8 | 上交所:N | 当前成交总比数(total_num_trades_t) - 上一记录的成 |\n | | | | 深交所:Y | 交总比数(total_num_trades_t-1);首条记录取当前成交 |\n | | | | | 总比 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_volume | 成交总量 | <f8 | Y | 开盘至当前时刻的累计成交量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_volume | 分笔期间成交量 | <f8 | 上交所:N | 当前成交总量(total_volume_t ) - 上一记录的成 |\n | | | | 深交所:Y | 交总量(total_volume_t-1);首条记录取当前成交总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_turnover | 成交总额 | <f8 | Y | 开盘至当前时刻的累计成交额 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | current_turnover | 分笔期间成交额 | <f8 | 上交所:N | 当前成交总额(total_turnover_t) - 上一记录的成交总额 |\n | | | | 深交所:Y | (total_turnover_t-1);首条记录取当前成交总额 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_sell_order_volume | 委托卖出总量 | <f8 | Y | 是指直接到切片时间的还存在的, 所有委托卖单总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | total_buy_order_volume | 委托买入总量 | <f8 | Y | 是指直接到切片时间的还存在的,所有委托买单总量 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | wt_avg_sell_price | 加权平均委卖价格 | <f8 | Y | 单位:元 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | wt_avg_buy_price | 加权平均委买价格 | <f8 | Y | 单位:元 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | prev_close | 昨收盘价 | <f8 | Y | 上一交易日的收盘价,上交所的收盘价格是最后一分钟的成 |\n | | | | | 交均价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | open | 开盘价 | <f8 | Y | 当日开盘价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | high | 最高价 | <f8 | Y | 开盘至当前时刻所出现的最高成交价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | low | 最低价 | <f8 | Y | 开盘至当前时刻所出现的最低成交价 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | daily_close | 今日收盘价 | <f8 | Y | 该交易日的收盘价,上交所的收盘价格是最后一分钟的成交均|\n | | | | | 价(在最后一笔行情上更新,其余行值为0) | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | sell_level_no | 申卖价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_level_no | 申买价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n\n \n bbla \n \n .. code-block:: python3\n \n from simonsc.api import history_snapshot\n \n >>> dt = pd.Timestamp("2020-07-24 14:55:00")\n >>> fields=["datetime","last","buy_price_1","buy_volume_1","sell_price_1","sell_volume_1","sell_price_10"]\n >>> data = history_snapshot(order_book_id="600446.XSHG", dt=dt, bar_count=10, fields=fields)\n >>> print(data)\n \n last buy_price_1 buy_volume_1 sell_price_1 sell_volume_1 sell_price_10\n order_book_id datetime \n 600446.XSHG 2020-07-24 14:54:32 19.12 19.12 1100. 19.13, 1500. 19.26\n 2020-07-24 14:54:35 19.12 19.11 6600. 19.12, 57900. 19.25\n 2020-07-24 14:54:38 19.12 19.11 6800. 19.12, 57800. 19.25\n 2020-07-24 14:54:41 19.12 19.11 36400. 19.12, 57200. 19.25\n 2020-07-24 14:54:44 19.11 19.11 21200. 19.12, 55900. 19.25\n 2020-07-24 14:54:47 19.11 19.11 7400. 19.12, 52200. 19.25\n 2020-07-24 14:54:50 19.12 19.11 4700. 19.12, 40800. 19.25\n 2020-07-24 14:54:53 19.12 19.12 41800. 19.13, 9700. 19.26\n 2020-07-24 14:54:56 19.12 19.12 40900. 19.13, 9700. 19.26\n 2020-07-24 14:54:59 19.13 19.12 44000. 19.13, 9600. 19.26\n '
dt = convert_datetime_to_str(dt)
return SimonsClient.instance().history_snapshot(**locals())<|docstring|>获取指定合约的历史快照数据
:param order_book_id: 合约代码
:param bar_count: 获取的历史数据数量,必填项
:param fields: 返回数据字段。必填项。见下方列表。
:param skip_suspended: 是否跳过停牌数据
:param include_now: 是否包含当前数据
:param adjust_type: 复权类型,默认为前复权 pre;可选 pre, none, post
.. admonition:: 可支持的数据字段
:class: dropdown, note
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| fields | 中文名 | dtype | 是否是原始字段 | 注释 |
+========================================+==========================+=========+================+=======================================================+
| date | 交易归属日期 | <i8 | Y | yyyymmdd |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmss,由交易日当天日期和数据生成时间(交易s |
| | | | |(交易所直接下发的)合成。 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| last | 最新成交价 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| sell_price_10 ~ sell_price_1 | 第10 ~ 1档委托卖出价 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| buy_price_1 ~ buy_price_10 | 第1 ~ 10档委托买入价 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| sell_volume_10 ~ sell_volume_1 | 第10 ~ 1档申卖量 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| buy_volume_1 ~ buy_volume_10 | 第1 ~ 10档申买量 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| num_sell_trades_10 ~ num_sell_trades_1 | 委卖笔数10 ~ 委卖笔数1 | <f8 | Y | 委卖价1 ~ 10的委托总比数 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| num_buy_trades_1 ~ num_buy_trades_10 | 委买笔数1 ~ 委买笔数10 | <f8 | Y | 委买价1 ~ 10的委托总比数 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_num_trades | 成交总笔数 | <f8 | Y | 开盘至当前时刻的累计成交笔数 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| current_num_trades | 分笔期间成交笔数 | <f8 | 上交所:N | 当前成交总比数(total_num_trades_t) - 上一记录的成 |
| | | | 深交所:Y | 交总比数(total_num_trades_t-1);首条记录取当前成交 |
| | | | | 总比 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_volume | 成交总量 | <f8 | Y | 开盘至当前时刻的累计成交量 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| current_volume | 分笔期间成交量 | <f8 | 上交所:N | 当前成交总量(total_volume_t ) - 上一记录的成 |
| | | | 深交所:Y | 交总量(total_volume_t-1);首条记录取当前成交总量 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_turnover | 成交总额 | <f8 | Y | 开盘至当前时刻的累计成交额 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| current_turnover | 分笔期间成交额 | <f8 | 上交所:N | 当前成交总额(total_turnover_t) - 上一记录的成交总额 |
| | | | 深交所:Y | (total_turnover_t-1);首条记录取当前成交总额 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_sell_order_volume | 委托卖出总量 | <f8 | Y | 是指直接到切片时间的还存在的, 所有委托卖单总量 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| total_buy_order_volume | 委托买入总量 | <f8 | Y | 是指直接到切片时间的还存在的,所有委托买单总量 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| wt_avg_sell_price | 加权平均委卖价格 | <f8 | Y | 单位:元 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| wt_avg_buy_price | 加权平均委买价格 | <f8 | Y | 单位:元 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| prev_close | 昨收盘价 | <f8 | Y | 上一交易日的收盘价,上交所的收盘价格是最后一分钟的成 |
| | | | | 交均价 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| open | 开盘价 | <f8 | Y | 当日开盘价 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| high | 最高价 | <f8 | Y | 开盘至当前时刻所出现的最高成交价 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| low | 最低价 | <f8 | Y | 开盘至当前时刻所出现的最低成交价 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| daily_close | 今日收盘价 | <f8 | Y | 该交易日的收盘价,上交所的收盘价格是最后一分钟的成交均|
| | | | | 价(在最后一笔行情上更新,其余行值为0) |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| sell_level_no | 申卖价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| buy_level_no | 申买价格档位数 | <f8 | Y | 表示揭示的档位数,取值(0,10) |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
bbla
.. code-block:: python3
from simonsc.api import history_snapshot
>>> dt = pd.Timestamp("2020-07-24 14:55:00")
>>> fields=["datetime","last","buy_price_1","buy_volume_1","sell_price_1","sell_volume_1","sell_price_10"]
>>> data = history_snapshot(order_book_id="600446.XSHG", dt=dt, bar_count=10, fields=fields)
>>> print(data)
last buy_price_1 buy_volume_1 sell_price_1 sell_volume_1 sell_price_10
order_book_id datetime
600446.XSHG 2020-07-24 14:54:32 19.12 19.12 1100. 19.13, 1500. 19.26
2020-07-24 14:54:35 19.12 19.11 6600. 19.12, 57900. 19.25
2020-07-24 14:54:38 19.12 19.11 6800. 19.12, 57800. 19.25
2020-07-24 14:54:41 19.12 19.11 36400. 19.12, 57200. 19.25
2020-07-24 14:54:44 19.11 19.11 21200. 19.12, 55900. 19.25
2020-07-24 14:54:47 19.11 19.11 7400. 19.12, 52200. 19.25
2020-07-24 14:54:50 19.12 19.11 4700. 19.12, 40800. 19.25
2020-07-24 14:54:53 19.12 19.12 41800. 19.13, 9700. 19.26
2020-07-24 14:54:56 19.12 19.12 40900. 19.13, 9700. 19.26
2020-07-24 14:54:59 19.13 19.12 44000. 19.13, 9600. 19.26<|endoftext|> |
22f0cf81279a78a0c45fed7be6c33089f17d11813be1f9363af7da6347424765 | @assert_auth
@export_as_api
def history_transaction(order_book_id: str, tick_count: int, start_dt: datetime, end_dt: datetime, fields: List[str]=None, include_prehours: bool=False) -> np.ndarray:
'获取指定合约的历史快照数据\n \n :param order_book_id: 合约代码\n :param tick_count: 获取的逐笔成交条数, 与start_dt, end_dt, 三者必填两者\n :param start_dt: 获取数据的起始日期时间,e.g. “2017-01-12 09:33:05”\n :param end_dt: 获取数据的截止日期时间,e.g. “2017-01-12 09:33:05”\n :param fields: 返回数据字段。必填项。见下方列表。\n :param include_prehours: 是否包含盘前数据\n \n .. admonition:: 可支持的数据字段\n :class: dropdown, note\n \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | fields | 中文名 | dtype | 是否是原始字段 | 注释 | \n +========================================+==========================+=========+================+=======================================================+\n | date | 交易归属日期 | <i8 | Y | yyyymmdd | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmssmmm,由交易日当天日期和数据生成时间 |\n | | | | |(交易所直接下发的)合成。精确到10毫秒级 | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_price | 成交价格 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_volume | 成交数量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_turnover | 成交金额 | <f8 | 上交所:Y | 成交价格 X 成交量 |\n | | | | 深交所:N | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_sell_flag | 内外盘标志 | O | 上交所:Y | 上交所:(深交所全部为NULL) |\n | | | | 深交所:N | - 2013-04-15前,没有下发该字段,值为NULL; |\n | | | | | - 2013-04-15至今,下发了该字段,字段值含义分别如下: |\n | | | | | B:外盘,主动买;S:内盘,主动卖;N:未知 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n\n \n bbla \n \n .. code-block:: python3\n \n from simons.api import history_transaction\n \n >>> dt = pd.Timestamp("2020-07-24 14:55:00")\n >>> fields=["datetime","trade_price","trade_volume","trade_turnover"]\n >>> data = history_transaction(order_book_id="600446.XSHG", start_dt=dt, bar_count=10, fields=fields)\n >>> print(data)\n \n ' | 获取指定合约的历史快照数据
:param order_book_id: 合约代码
:param tick_count: 获取的逐笔成交条数, 与start_dt, end_dt, 三者必填两者
:param start_dt: 获取数据的起始日期时间,e.g. “2017-01-12 09:33:05”
:param end_dt: 获取数据的截止日期时间,e.g. “2017-01-12 09:33:05”
:param fields: 返回数据字段。必填项。见下方列表。
:param include_prehours: 是否包含盘前数据
.. admonition:: 可支持的数据字段
:class: dropdown, note
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| fields | 中文名 | dtype | 是否是原始字段 | 注释 |
+========================================+==========================+=========+================+=======================================================+
| date | 交易归属日期 | <i8 | Y | yyyymmdd |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmssmmm,由交易日当天日期和数据生成时间 |
| | | | |(交易所直接下发的)合成。精确到10毫秒级 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| trade_price | 成交价格 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| trade_volume | 成交数量 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| trade_turnover | 成交金额 | <f8 | 上交所:Y | 成交价格 X 成交量 |
| | | | 深交所:N | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| buy_sell_flag | 内外盘标志 | O | 上交所:Y | 上交所:(深交所全部为NULL) |
| | | | 深交所:N | - 2013-04-15前,没有下发该字段,值为NULL; |
| | | | | - 2013-04-15至今,下发了该字段,字段值含义分别如下: |
| | | | | B:外盘,主动买;S:内盘,主动卖;N:未知 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
bbla
.. code-block:: python3
from simons.api import history_transaction
>>> dt = pd.Timestamp("2020-07-24 14:55:00")
>>> fields=["datetime","trade_price","trade_volume","trade_turnover"]
>>> data = history_transaction(order_book_id="600446.XSHG", start_dt=dt, bar_count=10, fields=fields)
>>> print(data) | simonsc/api/base_api.py | history_transaction | jzkj-luolinh/simonsc | 0 | python | @assert_auth
@export_as_api
def history_transaction(order_book_id: str, tick_count: int, start_dt: datetime, end_dt: datetime, fields: List[str]=None, include_prehours: bool=False) -> np.ndarray:
'获取指定合约的历史快照数据\n \n :param order_book_id: 合约代码\n :param tick_count: 获取的逐笔成交条数, 与start_dt, end_dt, 三者必填两者\n :param start_dt: 获取数据的起始日期时间,e.g. “2017-01-12 09:33:05”\n :param end_dt: 获取数据的截止日期时间,e.g. “2017-01-12 09:33:05”\n :param fields: 返回数据字段。必填项。见下方列表。\n :param include_prehours: 是否包含盘前数据\n \n .. admonition:: 可支持的数据字段\n :class: dropdown, note\n \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | fields | 中文名 | dtype | 是否是原始字段 | 注释 | \n +========================================+==========================+=========+================+=======================================================+\n | date | 交易归属日期 | <i8 | Y | yyyymmdd | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmssmmm,由交易日当天日期和数据生成时间 |\n | | | | |(交易所直接下发的)合成。精确到10毫秒级 | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_price | 成交价格 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_volume | 成交数量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_turnover | 成交金额 | <f8 | 上交所:Y | 成交价格 X 成交量 |\n | | | | 深交所:N | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_sell_flag | 内外盘标志 | O | 上交所:Y | 上交所:(深交所全部为NULL) |\n | | | | 深交所:N | - 2013-04-15前,没有下发该字段,值为NULL; |\n | | | | | - 2013-04-15至今,下发了该字段,字段值含义分别如下: |\n | | | | | B:外盘,主动买;S:内盘,主动卖;N:未知 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n\n \n bbla \n \n .. code-block:: python3\n \n from simons.api import history_transaction\n \n >>> dt = pd.Timestamp("2020-07-24 14:55:00")\n >>> fields=["datetime","trade_price","trade_volume","trade_turnover"]\n >>> data = history_transaction(order_book_id="600446.XSHG", start_dt=dt, bar_count=10, fields=fields)\n >>> print(data)\n \n ' | @assert_auth
@export_as_api
def history_transaction(order_book_id: str, tick_count: int, start_dt: datetime, end_dt: datetime, fields: List[str]=None, include_prehours: bool=False) -> np.ndarray:
'获取指定合约的历史快照数据\n \n :param order_book_id: 合约代码\n :param tick_count: 获取的逐笔成交条数, 与start_dt, end_dt, 三者必填两者\n :param start_dt: 获取数据的起始日期时间,e.g. “2017-01-12 09:33:05”\n :param end_dt: 获取数据的截止日期时间,e.g. “2017-01-12 09:33:05”\n :param fields: 返回数据字段。必填项。见下方列表。\n :param include_prehours: 是否包含盘前数据\n \n .. admonition:: 可支持的数据字段\n :class: dropdown, note\n \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | fields | 中文名 | dtype | 是否是原始字段 | 注释 | \n +========================================+==========================+=========+================+=======================================================+\n | date | 交易归属日期 | <i8 | Y | yyyymmdd | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmssmmm,由交易日当天日期和数据生成时间 |\n | | | | |(交易所直接下发的)合成。精确到10毫秒级 | \n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_price | 成交价格 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_volume | 成交数量 | <f8 | Y | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | trade_turnover | 成交金额 | <f8 | 上交所:Y | 成交价格 X 成交量 |\n | | | | 深交所:N | |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n | buy_sell_flag | 内外盘标志 | O | 上交所:Y | 上交所:(深交所全部为NULL) |\n | | | | 深交所:N | - 2013-04-15前,没有下发该字段,值为NULL; |\n | | | | | - 2013-04-15至今,下发了该字段,字段值含义分别如下: |\n | | | | | B:外盘,主动买;S:内盘,主动卖;N:未知 |\n +----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+\n\n \n bbla \n \n .. code-block:: python3\n \n from simons.api import history_transaction\n \n >>> dt = pd.Timestamp("2020-07-24 14:55:00")\n >>> fields=["datetime","trade_price","trade_volume","trade_turnover"]\n >>> data = history_transaction(order_book_id="600446.XSHG", start_dt=dt, bar_count=10, fields=fields)\n >>> print(data)\n \n '<|docstring|>获取指定合约的历史快照数据
:param order_book_id: 合约代码
:param tick_count: 获取的逐笔成交条数, 与start_dt, end_dt, 三者必填两者
:param start_dt: 获取数据的起始日期时间,e.g. “2017-01-12 09:33:05”
:param end_dt: 获取数据的截止日期时间,e.g. “2017-01-12 09:33:05”
:param fields: 返回数据字段。必填项。见下方列表。
:param include_prehours: 是否包含盘前数据
.. admonition:: 可支持的数据字段
:class: dropdown, note
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| fields | 中文名 | dtype | 是否是原始字段 | 注释 |
+========================================+==========================+=========+================+=======================================================+
| date | 交易归属日期 | <i8 | Y | yyyymmdd |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| datetime | 交易发生时间 | <i8 | C | yyyymmddhhmmssmmm,由交易日当天日期和数据生成时间 |
| | | | |(交易所直接下发的)合成。精确到10毫秒级 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| trade_price | 成交价格 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| trade_volume | 成交数量 | <f8 | Y | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| trade_turnover | 成交金额 | <f8 | 上交所:Y | 成交价格 X 成交量 |
| | | | 深交所:N | |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
| buy_sell_flag | 内外盘标志 | O | 上交所:Y | 上交所:(深交所全部为NULL) |
| | | | 深交所:N | - 2013-04-15前,没有下发该字段,值为NULL; |
| | | | | - 2013-04-15至今,下发了该字段,字段值含义分别如下: |
| | | | | B:外盘,主动买;S:内盘,主动卖;N:未知 |
+----------------------------------------+--------------------------+---------+----------------+-------------------------------------------------------+
bbla
.. code-block:: python3
from simons.api import history_transaction
>>> dt = pd.Timestamp("2020-07-24 14:55:00")
>>> fields=["datetime","trade_price","trade_volume","trade_turnover"]
>>> data = history_transaction(order_book_id="600446.XSHG", start_dt=dt, bar_count=10, fields=fields)
>>> print(data)<|endoftext|> |
a0a9871528ffd1c90b3005d82b08eeae7f20d78e0a8df74434ac2deaa8ef3907 | @pytest.mark.parametrize('model_class', [TemplateModel, MultipleInputModel, MultipleOutputModel, DictInputDictOutputModel])
@pytest.mark.parametrize('mix_data', [True, False])
@pytest.mark.parametrize('device', [pytest.param(torch.device('cpu')), pytest.param(torch.device('cuda', 0), marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification(model_class, mix_data, device):
' Test detection of batch gradient mixing with different PyTorch models. '
model = model_class(mix_data).to(device)
is_valid = (not mix_data)
verification = BatchGradientVerification(model)
result = verification.check(input_array=model.input_array)
assert (result == is_valid) | Test detection of batch gradient mixing with different PyTorch models. | tests/callbacks/verification/test_batch_gradient.py | test_batch_gradient_verification | BartekRoszak/pytorch-lightning-bolts | 2 | python | @pytest.mark.parametrize('model_class', [TemplateModel, MultipleInputModel, MultipleOutputModel, DictInputDictOutputModel])
@pytest.mark.parametrize('mix_data', [True, False])
@pytest.mark.parametrize('device', [pytest.param(torch.device('cpu')), pytest.param(torch.device('cuda', 0), marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification(model_class, mix_data, device):
' '
model = model_class(mix_data).to(device)
is_valid = (not mix_data)
verification = BatchGradientVerification(model)
result = verification.check(input_array=model.input_array)
assert (result == is_valid) | @pytest.mark.parametrize('model_class', [TemplateModel, MultipleInputModel, MultipleOutputModel, DictInputDictOutputModel])
@pytest.mark.parametrize('mix_data', [True, False])
@pytest.mark.parametrize('device', [pytest.param(torch.device('cpu')), pytest.param(torch.device('cuda', 0), marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification(model_class, mix_data, device):
' '
model = model_class(mix_data).to(device)
is_valid = (not mix_data)
verification = BatchGradientVerification(model)
result = verification.check(input_array=model.input_array)
assert (result == is_valid)<|docstring|>Test detection of batch gradient mixing with different PyTorch models.<|endoftext|> |
40c21d115c4bc648d98cc1c77cda2d98030762a0a70c10cf88b591aa6255eba5 | @pytest.mark.parametrize('mix_data', [True, False])
@pytest.mark.parametrize('device', [pytest.param(torch.device('cpu')), pytest.param(torch.device('cuda', 0), marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification_pl_module(mix_data, device):
' Test detection of batch gradient mixing with a LightningModule. '
model = LitModel(mix_data).to(device)
is_valid = (not mix_data)
verification = BatchGradientVerification(model)
result = verification.check(input_array=None)
assert (result == is_valid) | Test detection of batch gradient mixing with a LightningModule. | tests/callbacks/verification/test_batch_gradient.py | test_batch_gradient_verification_pl_module | BartekRoszak/pytorch-lightning-bolts | 2 | python | @pytest.mark.parametrize('mix_data', [True, False])
@pytest.mark.parametrize('device', [pytest.param(torch.device('cpu')), pytest.param(torch.device('cuda', 0), marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification_pl_module(mix_data, device):
' '
model = LitModel(mix_data).to(device)
is_valid = (not mix_data)
verification = BatchGradientVerification(model)
result = verification.check(input_array=None)
assert (result == is_valid) | @pytest.mark.parametrize('mix_data', [True, False])
@pytest.mark.parametrize('device', [pytest.param(torch.device('cpu')), pytest.param(torch.device('cuda', 0), marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification_pl_module(mix_data, device):
' '
model = LitModel(mix_data).to(device)
is_valid = (not mix_data)
verification = BatchGradientVerification(model)
result = verification.check(input_array=None)
assert (result == is_valid)<|docstring|>Test detection of batch gradient mixing with a LightningModule.<|endoftext|> |
bd49d0cc6cfa5bd5a63dd33722dd780b7b551fc85229bc601d58e5794dfa4d54 | @pytest.mark.parametrize('gpus', [pytest.param(0), pytest.param(1, marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification_callback(gpus):
' Test detection of batch gradient mixing with the callback implementation. '
trainer = Trainer(gpus=gpus)
model = LitModel(mix_data=True)
expected = 'Your model is mixing data across the batch dimension.'
callback = BatchGradientVerificationCallback()
with pytest.warns(UserWarning, match=expected):
callback.on_train_start(trainer, model)
callback = BatchGradientVerificationCallback(error=True)
with pytest.raises(RuntimeError, match=expected):
callback.on_train_start(trainer, model) | Test detection of batch gradient mixing with the callback implementation. | tests/callbacks/verification/test_batch_gradient.py | test_batch_gradient_verification_callback | BartekRoszak/pytorch-lightning-bolts | 2 | python | @pytest.mark.parametrize('gpus', [pytest.param(0), pytest.param(1, marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification_callback(gpus):
' '
trainer = Trainer(gpus=gpus)
model = LitModel(mix_data=True)
expected = 'Your model is mixing data across the batch dimension.'
callback = BatchGradientVerificationCallback()
with pytest.warns(UserWarning, match=expected):
callback.on_train_start(trainer, model)
callback = BatchGradientVerificationCallback(error=True)
with pytest.raises(RuntimeError, match=expected):
callback.on_train_start(trainer, model) | @pytest.mark.parametrize('gpus', [pytest.param(0), pytest.param(1, marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='Test requires GPU'))])
def test_batch_gradient_verification_callback(gpus):
' '
trainer = Trainer(gpus=gpus)
model = LitModel(mix_data=True)
expected = 'Your model is mixing data across the batch dimension.'
callback = BatchGradientVerificationCallback()
with pytest.warns(UserWarning, match=expected):
callback.on_train_start(trainer, model)
callback = BatchGradientVerificationCallback(error=True)
with pytest.raises(RuntimeError, match=expected):
callback.on_train_start(trainer, model)<|docstring|>Test detection of batch gradient mixing with the callback implementation.<|endoftext|> |
d9943b3598ecd00c102fd33ace2611c9ad31b0a999755d2a6a98b1ec7f227360 | def test_batch_verification_raises_on_batch_size_1():
' Test that batch gradient verification only works with batch size greater than one. '
model = TemplateModel()
verification = BatchGradientVerification(model)
small_batch = model.input_array[0:1]
with pytest.raises(MisconfigurationException, match='Batch size must be greater than 1'):
verification.check(input_array=small_batch) | Test that batch gradient verification only works with batch size greater than one. | tests/callbacks/verification/test_batch_gradient.py | test_batch_verification_raises_on_batch_size_1 | BartekRoszak/pytorch-lightning-bolts | 2 | python | def test_batch_verification_raises_on_batch_size_1():
' '
model = TemplateModel()
verification = BatchGradientVerification(model)
small_batch = model.input_array[0:1]
with pytest.raises(MisconfigurationException, match='Batch size must be greater than 1'):
verification.check(input_array=small_batch) | def test_batch_verification_raises_on_batch_size_1():
' '
model = TemplateModel()
verification = BatchGradientVerification(model)
small_batch = model.input_array[0:1]
with pytest.raises(MisconfigurationException, match='Batch size must be greater than 1'):
verification.check(input_array=small_batch)<|docstring|>Test that batch gradient verification only works with batch size greater than one.<|endoftext|> |
f3c20f0c01c838c5d4d3fc36cc6e5eb60ea793acd91c3c6f16202440768a4283 | def test_batch_verification_calls_custom_input_output_mappings():
' Test that batch gradient verification can support different input and outputs with user-provided mappings. '
model = MultipleInputModel()
def input_mapping(inputs):
assert (isinstance(inputs, tuple) and (len(inputs) == 2))
return [inputs[0]]
def output_mapping(outputs):
assert isinstance(outputs, torch.Tensor)
return torch.cat((outputs, outputs), 1)
mocked_input_mapping = Mock(wraps=input_mapping)
mocked_output_mapping = Mock(wraps=output_mapping)
verification = BatchGradientVerification(model)
verification.check(model.input_array, input_mapping=mocked_input_mapping, output_mapping=mocked_output_mapping)
mocked_input_mapping.assert_called_once()
mocked_output_mapping.assert_called_once() | Test that batch gradient verification can support different input and outputs with user-provided mappings. | tests/callbacks/verification/test_batch_gradient.py | test_batch_verification_calls_custom_input_output_mappings | BartekRoszak/pytorch-lightning-bolts | 2 | python | def test_batch_verification_calls_custom_input_output_mappings():
' '
model = MultipleInputModel()
def input_mapping(inputs):
assert (isinstance(inputs, tuple) and (len(inputs) == 2))
return [inputs[0]]
def output_mapping(outputs):
assert isinstance(outputs, torch.Tensor)
return torch.cat((outputs, outputs), 1)
mocked_input_mapping = Mock(wraps=input_mapping)
mocked_output_mapping = Mock(wraps=output_mapping)
verification = BatchGradientVerification(model)
verification.check(model.input_array, input_mapping=mocked_input_mapping, output_mapping=mocked_output_mapping)
mocked_input_mapping.assert_called_once()
mocked_output_mapping.assert_called_once() | def test_batch_verification_calls_custom_input_output_mappings():
' '
model = MultipleInputModel()
def input_mapping(inputs):
assert (isinstance(inputs, tuple) and (len(inputs) == 2))
return [inputs[0]]
def output_mapping(outputs):
assert isinstance(outputs, torch.Tensor)
return torch.cat((outputs, outputs), 1)
mocked_input_mapping = Mock(wraps=input_mapping)
mocked_output_mapping = Mock(wraps=output_mapping)
verification = BatchGradientVerification(model)
verification.check(model.input_array, input_mapping=mocked_input_mapping, output_mapping=mocked_output_mapping)
mocked_input_mapping.assert_called_once()
mocked_output_mapping.assert_called_once()<|docstring|>Test that batch gradient verification can support different input and outputs with user-provided mappings.<|endoftext|> |
34dac8e32d60592d987d9b982874176d3956555289668d23d637cb49b3e533d4 | def test_default_input_mapping():
' Test the data types and nesting the default input mapping can handle. '
b = 3
tensor0 = torch.rand(b, 2, 5)
tensor1 = torch.rand(b, 9)
tensor2 = torch.rand(b, 5, 1)
data = tensor0.double()
output = default_input_mapping(data)
assert (len(output) == 1)
assert (output[0] is data)
data = ('foo', tensor1, tensor2, [])
(out1, out2) = default_input_mapping(data)
assert (out1 is tensor1)
assert (out2 is tensor2)
data = {'one': ['foo', tensor2], 'two': tensor0}
(out2, out0) = default_input_mapping(data)
assert (out2 is tensor2)
assert (out0 is tensor0) | Test the data types and nesting the default input mapping can handle. | tests/callbacks/verification/test_batch_gradient.py | test_default_input_mapping | BartekRoszak/pytorch-lightning-bolts | 2 | python | def test_default_input_mapping():
' '
b = 3
tensor0 = torch.rand(b, 2, 5)
tensor1 = torch.rand(b, 9)
tensor2 = torch.rand(b, 5, 1)
data = tensor0.double()
output = default_input_mapping(data)
assert (len(output) == 1)
assert (output[0] is data)
data = ('foo', tensor1, tensor2, [])
(out1, out2) = default_input_mapping(data)
assert (out1 is tensor1)
assert (out2 is tensor2)
data = {'one': ['foo', tensor2], 'two': tensor0}
(out2, out0) = default_input_mapping(data)
assert (out2 is tensor2)
assert (out0 is tensor0) | def test_default_input_mapping():
' '
b = 3
tensor0 = torch.rand(b, 2, 5)
tensor1 = torch.rand(b, 9)
tensor2 = torch.rand(b, 5, 1)
data = tensor0.double()
output = default_input_mapping(data)
assert (len(output) == 1)
assert (output[0] is data)
data = ('foo', tensor1, tensor2, [])
(out1, out2) = default_input_mapping(data)
assert (out1 is tensor1)
assert (out2 is tensor2)
data = {'one': ['foo', tensor2], 'two': tensor0}
(out2, out0) = default_input_mapping(data)
assert (out2 is tensor2)
assert (out0 is tensor0)<|docstring|>Test the data types and nesting the default input mapping can handle.<|endoftext|> |
6946fe50d32b4d844232029eb0108c9d8096ffaeef31be42ceac83785593a289 | def test_default_output_mapping():
' Test the data types and nesting the default output mapping can handle. '
b = 3
tensor0 = torch.rand(b, 2, 5)
tensor1 = torch.rand(b, 9)
tensor2 = torch.rand(b, 5, 1)
tensor3 = torch.rand(b)
scalar = torch.tensor(3.14)
data = tensor0.double()
output = default_output_mapping(data)
assert (output is data)
data = (tensor0, None, tensor1, 'foo', [tensor2])
expected = torch.cat((tensor0.view(b, (- 1)), tensor1.view(b, (- 1)), tensor2.view(b, (- 1))), dim=1)
output = default_output_mapping(data)
assert torch.all((output == expected))
data = {'one': tensor1, 'two': {'three': tensor3.double()}, 'four': scalar, 'five': [tensor0, tensor0]}
expected = torch.cat((tensor1.view(b, (- 1)), tensor3.view(b, (- 1)), tensor0.view(b, (- 1)), tensor0.view(b, (- 1))), dim=1)
output = default_output_mapping(data)
assert torch.all((output == expected)) | Test the data types and nesting the default output mapping can handle. | tests/callbacks/verification/test_batch_gradient.py | test_default_output_mapping | BartekRoszak/pytorch-lightning-bolts | 2 | python | def test_default_output_mapping():
' '
b = 3
tensor0 = torch.rand(b, 2, 5)
tensor1 = torch.rand(b, 9)
tensor2 = torch.rand(b, 5, 1)
tensor3 = torch.rand(b)
scalar = torch.tensor(3.14)
data = tensor0.double()
output = default_output_mapping(data)
assert (output is data)
data = (tensor0, None, tensor1, 'foo', [tensor2])
expected = torch.cat((tensor0.view(b, (- 1)), tensor1.view(b, (- 1)), tensor2.view(b, (- 1))), dim=1)
output = default_output_mapping(data)
assert torch.all((output == expected))
data = {'one': tensor1, 'two': {'three': tensor3.double()}, 'four': scalar, 'five': [tensor0, tensor0]}
expected = torch.cat((tensor1.view(b, (- 1)), tensor3.view(b, (- 1)), tensor0.view(b, (- 1)), tensor0.view(b, (- 1))), dim=1)
output = default_output_mapping(data)
assert torch.all((output == expected)) | def test_default_output_mapping():
' '
b = 3
tensor0 = torch.rand(b, 2, 5)
tensor1 = torch.rand(b, 9)
tensor2 = torch.rand(b, 5, 1)
tensor3 = torch.rand(b)
scalar = torch.tensor(3.14)
data = tensor0.double()
output = default_output_mapping(data)
assert (output is data)
data = (tensor0, None, tensor1, 'foo', [tensor2])
expected = torch.cat((tensor0.view(b, (- 1)), tensor1.view(b, (- 1)), tensor2.view(b, (- 1))), dim=1)
output = default_output_mapping(data)
assert torch.all((output == expected))
data = {'one': tensor1, 'two': {'three': tensor3.double()}, 'four': scalar, 'five': [tensor0, tensor0]}
expected = torch.cat((tensor1.view(b, (- 1)), tensor3.view(b, (- 1)), tensor0.view(b, (- 1)), tensor0.view(b, (- 1))), dim=1)
output = default_output_mapping(data)
assert torch.all((output == expected))<|docstring|>Test the data types and nesting the default output mapping can handle.<|endoftext|> |
2bac61a4432dcbcbab318e1602e3d20562ed0e4875db17742bdb8bc0ca75c73c | def __init__(self, mix_data=False):
' Base model for testing. The setting ``mix_data=True`` simulates a wrong implementation. '
super().__init__()
self.mix_data = mix_data
self.linear = nn.Linear(10, 5)
self.input_array = torch.rand(10, 5, 2) | Base model for testing. The setting ``mix_data=True`` simulates a wrong implementation. | tests/callbacks/verification/test_batch_gradient.py | __init__ | BartekRoszak/pytorch-lightning-bolts | 2 | python | def __init__(self, mix_data=False):
' '
super().__init__()
self.mix_data = mix_data
self.linear = nn.Linear(10, 5)
self.input_array = torch.rand(10, 5, 2) | def __init__(self, mix_data=False):
' '
super().__init__()
self.mix_data = mix_data
self.linear = nn.Linear(10, 5)
self.input_array = torch.rand(10, 5, 2)<|docstring|>Base model for testing. The setting ``mix_data=True`` simulates a wrong implementation.<|endoftext|> |
410608d33c6ee895203d4d8a67d8d87a86dca66058c5a5c532c4a2ba88606eb9 | def text_stamp():
'Dummy function to be replaced with decorated function'
return True | Dummy function to be replaced with decorated function | build/lib/forest/barc/text_stamp.py | text_stamp | cemac/forest | 1 | python | def text_stamp():
return True | def text_stamp():
return True<|docstring|>Dummy function to be replaced with decorated function<|endoftext|> |
0acec07ff93c1c325636a71c1b0044ab200bec2d03b073d3417096865f3969b5 | def init_state(self, init_params: Any, hyperparams_prox: Any, *args, **kwargs) -> ProxGradState:
'Initialize the solver state.\n\n Args:\n init_params: pytree containing the initial parameters.\n hyperparams_prox: pytree containing hyperparameters of prox.\n *args: additional positional arguments to be passed to ``fun``.\n **kwargs: additional keyword arguments to be passed to ``fun``.\n Returns:\n state\n '
del hyperparams_prox, args, kwargs
if self.acceleration:
state = ProxGradState(iter_num=jnp.asarray(0), velocity=init_params, t=jnp.asarray(1.0), stepsize=jnp.asarray(1.0), error=jnp.asarray(jnp.inf))
else:
state = ProxGradState(iter_num=jnp.asarray(0), stepsize=jnp.asarray(1.0), error=jnp.asarray(jnp.inf))
return state | Initialize the solver state.
Args:
init_params: pytree containing the initial parameters.
hyperparams_prox: pytree containing hyperparameters of prox.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Returns:
state | jaxopt/_src/proximal_gradient.py | init_state | fabianp/jaxopt | 434 | python | def init_state(self, init_params: Any, hyperparams_prox: Any, *args, **kwargs) -> ProxGradState:
'Initialize the solver state.\n\n Args:\n init_params: pytree containing the initial parameters.\n hyperparams_prox: pytree containing hyperparameters of prox.\n *args: additional positional arguments to be passed to ``fun``.\n **kwargs: additional keyword arguments to be passed to ``fun``.\n Returns:\n state\n '
del hyperparams_prox, args, kwargs
if self.acceleration:
state = ProxGradState(iter_num=jnp.asarray(0), velocity=init_params, t=jnp.asarray(1.0), stepsize=jnp.asarray(1.0), error=jnp.asarray(jnp.inf))
else:
state = ProxGradState(iter_num=jnp.asarray(0), stepsize=jnp.asarray(1.0), error=jnp.asarray(jnp.inf))
return state | def init_state(self, init_params: Any, hyperparams_prox: Any, *args, **kwargs) -> ProxGradState:
'Initialize the solver state.\n\n Args:\n init_params: pytree containing the initial parameters.\n hyperparams_prox: pytree containing hyperparameters of prox.\n *args: additional positional arguments to be passed to ``fun``.\n **kwargs: additional keyword arguments to be passed to ``fun``.\n Returns:\n state\n '
del hyperparams_prox, args, kwargs
if self.acceleration:
state = ProxGradState(iter_num=jnp.asarray(0), velocity=init_params, t=jnp.asarray(1.0), stepsize=jnp.asarray(1.0), error=jnp.asarray(jnp.inf))
else:
state = ProxGradState(iter_num=jnp.asarray(0), stepsize=jnp.asarray(1.0), error=jnp.asarray(jnp.inf))
return state<|docstring|>Initialize the solver state.
Args:
init_params: pytree containing the initial parameters.
hyperparams_prox: pytree containing hyperparameters of prox.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Returns:
state<|endoftext|> |
a0ebdddd76149296edbca491074256bfb5eb9a9d6a6e15a61ee2e2532695ad63 | def update(self, params: Any, state: NamedTuple, hyperparams_prox: Any, *args, **kwargs) -> base.OptStep:
'Performs one iteration of proximal gradient.\n\n Args:\n params: pytree containing the parameters.\n state: named tuple containing the solver state.\n hyperparams_prox: pytree containing hyperparameters of prox.\n *args: additional positional arguments to be passed to ``fun``.\n **kwargs: additional keyword arguments to be passed to ``fun``.\n Returns:\n (params, state)\n '
f = (self._update_accel if self.acceleration else self._update)
return f(params, state, hyperparams_prox, args, kwargs) | Performs one iteration of proximal gradient.
Args:
params: pytree containing the parameters.
state: named tuple containing the solver state.
hyperparams_prox: pytree containing hyperparameters of prox.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Returns:
(params, state) | jaxopt/_src/proximal_gradient.py | update | fabianp/jaxopt | 434 | python | def update(self, params: Any, state: NamedTuple, hyperparams_prox: Any, *args, **kwargs) -> base.OptStep:
'Performs one iteration of proximal gradient.\n\n Args:\n params: pytree containing the parameters.\n state: named tuple containing the solver state.\n hyperparams_prox: pytree containing hyperparameters of prox.\n *args: additional positional arguments to be passed to ``fun``.\n **kwargs: additional keyword arguments to be passed to ``fun``.\n Returns:\n (params, state)\n '
f = (self._update_accel if self.acceleration else self._update)
return f(params, state, hyperparams_prox, args, kwargs) | def update(self, params: Any, state: NamedTuple, hyperparams_prox: Any, *args, **kwargs) -> base.OptStep:
'Performs one iteration of proximal gradient.\n\n Args:\n params: pytree containing the parameters.\n state: named tuple containing the solver state.\n hyperparams_prox: pytree containing hyperparameters of prox.\n *args: additional positional arguments to be passed to ``fun``.\n **kwargs: additional keyword arguments to be passed to ``fun``.\n Returns:\n (params, state)\n '
f = (self._update_accel if self.acceleration else self._update)
return f(params, state, hyperparams_prox, args, kwargs)<|docstring|>Performs one iteration of proximal gradient.
Args:
params: pytree containing the parameters.
state: named tuple containing the solver state.
hyperparams_prox: pytree containing hyperparameters of prox.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Returns:
(params, state)<|endoftext|> |
e240760ec73a62a1b6f3c7219430a8cf3bf50168187f56ac0ab856d640d7cf1e | def optimality_fun(self, sol, hyperparams_prox, *args, **kwargs):
'Optimality function mapping compatible with ``@custom_root``.'
fp = self._fixed_point_fun(sol, hyperparams_prox, args, kwargs)
return tree_sub(fp, sol) | Optimality function mapping compatible with ``@custom_root``. | jaxopt/_src/proximal_gradient.py | optimality_fun | fabianp/jaxopt | 434 | python | def optimality_fun(self, sol, hyperparams_prox, *args, **kwargs):
fp = self._fixed_point_fun(sol, hyperparams_prox, args, kwargs)
return tree_sub(fp, sol) | def optimality_fun(self, sol, hyperparams_prox, *args, **kwargs):
fp = self._fixed_point_fun(sol, hyperparams_prox, args, kwargs)
return tree_sub(fp, sol)<|docstring|>Optimality function mapping compatible with ``@custom_root``.<|endoftext|> |
84c153784da4e9d0838b4359e3a64fb67589cfbf8ae9cd31932feb03a6a79b80 | def run():
'Runs basic example.'
single_graph = jraph.GraphsTuple(n_node=np.asarray([3]), n_edge=np.asarray([2]), nodes=np.ones((3, 4)), edges=np.ones((2, 5)), globals=np.ones((1, 6)), senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info('Single graph %r', single_graph)
nested_graph = jraph.GraphsTuple(n_node=np.asarray([3]), n_edge=np.asarray([2]), nodes={'a': np.ones((3, 4))}, edges={'b': np.ones((2, 5))}, globals={'c': np.ones((1, 6))}, senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info('Nested graph %r', nested_graph)
implicitly_batched_graph = jraph.GraphsTuple(n_node=np.asarray([3, 1]), n_edge=np.asarray([2, 1]), nodes=np.ones((4, 4)), edges=np.ones((3, 5)), globals=np.ones((2, 6)), senders=np.array([0, 1, 3]), receivers=np.array([2, 2, 3]))
logging.info('Implicitly batched graph %r', implicitly_batched_graph)
implicitly_batched_graph = jraph.batch([single_graph, implicitly_batched_graph])
logging.info('Implicitly batched graph %r', implicitly_batched_graph)
(graph_1, graph_2, graph_3) = jraph.unbatch(implicitly_batched_graph)
logging.info('Unbatched graphs %r %r %r', graph_1, graph_2, graph_3)
padded_graph = jraph.pad_with_graphs(single_graph, n_node=10, n_edge=5, n_graph=4)
logging.info('Padded graph %r', padded_graph)
single_graph = jraph.unpad_with_graphs(padded_graph)
logging.info('Unpadded graph %r', single_graph)
explicitly_batched_graph = jraph.GraphsTuple(n_node=np.asarray([[3], [1]]), n_edge=np.asarray([[2], [1]]), nodes=np.ones((2, 3, 4)), edges=np.ones((2, 2, 5)), globals=np.ones((2, 1, 6)), senders=np.array([[0, 1], [0, (- 1)]]), receivers=np.array([[2, 2], [0, (- 1)]]))
logging.info('Explicitly batched graph %r', explicitly_batched_graph)
def update_edge_fn(edge_features, sender_node_features, receiver_node_features, globals_):
'Returns the update edge features.'
del sender_node_features
del receiver_node_features
del globals_
return edge_features
def update_node_fn(node_features, aggregated_sender_edge_features, aggregated_receiver_edge_features, globals_):
'Returns the update node features.'
del aggregated_sender_edge_features
del aggregated_receiver_edge_features
del globals_
return node_features
def update_globals_fn(aggregated_node_features, aggregated_edge_features, globals_):
del aggregated_node_features
del aggregated_edge_features
return globals_
aggregate_edges_for_nodes_fn = jraph.segment_sum
aggregate_nodes_for_globals_fn = jraph.segment_sum
aggregate_edges_for_globals_fn = jraph.segment_sum
attention_logit_fn = None
attention_reduce_fn = None
network = jraph.GraphNetwork(update_edge_fn=update_edge_fn, update_node_fn=update_node_fn, update_global_fn=update_globals_fn, attention_logit_fn=attention_logit_fn, aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn, aggregate_nodes_for_globals_fn=aggregate_nodes_for_globals_fn, aggregate_edges_for_globals_fn=aggregate_edges_for_globals_fn, attention_reduce_fn=attention_reduce_fn)
updated_graph = network(single_graph)
logging.info('Updated graph from single graph %r', updated_graph)
updated_graph = network(nested_graph)
logging.info('Updated graph from nested graph %r', nested_graph)
updated_graph = network(implicitly_batched_graph)
logging.info('Updated graph from implicitly batched graph %r', updated_graph)
updated_graph = network(padded_graph)
logging.info('Updated graph from padded graph %r', updated_graph)
jitted_network = jax.jit(network)
updated_graph = jitted_network(padded_graph)
logging.info('(JIT) updated graph from padded graph %r', updated_graph)
logging.info('basic.py complete!') | Runs basic example. | jraph/examples/basic.py | run | tlmakinen/jraph | 871 | python | def run():
single_graph = jraph.GraphsTuple(n_node=np.asarray([3]), n_edge=np.asarray([2]), nodes=np.ones((3, 4)), edges=np.ones((2, 5)), globals=np.ones((1, 6)), senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info('Single graph %r', single_graph)
nested_graph = jraph.GraphsTuple(n_node=np.asarray([3]), n_edge=np.asarray([2]), nodes={'a': np.ones((3, 4))}, edges={'b': np.ones((2, 5))}, globals={'c': np.ones((1, 6))}, senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info('Nested graph %r', nested_graph)
implicitly_batched_graph = jraph.GraphsTuple(n_node=np.asarray([3, 1]), n_edge=np.asarray([2, 1]), nodes=np.ones((4, 4)), edges=np.ones((3, 5)), globals=np.ones((2, 6)), senders=np.array([0, 1, 3]), receivers=np.array([2, 2, 3]))
logging.info('Implicitly batched graph %r', implicitly_batched_graph)
implicitly_batched_graph = jraph.batch([single_graph, implicitly_batched_graph])
logging.info('Implicitly batched graph %r', implicitly_batched_graph)
(graph_1, graph_2, graph_3) = jraph.unbatch(implicitly_batched_graph)
logging.info('Unbatched graphs %r %r %r', graph_1, graph_2, graph_3)
padded_graph = jraph.pad_with_graphs(single_graph, n_node=10, n_edge=5, n_graph=4)
logging.info('Padded graph %r', padded_graph)
single_graph = jraph.unpad_with_graphs(padded_graph)
logging.info('Unpadded graph %r', single_graph)
explicitly_batched_graph = jraph.GraphsTuple(n_node=np.asarray([[3], [1]]), n_edge=np.asarray([[2], [1]]), nodes=np.ones((2, 3, 4)), edges=np.ones((2, 2, 5)), globals=np.ones((2, 1, 6)), senders=np.array([[0, 1], [0, (- 1)]]), receivers=np.array([[2, 2], [0, (- 1)]]))
logging.info('Explicitly batched graph %r', explicitly_batched_graph)
def update_edge_fn(edge_features, sender_node_features, receiver_node_features, globals_):
'Returns the update edge features.'
del sender_node_features
del receiver_node_features
del globals_
return edge_features
def update_node_fn(node_features, aggregated_sender_edge_features, aggregated_receiver_edge_features, globals_):
'Returns the update node features.'
del aggregated_sender_edge_features
del aggregated_receiver_edge_features
del globals_
return node_features
def update_globals_fn(aggregated_node_features, aggregated_edge_features, globals_):
del aggregated_node_features
del aggregated_edge_features
return globals_
aggregate_edges_for_nodes_fn = jraph.segment_sum
aggregate_nodes_for_globals_fn = jraph.segment_sum
aggregate_edges_for_globals_fn = jraph.segment_sum
attention_logit_fn = None
attention_reduce_fn = None
network = jraph.GraphNetwork(update_edge_fn=update_edge_fn, update_node_fn=update_node_fn, update_global_fn=update_globals_fn, attention_logit_fn=attention_logit_fn, aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn, aggregate_nodes_for_globals_fn=aggregate_nodes_for_globals_fn, aggregate_edges_for_globals_fn=aggregate_edges_for_globals_fn, attention_reduce_fn=attention_reduce_fn)
updated_graph = network(single_graph)
logging.info('Updated graph from single graph %r', updated_graph)
updated_graph = network(nested_graph)
logging.info('Updated graph from nested graph %r', nested_graph)
updated_graph = network(implicitly_batched_graph)
logging.info('Updated graph from implicitly batched graph %r', updated_graph)
updated_graph = network(padded_graph)
logging.info('Updated graph from padded graph %r', updated_graph)
jitted_network = jax.jit(network)
updated_graph = jitted_network(padded_graph)
logging.info('(JIT) updated graph from padded graph %r', updated_graph)
logging.info('basic.py complete!') | def run():
single_graph = jraph.GraphsTuple(n_node=np.asarray([3]), n_edge=np.asarray([2]), nodes=np.ones((3, 4)), edges=np.ones((2, 5)), globals=np.ones((1, 6)), senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info('Single graph %r', single_graph)
nested_graph = jraph.GraphsTuple(n_node=np.asarray([3]), n_edge=np.asarray([2]), nodes={'a': np.ones((3, 4))}, edges={'b': np.ones((2, 5))}, globals={'c': np.ones((1, 6))}, senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info('Nested graph %r', nested_graph)
implicitly_batched_graph = jraph.GraphsTuple(n_node=np.asarray([3, 1]), n_edge=np.asarray([2, 1]), nodes=np.ones((4, 4)), edges=np.ones((3, 5)), globals=np.ones((2, 6)), senders=np.array([0, 1, 3]), receivers=np.array([2, 2, 3]))
logging.info('Implicitly batched graph %r', implicitly_batched_graph)
implicitly_batched_graph = jraph.batch([single_graph, implicitly_batched_graph])
logging.info('Implicitly batched graph %r', implicitly_batched_graph)
(graph_1, graph_2, graph_3) = jraph.unbatch(implicitly_batched_graph)
logging.info('Unbatched graphs %r %r %r', graph_1, graph_2, graph_3)
padded_graph = jraph.pad_with_graphs(single_graph, n_node=10, n_edge=5, n_graph=4)
logging.info('Padded graph %r', padded_graph)
single_graph = jraph.unpad_with_graphs(padded_graph)
logging.info('Unpadded graph %r', single_graph)
explicitly_batched_graph = jraph.GraphsTuple(n_node=np.asarray([[3], [1]]), n_edge=np.asarray([[2], [1]]), nodes=np.ones((2, 3, 4)), edges=np.ones((2, 2, 5)), globals=np.ones((2, 1, 6)), senders=np.array([[0, 1], [0, (- 1)]]), receivers=np.array([[2, 2], [0, (- 1)]]))
logging.info('Explicitly batched graph %r', explicitly_batched_graph)
def update_edge_fn(edge_features, sender_node_features, receiver_node_features, globals_):
'Returns the update edge features.'
del sender_node_features
del receiver_node_features
del globals_
return edge_features
def update_node_fn(node_features, aggregated_sender_edge_features, aggregated_receiver_edge_features, globals_):
'Returns the update node features.'
del aggregated_sender_edge_features
del aggregated_receiver_edge_features
del globals_
return node_features
def update_globals_fn(aggregated_node_features, aggregated_edge_features, globals_):
del aggregated_node_features
del aggregated_edge_features
return globals_
aggregate_edges_for_nodes_fn = jraph.segment_sum
aggregate_nodes_for_globals_fn = jraph.segment_sum
aggregate_edges_for_globals_fn = jraph.segment_sum
attention_logit_fn = None
attention_reduce_fn = None
network = jraph.GraphNetwork(update_edge_fn=update_edge_fn, update_node_fn=update_node_fn, update_global_fn=update_globals_fn, attention_logit_fn=attention_logit_fn, aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn, aggregate_nodes_for_globals_fn=aggregate_nodes_for_globals_fn, aggregate_edges_for_globals_fn=aggregate_edges_for_globals_fn, attention_reduce_fn=attention_reduce_fn)
updated_graph = network(single_graph)
logging.info('Updated graph from single graph %r', updated_graph)
updated_graph = network(nested_graph)
logging.info('Updated graph from nested graph %r', nested_graph)
updated_graph = network(implicitly_batched_graph)
logging.info('Updated graph from implicitly batched graph %r', updated_graph)
updated_graph = network(padded_graph)
logging.info('Updated graph from padded graph %r', updated_graph)
jitted_network = jax.jit(network)
updated_graph = jitted_network(padded_graph)
logging.info('(JIT) updated graph from padded graph %r', updated_graph)
logging.info('basic.py complete!')<|docstring|>Runs basic example.<|endoftext|> |
c79f610e39d084b1f075a30d25053fef1ffa0f00988545dbf280b626bfaf81e0 | def update_edge_fn(edge_features, sender_node_features, receiver_node_features, globals_):
'Returns the update edge features.'
del sender_node_features
del receiver_node_features
del globals_
return edge_features | Returns the update edge features. | jraph/examples/basic.py | update_edge_fn | tlmakinen/jraph | 871 | python | def update_edge_fn(edge_features, sender_node_features, receiver_node_features, globals_):
del sender_node_features
del receiver_node_features
del globals_
return edge_features | def update_edge_fn(edge_features, sender_node_features, receiver_node_features, globals_):
del sender_node_features
del receiver_node_features
del globals_
return edge_features<|docstring|>Returns the update edge features.<|endoftext|> |
4c3e9428b84b54222712e63aaf5445f1020dc35fe861f4ad2bb61495db8f2603 | def update_node_fn(node_features, aggregated_sender_edge_features, aggregated_receiver_edge_features, globals_):
'Returns the update node features.'
del aggregated_sender_edge_features
del aggregated_receiver_edge_features
del globals_
return node_features | Returns the update node features. | jraph/examples/basic.py | update_node_fn | tlmakinen/jraph | 871 | python | def update_node_fn(node_features, aggregated_sender_edge_features, aggregated_receiver_edge_features, globals_):
del aggregated_sender_edge_features
del aggregated_receiver_edge_features
del globals_
return node_features | def update_node_fn(node_features, aggregated_sender_edge_features, aggregated_receiver_edge_features, globals_):
del aggregated_sender_edge_features
del aggregated_receiver_edge_features
del globals_
return node_features<|docstring|>Returns the update node features.<|endoftext|> |
a531d8a7dba4b4c7f8ae64ecffeb9b4d0d433092d6c12c8770e85a84f5f46484 | def test_fixture_reordering(testdir):
'See that pytest reorders tests based on fixtures in the way we expect'
src = "\n import pytest\n\n @pytest.fixture(scope='module')\n def sf():\n print('sf {')\n yield\n print('sf }')\n \n class TestCls(object):\n def test1():\n print(1)\n def test2(sf):\n print(2)\n def test3():\n print(3)\n \n def test_fn4(sf):\n print(4)\n \n def test_fn5():\n print(5)\n assert False\n "
items = testdir.getitems(src)
print('!!!!', items)
result = testdir.runpytest('-v')
print(result)
assert False | See that pytest reorders tests based on fixtures in the way we expect | tests/test_depends.py | test_fixture_reordering | fj128/pytest-depends | 0 | python | def test_fixture_reordering(testdir):
src = "\n import pytest\n\n @pytest.fixture(scope='module')\n def sf():\n print('sf {')\n yield\n print('sf }')\n \n class TestCls(object):\n def test1():\n print(1)\n def test2(sf):\n print(2)\n def test3():\n print(3)\n \n def test_fn4(sf):\n print(4)\n \n def test_fn5():\n print(5)\n assert False\n "
items = testdir.getitems(src)
print('!!!!', items)
result = testdir.runpytest('-v')
print(result)
assert False | def test_fixture_reordering(testdir):
src = "\n import pytest\n\n @pytest.fixture(scope='module')\n def sf():\n print('sf {')\n yield\n print('sf }')\n \n class TestCls(object):\n def test1():\n print(1)\n def test2(sf):\n print(2)\n def test3():\n print(3)\n \n def test_fn4(sf):\n print(4)\n \n def test_fn5():\n print(5)\n assert False\n "
items = testdir.getitems(src)
print('!!!!', items)
result = testdir.runpytest('-v')
print(result)
assert False<|docstring|>See that pytest reorders tests based on fixtures in the way we expect<|endoftext|> |
6fde7f41badd6b6d3fa097cd773105038e53d45aa3b9ccccb7c4a16f0926a987 | @pytest.mark.skip
def test_bar_fixture(testdir):
'Make sure that pytest accepts our fixture.'
testdir.makepyfile('\n def test_sth(bar):\n assert bar == "europython2015"\n ')
result = testdir.runpytest('--foo=europython2015', '-v')
result.stdout.fnmatch_lines(['*::test_sth PASSED'])
assert (result.ret == 0) | Make sure that pytest accepts our fixture. | tests/test_depends.py | test_bar_fixture | fj128/pytest-depends | 0 | python | @pytest.mark.skip
def test_bar_fixture(testdir):
testdir.makepyfile('\n def test_sth(bar):\n assert bar == "europython2015"\n ')
result = testdir.runpytest('--foo=europython2015', '-v')
result.stdout.fnmatch_lines(['*::test_sth PASSED'])
assert (result.ret == 0) | @pytest.mark.skip
def test_bar_fixture(testdir):
testdir.makepyfile('\n def test_sth(bar):\n assert bar == "europython2015"\n ')
result = testdir.runpytest('--foo=europython2015', '-v')
result.stdout.fnmatch_lines(['*::test_sth PASSED'])
assert (result.ret == 0)<|docstring|>Make sure that pytest accepts our fixture.<|endoftext|> |
92a3b93e66957ce22b1324e06388f80a8bd4a501c63dcecc2a0870b6bec8f72c | def create_routes_news(app):
'\n Metodo que crea las rutas relacionadas con la API News\n '
@app.route('/news')
def get_news():
get_news = GetNews()
return get_news(request)
@app.route('/news/search')
def get_news_by_word():
get_article = GetNewsByWord()
return get_article(request)
@app.route('/article')
def get_article_by_id():
get_article = GetArticleById()
return get_article(request) | Metodo que crea las rutas relacionadas con la API News | routes/news.py | create_routes_news | Maurck/fisinius | 0 | python | def create_routes_news(app):
'\n \n '
@app.route('/news')
def get_news():
get_news = GetNews()
return get_news(request)
@app.route('/news/search')
def get_news_by_word():
get_article = GetNewsByWord()
return get_article(request)
@app.route('/article')
def get_article_by_id():
get_article = GetArticleById()
return get_article(request) | def create_routes_news(app):
'\n \n '
@app.route('/news')
def get_news():
get_news = GetNews()
return get_news(request)
@app.route('/news/search')
def get_news_by_word():
get_article = GetNewsByWord()
return get_article(request)
@app.route('/article')
def get_article_by_id():
get_article = GetArticleById()
return get_article(request)<|docstring|>Metodo que crea las rutas relacionadas con la API News<|endoftext|> |
4b15ce0c95f432113050d1cfcf60a8464ba5f694ddc616c186b6344e8a34c3cc | def forward(self, up_x, down_x):
'\n :param up_x: this is the output from the previous up block\n :param down_x: this is the output from the down block\n :return: upsampled feature map\n '
x = self.upsample(up_x)
x = torch.cat([x, down_x], 1)
x = self.conv_block_1(x)
x = self.conv_block_2(x)
return x | :param up_x: this is the output from the previous up block
:param down_x: this is the output from the down block
:return: upsampled feature map | detection/training/Unet.py | forward | AjitPant/AprilTag_Detection | 0 | python | def forward(self, up_x, down_x):
'\n :param up_x: this is the output from the previous up block\n :param down_x: this is the output from the down block\n :return: upsampled feature map\n '
x = self.upsample(up_x)
x = torch.cat([x, down_x], 1)
x = self.conv_block_1(x)
x = self.conv_block_2(x)
return x | def forward(self, up_x, down_x):
'\n :param up_x: this is the output from the previous up block\n :param down_x: this is the output from the down block\n :return: upsampled feature map\n '
x = self.upsample(up_x)
x = torch.cat([x, down_x], 1)
x = self.conv_block_1(x)
x = self.conv_block_2(x)
return x<|docstring|>:param up_x: this is the output from the previous up block
:param down_x: this is the output from the down block
:return: upsampled feature map<|endoftext|> |
c54e1b4291a1d71183662431ca24b3352c9356e8cb05e8a4243344bc4f94bd33 | def __init__(self, hparams):
"\n Implementation of\n U-Net: Convolutional Networks for Biomedical Image Segmentation\n (Ronneberger et al., 2015)\n https://arxiv.org/abs/1505.04597\n Using the default arguments will yield the exact version used\n in the original paper\n Args:\n in_channels (int): number of input channels\n n_classes (int): number of output channels\n depth (int): depth of the network\n wf (int): number of filters in the first layer is 2**wf\n padding (bool): if True, apply padding such that the input shape\n is the same as the output.\n This may introduce artifacts\n batch_norm (bool): Use BatchNorm after layers with an\n activation function\n up_mode (str): one of 'upconv' or 'upsample'.\n 'upconv' will use transposed convolutions for\n learned upsampling.\n 'upsample' will use bilinear upsampling.\n "
super(Unet, self).__init__()
self.in_channels = 3
self.n_classes = 2
self.depth = 8
self.wf = 2
self.padding = True
self.batch_norm = True
self.up_mode = 'upconv'
self.hparams = hparams
assert (self.up_mode in ('upconv', 'upsample'))
self.padding = self.padding
self.depth = self.depth
prev_channels = self.in_channels
self.down_path = nn.ModuleList()
for i in range(self.depth):
self.down_path.append(UNetConvBlock(prev_channels, (2 ** (self.wf + i)), self.padding, self.batch_norm))
prev_channels = (2 ** (self.wf + i))
self.up_path = nn.ModuleList()
for i in reversed(range((self.depth - 1))):
self.up_path.append(UNetUpBlock(prev_channels, (2 ** (self.wf + i)), self.up_mode, self.padding, self.batch_norm))
prev_channels = (2 ** (self.wf + i))
self.last = nn.Conv2d(prev_channels, self.n_classes, kernel_size=1) | Implementation of
U-Net: Convolutional Networks for Biomedical Image Segmentation
(Ronneberger et al., 2015)
https://arxiv.org/abs/1505.04597
Using the default arguments will yield the exact version used
in the original paper
Args:
in_channels (int): number of input channels
n_classes (int): number of output channels
depth (int): depth of the network
wf (int): number of filters in the first layer is 2**wf
padding (bool): if True, apply padding such that the input shape
is the same as the output.
This may introduce artifacts
batch_norm (bool): Use BatchNorm after layers with an
activation function
up_mode (str): one of 'upconv' or 'upsample'.
'upconv' will use transposed convolutions for
learned upsampling.
'upsample' will use bilinear upsampling. | detection/training/Unet.py | __init__ | AjitPant/AprilTag_Detection | 0 | python | def __init__(self, hparams):
"\n Implementation of\n U-Net: Convolutional Networks for Biomedical Image Segmentation\n (Ronneberger et al., 2015)\n https://arxiv.org/abs/1505.04597\n Using the default arguments will yield the exact version used\n in the original paper\n Args:\n in_channels (int): number of input channels\n n_classes (int): number of output channels\n depth (int): depth of the network\n wf (int): number of filters in the first layer is 2**wf\n padding (bool): if True, apply padding such that the input shape\n is the same as the output.\n This may introduce artifacts\n batch_norm (bool): Use BatchNorm after layers with an\n activation function\n up_mode (str): one of 'upconv' or 'upsample'.\n 'upconv' will use transposed convolutions for\n learned upsampling.\n 'upsample' will use bilinear upsampling.\n "
super(Unet, self).__init__()
self.in_channels = 3
self.n_classes = 2
self.depth = 8
self.wf = 2
self.padding = True
self.batch_norm = True
self.up_mode = 'upconv'
self.hparams = hparams
assert (self.up_mode in ('upconv', 'upsample'))
self.padding = self.padding
self.depth = self.depth
prev_channels = self.in_channels
self.down_path = nn.ModuleList()
for i in range(self.depth):
self.down_path.append(UNetConvBlock(prev_channels, (2 ** (self.wf + i)), self.padding, self.batch_norm))
prev_channels = (2 ** (self.wf + i))
self.up_path = nn.ModuleList()
for i in reversed(range((self.depth - 1))):
self.up_path.append(UNetUpBlock(prev_channels, (2 ** (self.wf + i)), self.up_mode, self.padding, self.batch_norm))
prev_channels = (2 ** (self.wf + i))
self.last = nn.Conv2d(prev_channels, self.n_classes, kernel_size=1) | def __init__(self, hparams):
"\n Implementation of\n U-Net: Convolutional Networks for Biomedical Image Segmentation\n (Ronneberger et al., 2015)\n https://arxiv.org/abs/1505.04597\n Using the default arguments will yield the exact version used\n in the original paper\n Args:\n in_channels (int): number of input channels\n n_classes (int): number of output channels\n depth (int): depth of the network\n wf (int): number of filters in the first layer is 2**wf\n padding (bool): if True, apply padding such that the input shape\n is the same as the output.\n This may introduce artifacts\n batch_norm (bool): Use BatchNorm after layers with an\n activation function\n up_mode (str): one of 'upconv' or 'upsample'.\n 'upconv' will use transposed convolutions for\n learned upsampling.\n 'upsample' will use bilinear upsampling.\n "
super(Unet, self).__init__()
self.in_channels = 3
self.n_classes = 2
self.depth = 8
self.wf = 2
self.padding = True
self.batch_norm = True
self.up_mode = 'upconv'
self.hparams = hparams
assert (self.up_mode in ('upconv', 'upsample'))
self.padding = self.padding
self.depth = self.depth
prev_channels = self.in_channels
self.down_path = nn.ModuleList()
for i in range(self.depth):
self.down_path.append(UNetConvBlock(prev_channels, (2 ** (self.wf + i)), self.padding, self.batch_norm))
prev_channels = (2 ** (self.wf + i))
self.up_path = nn.ModuleList()
for i in reversed(range((self.depth - 1))):
self.up_path.append(UNetUpBlock(prev_channels, (2 ** (self.wf + i)), self.up_mode, self.padding, self.batch_norm))
prev_channels = (2 ** (self.wf + i))
self.last = nn.Conv2d(prev_channels, self.n_classes, kernel_size=1)<|docstring|>Implementation of
U-Net: Convolutional Networks for Biomedical Image Segmentation
(Ronneberger et al., 2015)
https://arxiv.org/abs/1505.04597
Using the default arguments will yield the exact version used
in the original paper
Args:
in_channels (int): number of input channels
n_classes (int): number of output channels
depth (int): depth of the network
wf (int): number of filters in the first layer is 2**wf
padding (bool): if True, apply padding such that the input shape
is the same as the output.
This may introduce artifacts
batch_norm (bool): Use BatchNorm after layers with an
activation function
up_mode (str): one of 'upconv' or 'upsample'.
'upconv' will use transposed convolutions for
learned upsampling.
'upsample' will use bilinear upsampling.<|endoftext|> |
4b7c9963d74e472f35350720b12ad9b11badde936a05fd71f00677048a832cb9 | @require_permission('perm_edit')
def emailitem_edit(request, item_container):
' Eigenschaften der Frage aendern '
parent_app = get_parent_app(item_container)
def save_values(item_container, old, new):
if item_container.is_data_object:
if new.has_key('text'):
new['text'] = new['text'].replace('<p>', '').replace('</p>', '')
else:
new['text'] = ''
save_item(item_container, old, new)
else:
save_item_container(item_container, old, new)
class DmsItemForm(forms.Form):
' Elemente des Eingabeformulars '
title = forms.CharField(max_length=240, widget=forms.TextInput(attrs={'size': 60}))
sub_title = forms.CharField(required=False, max_length=240, widget=forms.TextInput(attrs={'size': 60}))
text = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4, 'cols': 60, 'style': 'width:100%;'}))
text_more = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4, 'cols': 60, 'style': 'width:100%;'}))
section = forms.CharField(required=False, widget=forms.Select(choices=get_parent_section_choices(item_container), attrs={'size': 4, 'style': 'width:40%'}))
integer_1 = forms.ChoiceField(choices=get_yes_no_choices(), widget=forms.RadioSelect())
integer_2 = forms.IntegerField(required=False, min_value=1, max_value=200, widget=forms.TextInput(attrs={'size': 5}))
integer_3 = forms.IntegerField(required=False, min_value=1, max_value=80, widget=forms.TextInput(attrs={'size': 5}))
integer_4 = forms.IntegerField(required=False, min_value=20, max_value=60, widget=forms.TextInput(attrs={'size': 5}))
integer_5 = forms.IntegerField(required=False, min_value=3, max_value=20, widget=forms.TextInput(attrs={'size': 5}))
data_init = {'title': decode_html(item_container.item.title), 'sub_title': decode_html(item_container.item.sub_title), 'text': item_container.item.text.replace('<p>', '').replace('</p>', ''), 'text:_more': item_container.item.text_more.replace('<p>', '').replace('</p>', ''), 'section': item_container.section, 'integer_1': item_container.item.integer_1, 'integer_2': item_container.item.integer_2, 'integer_3': item_container.item.integer_3, 'integer_4': item_container.item.integer_4, 'integer_5': item_container.item.integer_5}
app_name = u'emailitem'
if (request.method == 'POST'):
data = request.POST.copy()
else:
data = data_init
f = DmsItemForm(data)
my_title = _(u'Frage ändern')
form_type = item_container.item.string_1
if (form_type == 'input'):
tabs = [('tab_base', ['title', 'sub_title', 'text_more', 'integer_1', 'integer_2', 'integer_3', 'section'])]
elif (form_type == 'text'):
tabs = [('tab_base', ['title', 'sub_title', 'text_more', 'integer_1', 'integer_4', 'integer_5', 'section'])]
else:
tabs = [('tab_base', ['title', 'sub_title', 'text', 'text_more', 'integer_1', 'section'])]
content = get_tabbed_form(tabs, help_form, app_name, f)
if ((request.method == 'POST') and (not f.errors)):
save_values(item_container, data_init, f.data)
return HttpResponseRedirect(get_site_url(item_container, item_container.item.name))
else:
vars = get_item_vars_edit(request, item_container, app_name, my_title, content, f)
return render_to_response('app/base_edit.html', vars) | Eigenschaften der Frage aendern | emailitem/views_edit.py | emailitem_edit | shagun30/djambala-2 | 0 | python | @require_permission('perm_edit')
def emailitem_edit(request, item_container):
' '
parent_app = get_parent_app(item_container)
def save_values(item_container, old, new):
if item_container.is_data_object:
if new.has_key('text'):
new['text'] = new['text'].replace('<p>', ).replace('</p>', )
else:
new['text'] =
save_item(item_container, old, new)
else:
save_item_container(item_container, old, new)
class DmsItemForm(forms.Form):
' Elemente des Eingabeformulars '
title = forms.CharField(max_length=240, widget=forms.TextInput(attrs={'size': 60}))
sub_title = forms.CharField(required=False, max_length=240, widget=forms.TextInput(attrs={'size': 60}))
text = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4, 'cols': 60, 'style': 'width:100%;'}))
text_more = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4, 'cols': 60, 'style': 'width:100%;'}))
section = forms.CharField(required=False, widget=forms.Select(choices=get_parent_section_choices(item_container), attrs={'size': 4, 'style': 'width:40%'}))
integer_1 = forms.ChoiceField(choices=get_yes_no_choices(), widget=forms.RadioSelect())
integer_2 = forms.IntegerField(required=False, min_value=1, max_value=200, widget=forms.TextInput(attrs={'size': 5}))
integer_3 = forms.IntegerField(required=False, min_value=1, max_value=80, widget=forms.TextInput(attrs={'size': 5}))
integer_4 = forms.IntegerField(required=False, min_value=20, max_value=60, widget=forms.TextInput(attrs={'size': 5}))
integer_5 = forms.IntegerField(required=False, min_value=3, max_value=20, widget=forms.TextInput(attrs={'size': 5}))
data_init = {'title': decode_html(item_container.item.title), 'sub_title': decode_html(item_container.item.sub_title), 'text': item_container.item.text.replace('<p>', ).replace('</p>', ), 'text:_more': item_container.item.text_more.replace('<p>', ).replace('</p>', ), 'section': item_container.section, 'integer_1': item_container.item.integer_1, 'integer_2': item_container.item.integer_2, 'integer_3': item_container.item.integer_3, 'integer_4': item_container.item.integer_4, 'integer_5': item_container.item.integer_5}
app_name = u'emailitem'
if (request.method == 'POST'):
data = request.POST.copy()
else:
data = data_init
f = DmsItemForm(data)
my_title = _(u'Frage ändern')
form_type = item_container.item.string_1
if (form_type == 'input'):
tabs = [('tab_base', ['title', 'sub_title', 'text_more', 'integer_1', 'integer_2', 'integer_3', 'section'])]
elif (form_type == 'text'):
tabs = [('tab_base', ['title', 'sub_title', 'text_more', 'integer_1', 'integer_4', 'integer_5', 'section'])]
else:
tabs = [('tab_base', ['title', 'sub_title', 'text', 'text_more', 'integer_1', 'section'])]
content = get_tabbed_form(tabs, help_form, app_name, f)
if ((request.method == 'POST') and (not f.errors)):
save_values(item_container, data_init, f.data)
return HttpResponseRedirect(get_site_url(item_container, item_container.item.name))
else:
vars = get_item_vars_edit(request, item_container, app_name, my_title, content, f)
return render_to_response('app/base_edit.html', vars) | @require_permission('perm_edit')
def emailitem_edit(request, item_container):
' '
parent_app = get_parent_app(item_container)
def save_values(item_container, old, new):
if item_container.is_data_object:
if new.has_key('text'):
new['text'] = new['text'].replace('<p>', ).replace('</p>', )
else:
new['text'] =
save_item(item_container, old, new)
else:
save_item_container(item_container, old, new)
class DmsItemForm(forms.Form):
' Elemente des Eingabeformulars '
title = forms.CharField(max_length=240, widget=forms.TextInput(attrs={'size': 60}))
sub_title = forms.CharField(required=False, max_length=240, widget=forms.TextInput(attrs={'size': 60}))
text = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4, 'cols': 60, 'style': 'width:100%;'}))
text_more = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4, 'cols': 60, 'style': 'width:100%;'}))
section = forms.CharField(required=False, widget=forms.Select(choices=get_parent_section_choices(item_container), attrs={'size': 4, 'style': 'width:40%'}))
integer_1 = forms.ChoiceField(choices=get_yes_no_choices(), widget=forms.RadioSelect())
integer_2 = forms.IntegerField(required=False, min_value=1, max_value=200, widget=forms.TextInput(attrs={'size': 5}))
integer_3 = forms.IntegerField(required=False, min_value=1, max_value=80, widget=forms.TextInput(attrs={'size': 5}))
integer_4 = forms.IntegerField(required=False, min_value=20, max_value=60, widget=forms.TextInput(attrs={'size': 5}))
integer_5 = forms.IntegerField(required=False, min_value=3, max_value=20, widget=forms.TextInput(attrs={'size': 5}))
data_init = {'title': decode_html(item_container.item.title), 'sub_title': decode_html(item_container.item.sub_title), 'text': item_container.item.text.replace('<p>', ).replace('</p>', ), 'text:_more': item_container.item.text_more.replace('<p>', ).replace('</p>', ), 'section': item_container.section, 'integer_1': item_container.item.integer_1, 'integer_2': item_container.item.integer_2, 'integer_3': item_container.item.integer_3, 'integer_4': item_container.item.integer_4, 'integer_5': item_container.item.integer_5}
app_name = u'emailitem'
if (request.method == 'POST'):
data = request.POST.copy()
else:
data = data_init
f = DmsItemForm(data)
my_title = _(u'Frage ändern')
form_type = item_container.item.string_1
if (form_type == 'input'):
tabs = [('tab_base', ['title', 'sub_title', 'text_more', 'integer_1', 'integer_2', 'integer_3', 'section'])]
elif (form_type == 'text'):
tabs = [('tab_base', ['title', 'sub_title', 'text_more', 'integer_1', 'integer_4', 'integer_5', 'section'])]
else:
tabs = [('tab_base', ['title', 'sub_title', 'text', 'text_more', 'integer_1', 'section'])]
content = get_tabbed_form(tabs, help_form, app_name, f)
if ((request.method == 'POST') and (not f.errors)):
save_values(item_container, data_init, f.data)
return HttpResponseRedirect(get_site_url(item_container, item_container.item.name))
else:
vars = get_item_vars_edit(request, item_container, app_name, my_title, content, f)
return render_to_response('app/base_edit.html', vars)<|docstring|>Eigenschaften der Frage aendern<|endoftext|> |
2a2496d1c5e260ed4992e9189e28381dc2b2997869f571db86992880552a7944 | async def get_roll(bot, context):
'Gets a random roll in the D&D syntax style.'
(rolls, sides, bonus) = context.arguments[0]
max_characters = len(str(sides))
results = [random.randint(1, sides) for it in range(rolls)]
text_results = ['`{: <{}}\u200b`'.format(it, max_characters) for it in results]
split_results = [text_results[it:(it + 10)] for it in range(0, len(text_results), 10)]
result_text = '\n'.join((', '.join(it) for it in split_results))
embed = discord.Embed(title=':game_die: Dice roll', description=result_text)
total = sum(results)
if (rolls > 1):
embed.add_field(name='Sum', value=str(total))
embed.add_field(name='Mean', value='{:.2f}'.format((total / len(results))))
if bonus:
embed.add_field(name='Final', value=str((total + bonus)))
return Response(embed=embed) | Gets a random roll in the D&D syntax style. | randomizer/randomizer.py | get_roll | jkchen2/JshBot-plugins | 1 | python | async def get_roll(bot, context):
(rolls, sides, bonus) = context.arguments[0]
max_characters = len(str(sides))
results = [random.randint(1, sides) for it in range(rolls)]
text_results = ['`{: <{}}\u200b`'.format(it, max_characters) for it in results]
split_results = [text_results[it:(it + 10)] for it in range(0, len(text_results), 10)]
result_text = '\n'.join((', '.join(it) for it in split_results))
embed = discord.Embed(title=':game_die: Dice roll', description=result_text)
total = sum(results)
if (rolls > 1):
embed.add_field(name='Sum', value=str(total))
embed.add_field(name='Mean', value='{:.2f}'.format((total / len(results))))
if bonus:
embed.add_field(name='Final', value=str((total + bonus)))
return Response(embed=embed) | async def get_roll(bot, context):
(rolls, sides, bonus) = context.arguments[0]
max_characters = len(str(sides))
results = [random.randint(1, sides) for it in range(rolls)]
text_results = ['`{: <{}}\u200b`'.format(it, max_characters) for it in results]
split_results = [text_results[it:(it + 10)] for it in range(0, len(text_results), 10)]
result_text = '\n'.join((', '.join(it) for it in split_results))
embed = discord.Embed(title=':game_die: Dice roll', description=result_text)
total = sum(results)
if (rolls > 1):
embed.add_field(name='Sum', value=str(total))
embed.add_field(name='Mean', value='{:.2f}'.format((total / len(results))))
if bonus:
embed.add_field(name='Final', value=str((total + bonus)))
return Response(embed=embed)<|docstring|>Gets a random roll in the D&D syntax style.<|endoftext|> |
8bb10888ae5d7a4ddb66212570b786e468a708074d9ff78476f846885c553f9a | def _lazy_re_compile(regex, flags=0):
'Lazily compile a regex with flags.'
def _compile():
if isinstance(regex, str):
return re.compile(regex, flags)
else:
assert (not flags), 'flags must be empty if regex is passed pre-compiled'
return regex
return SimpleLazyObject(_compile) | Lazily compile a regex with flags. | django_flex_user/validators.py | _lazy_re_compile | ebenh/django-flex-user | 1 | python | def _lazy_re_compile(regex, flags=0):
def _compile():
if isinstance(regex, str):
return re.compile(regex, flags)
else:
assert (not flags), 'flags must be empty if regex is passed pre-compiled'
return regex
return SimpleLazyObject(_compile) | def _lazy_re_compile(regex, flags=0):
def _compile():
if isinstance(regex, str):
return re.compile(regex, flags)
else:
assert (not flags), 'flags must be empty if regex is passed pre-compiled'
return regex
return SimpleLazyObject(_compile)<|docstring|>Lazily compile a regex with flags.<|endoftext|> |
af8f5419911361bb8fe60b1c91fe55f920eec6eee46f1abd03f63a0846979508 | def flex_user_clean_username(value):
'\n Our clean username function for social-auth-app-django. Cleans input username by removing unsupported characters.\n\n See SOCIAL_AUTH_CLEAN_USERNAME_FUNCTION.\n\n :param value:\n :return:\n '
value = NO_SPECIAL_REGEX.sub('', value)
return value | Our clean username function for social-auth-app-django. Cleans input username by removing unsupported characters.
See SOCIAL_AUTH_CLEAN_USERNAME_FUNCTION.
:param value:
:return: | django_flex_user/validators.py | flex_user_clean_username | ebenh/django-flex-user | 1 | python | def flex_user_clean_username(value):
'\n Our clean username function for social-auth-app-django. Cleans input username by removing unsupported characters.\n\n See SOCIAL_AUTH_CLEAN_USERNAME_FUNCTION.\n\n :param value:\n :return:\n '
value = NO_SPECIAL_REGEX.sub(, value)
return value | def flex_user_clean_username(value):
'\n Our clean username function for social-auth-app-django. Cleans input username by removing unsupported characters.\n\n See SOCIAL_AUTH_CLEAN_USERNAME_FUNCTION.\n\n :param value:\n :return:\n '
value = NO_SPECIAL_REGEX.sub(, value)
return value<|docstring|>Our clean username function for social-auth-app-django. Cleans input username by removing unsupported characters.
See SOCIAL_AUTH_CLEAN_USERNAME_FUNCTION.
:param value:
:return:<|endoftext|> |
b017ff4a72591909e360bebc6eb3b9022c0ea9f41ad5df0ef6c1b05438fdac9d | def getIPInfo(address, access_key):
'To fetch IP Info from API, provide GEOIP Key in Environment Variables'
api = 'http://api.ipstack.com/'
request_string = (((api + address) + '?access_key=') + access_key)
cachedgeoIP = getgeoIPData(address, LOGTABLE)
if (cachedgeoIP is not None):
return cachedgeoIP
try:
api_response = requests.get(request_string)
except:
return None
json_response = json.loads(api_response.text)
try:
if (not json_response(['success'])):
return None
except:
pass
geoip['city_name'] = str(json_response['city'])
geoip['region_name'] = str(json_response['region_name'])
geoip['location'] = [json_response['longitude'], json_response['latitude']]
geoip['latitude'] = json_response['latitude']
geoip['longitude'] = json_response['longitude']
geoip['country_name'] = str(json_response['country_name'])
putgeoIPData(address, geoip, LOGTABLE)
return geoip | To fetch IP Info from API, provide GEOIP Key in Environment Variables | lambda-functions/SIEMFunctionToFetchGeoIPData.py | getIPInfo | mistsys/proto-siem | 1 | python | def getIPInfo(address, access_key):
api = 'http://api.ipstack.com/'
request_string = (((api + address) + '?access_key=') + access_key)
cachedgeoIP = getgeoIPData(address, LOGTABLE)
if (cachedgeoIP is not None):
return cachedgeoIP
try:
api_response = requests.get(request_string)
except:
return None
json_response = json.loads(api_response.text)
try:
if (not json_response(['success'])):
return None
except:
pass
geoip['city_name'] = str(json_response['city'])
geoip['region_name'] = str(json_response['region_name'])
geoip['location'] = [json_response['longitude'], json_response['latitude']]
geoip['latitude'] = json_response['latitude']
geoip['longitude'] = json_response['longitude']
geoip['country_name'] = str(json_response['country_name'])
putgeoIPData(address, geoip, LOGTABLE)
return geoip | def getIPInfo(address, access_key):
api = 'http://api.ipstack.com/'
request_string = (((api + address) + '?access_key=') + access_key)
cachedgeoIP = getgeoIPData(address, LOGTABLE)
if (cachedgeoIP is not None):
return cachedgeoIP
try:
api_response = requests.get(request_string)
except:
return None
json_response = json.loads(api_response.text)
try:
if (not json_response(['success'])):
return None
except:
pass
geoip['city_name'] = str(json_response['city'])
geoip['region_name'] = str(json_response['region_name'])
geoip['location'] = [json_response['longitude'], json_response['latitude']]
geoip['latitude'] = json_response['latitude']
geoip['longitude'] = json_response['longitude']
geoip['country_name'] = str(json_response['country_name'])
putgeoIPData(address, geoip, LOGTABLE)
return geoip<|docstring|>To fetch IP Info from API, provide GEOIP Key in Environment Variables<|endoftext|> |
112371e7bc4baaef37b081cc12d061ef0063b383a178ddbf434791909dda02ec | def putgeoIPData(address, geoIPData, table):
'Log all information to the provided DynamoDB table.\n Args:\n logData (dict): All extracted information\n table (string): Table name for event history.\n Returns:\n TYPE: Success\n '
client = boto3.client('dynamodb')
response = client.put_item(TableName=table, Item={'IPaddress': {'S': address}, 'city_name': {'S': geoIPData['city_name']}, 'region_name': {'S': geoIPData['region_name']}, 'latitude': {'S': str(geoIPData['latitude'])}, 'longitude': {'S': str(geoIPData['longitude'])}, 'country_name': {'S': geoIPData['country_name']}, 'isOnAWS': {'BOOL': addressInAWSNetwork(address, netlist)}, 'service': {'S': str(toupdateprefixeinfo[address]['service'])}, 'awsregion': {'S': str(toupdateprefixeinfo[address]['region'])}})
return 0 | Log all information to the provided DynamoDB table.
Args:
logData (dict): All extracted information
table (string): Table name for event history.
Returns:
TYPE: Success | lambda-functions/SIEMFunctionToFetchGeoIPData.py | putgeoIPData | mistsys/proto-siem | 1 | python | def putgeoIPData(address, geoIPData, table):
'Log all information to the provided DynamoDB table.\n Args:\n logData (dict): All extracted information\n table (string): Table name for event history.\n Returns:\n TYPE: Success\n '
client = boto3.client('dynamodb')
response = client.put_item(TableName=table, Item={'IPaddress': {'S': address}, 'city_name': {'S': geoIPData['city_name']}, 'region_name': {'S': geoIPData['region_name']}, 'latitude': {'S': str(geoIPData['latitude'])}, 'longitude': {'S': str(geoIPData['longitude'])}, 'country_name': {'S': geoIPData['country_name']}, 'isOnAWS': {'BOOL': addressInAWSNetwork(address, netlist)}, 'service': {'S': str(toupdateprefixeinfo[address]['service'])}, 'awsregion': {'S': str(toupdateprefixeinfo[address]['region'])}})
return 0 | def putgeoIPData(address, geoIPData, table):
'Log all information to the provided DynamoDB table.\n Args:\n logData (dict): All extracted information\n table (string): Table name for event history.\n Returns:\n TYPE: Success\n '
client = boto3.client('dynamodb')
response = client.put_item(TableName=table, Item={'IPaddress': {'S': address}, 'city_name': {'S': geoIPData['city_name']}, 'region_name': {'S': geoIPData['region_name']}, 'latitude': {'S': str(geoIPData['latitude'])}, 'longitude': {'S': str(geoIPData['longitude'])}, 'country_name': {'S': geoIPData['country_name']}, 'isOnAWS': {'BOOL': addressInAWSNetwork(address, netlist)}, 'service': {'S': str(toupdateprefixeinfo[address]['service'])}, 'awsregion': {'S': str(toupdateprefixeinfo[address]['region'])}})
return 0<|docstring|>Log all information to the provided DynamoDB table.
Args:
logData (dict): All extracted information
table (string): Table name for event history.
Returns:
TYPE: Success<|endoftext|> |
cfb5e2f7daf2176ae2857cb0e5a62dd55d7be34e3e99b3cf4d538c598d65be7f | def getgeoIPData(address, table):
'Log all information to the provided DynamoDB table.\n Args:\n logData (dict): All extracted information\n table (string): Table name for event history.\n Returns:\n TYPE: Success\n '
client = boto3.client('dynamodb')
try:
response = client.get_item(TableName=table, Key={'IPaddress': {'S': address}})
except ClientError as e:
print(e.response['Error']['Message'])
else:
try:
item = response['Item']
except:
item = None
geoip = {}
if (item is not None):
geoip['city_name'] = str(item['city_name']['S'])
geoip['region_name'] = str(item['region_name']['S'])
geoip['location'] = [float(item['longitude']['S']), float(item['latitude']['S'])]
geoip['latitude'] = float(item['latitude']['S'])
geoip['longitude'] = float(item['longitude']['S'])
geoip['country_name'] = str(item['country_name']['S'])
try:
geoip['isOnAWS'] = item['isOnAWS']
geoip['service'] = str(item['service']['S'])
geoip['awsregion'] = str(item['awsregion']['S'])
except:
pass
else:
geoip = None
return geoip | Log all information to the provided DynamoDB table.
Args:
logData (dict): All extracted information
table (string): Table name for event history.
Returns:
TYPE: Success | lambda-functions/SIEMFunctionToFetchGeoIPData.py | getgeoIPData | mistsys/proto-siem | 1 | python | def getgeoIPData(address, table):
'Log all information to the provided DynamoDB table.\n Args:\n logData (dict): All extracted information\n table (string): Table name for event history.\n Returns:\n TYPE: Success\n '
client = boto3.client('dynamodb')
try:
response = client.get_item(TableName=table, Key={'IPaddress': {'S': address}})
except ClientError as e:
print(e.response['Error']['Message'])
else:
try:
item = response['Item']
except:
item = None
geoip = {}
if (item is not None):
geoip['city_name'] = str(item['city_name']['S'])
geoip['region_name'] = str(item['region_name']['S'])
geoip['location'] = [float(item['longitude']['S']), float(item['latitude']['S'])]
geoip['latitude'] = float(item['latitude']['S'])
geoip['longitude'] = float(item['longitude']['S'])
geoip['country_name'] = str(item['country_name']['S'])
try:
geoip['isOnAWS'] = item['isOnAWS']
geoip['service'] = str(item['service']['S'])
geoip['awsregion'] = str(item['awsregion']['S'])
except:
pass
else:
geoip = None
return geoip | def getgeoIPData(address, table):
'Log all information to the provided DynamoDB table.\n Args:\n logData (dict): All extracted information\n table (string): Table name for event history.\n Returns:\n TYPE: Success\n '
client = boto3.client('dynamodb')
try:
response = client.get_item(TableName=table, Key={'IPaddress': {'S': address}})
except ClientError as e:
print(e.response['Error']['Message'])
else:
try:
item = response['Item']
except:
item = None
geoip = {}
if (item is not None):
geoip['city_name'] = str(item['city_name']['S'])
geoip['region_name'] = str(item['region_name']['S'])
geoip['location'] = [float(item['longitude']['S']), float(item['latitude']['S'])]
geoip['latitude'] = float(item['latitude']['S'])
geoip['longitude'] = float(item['longitude']['S'])
geoip['country_name'] = str(item['country_name']['S'])
try:
geoip['isOnAWS'] = item['isOnAWS']
geoip['service'] = str(item['service']['S'])
geoip['awsregion'] = str(item['awsregion']['S'])
except:
pass
else:
geoip = None
return geoip<|docstring|>Log all information to the provided DynamoDB table.
Args:
logData (dict): All extracted information
table (string): Table name for event history.
Returns:
TYPE: Success<|endoftext|> |
6dab2dbfdd50f2cdb39862c87ca866d6c847f6d6e8d23b264cc324d8b25e6b80 | def __init__(self, length, width, initial_open=0.4):
'\n Generate Obstacle\n PERM_WALL = 0\n WALL = 1\n FLOOR = 2\n '
self.__length = length
self.__width = width
self.__area = (length * width)
self.__map = []
self.__ds = DisjointSet()
self.__up_loc = 0
self.center_pt = (int((self.__length / 2)), int((self.__width / 2)))
self.__gen_initial_map(initial_open) | Generate Obstacle
PERM_WALL = 0
WALL = 1
FLOOR = 2 | utils/ca_cave.py | __init__ | rakkit/curiosity_gym | 0 | python | def __init__(self, length, width, initial_open=0.4):
'\n Generate Obstacle\n PERM_WALL = 0\n WALL = 1\n FLOOR = 2\n '
self.__length = length
self.__width = width
self.__area = (length * width)
self.__map = []
self.__ds = DisjointSet()
self.__up_loc = 0
self.center_pt = (int((self.__length / 2)), int((self.__width / 2)))
self.__gen_initial_map(initial_open) | def __init__(self, length, width, initial_open=0.4):
'\n Generate Obstacle\n PERM_WALL = 0\n WALL = 1\n FLOOR = 2\n '
self.__length = length
self.__width = width
self.__area = (length * width)
self.__map = []
self.__ds = DisjointSet()
self.__up_loc = 0
self.center_pt = (int((self.__length / 2)), int((self.__width / 2)))
self.__gen_initial_map(initial_open)<|docstring|>Generate Obstacle
PERM_WALL = 0
WALL = 1
FLOOR = 2<|endoftext|> |
f3713a231013ff9b8e19fa78ad0260d9abdedb55645f62a2fc704d1fc77b6126 | def is_hannas_code(code: Tuple[(int, ...)]) -> bool:
'\n Die Funktion prüft ob es eine Kombination von Hanna sein kann.\n :param code: Zu überprüfenden Zahlencode.\n :return: True, genau dann, wenn der Code von Hanna sein kann.\n '
assert (5 not in code)
assert ((code[0] % 2) == 1)
return ((3 in code) and (6 in code) and (sum(((a > b) for (a, b) in zip(code, code[1:]))) <= 1)) | Die Funktion prüft ob es eine Kombination von Hanna sein kann.
:param code: Zu überprüfenden Zahlencode.
:return: True, genau dann, wenn der Code von Hanna sein kann. | Code_Knacken.py | is_hannas_code | UlrichBerntien/Uebungen-Python | 0 | python | def is_hannas_code(code: Tuple[(int, ...)]) -> bool:
'\n Die Funktion prüft ob es eine Kombination von Hanna sein kann.\n :param code: Zu überprüfenden Zahlencode.\n :return: True, genau dann, wenn der Code von Hanna sein kann.\n '
assert (5 not in code)
assert ((code[0] % 2) == 1)
return ((3 in code) and (6 in code) and (sum(((a > b) for (a, b) in zip(code, code[1:]))) <= 1)) | def is_hannas_code(code: Tuple[(int, ...)]) -> bool:
'\n Die Funktion prüft ob es eine Kombination von Hanna sein kann.\n :param code: Zu überprüfenden Zahlencode.\n :return: True, genau dann, wenn der Code von Hanna sein kann.\n '
assert (5 not in code)
assert ((code[0] % 2) == 1)
return ((3 in code) and (6 in code) and (sum(((a > b) for (a, b) in zip(code, code[1:]))) <= 1))<|docstring|>Die Funktion prüft ob es eine Kombination von Hanna sein kann.
:param code: Zu überprüfenden Zahlencode.
:return: True, genau dann, wenn der Code von Hanna sein kann.<|endoftext|> |
e3bfb4bcdbabd403d98f491c31e6af0ac486da8d23b6b6c7a5e2ab6249169afd | def test_id(self):
'Each test annotation should be created with a unique ID.'
annotation_1 = factories.Annotation()
annotation_2 = factories.Annotation()
assert annotation_1.get('id')
assert annotation_2.get('id')
assert (annotation_1['id'] != annotation_2['id']) | Each test annotation should be created with a unique ID. | h/test/factories_test.py | test_id | noscripter/h | 0 | python | def test_id(self):
annotation_1 = factories.Annotation()
annotation_2 = factories.Annotation()
assert annotation_1.get('id')
assert annotation_2.get('id')
assert (annotation_1['id'] != annotation_2['id']) | def test_id(self):
annotation_1 = factories.Annotation()
annotation_2 = factories.Annotation()
assert annotation_1.get('id')
assert annotation_2.get('id')
assert (annotation_1['id'] != annotation_2['id'])<|docstring|>Each test annotation should be created with a unique ID.<|endoftext|> |
452ebfb5c3805310b78c5435c434fd0a72dd943d8ff76ed5db0b770fbf28ac9d | def test_text(self):
'Each annotation should have unique note text.'
annotation_1 = factories.Annotation()
annotation_2 = factories.Annotation()
assert annotation_1.get('text')
assert annotation_2.get('text')
assert (annotation_1['text'] != annotation_2['text']) | Each annotation should have unique note text. | h/test/factories_test.py | test_text | noscripter/h | 0 | python | def test_text(self):
annotation_1 = factories.Annotation()
annotation_2 = factories.Annotation()
assert annotation_1.get('text')
assert annotation_2.get('text')
assert (annotation_1['text'] != annotation_2['text']) | def test_text(self):
annotation_1 = factories.Annotation()
annotation_2 = factories.Annotation()
assert annotation_1.get('text')
assert annotation_2.get('text')
assert (annotation_1['text'] != annotation_2['text'])<|docstring|>Each annotation should have unique note text.<|endoftext|> |
a3c775424dc13f20060bb8a9d43b79ab69da18014f832226732d2add05636909 | def test_custom_user(self):
'A custom username should be used in the user field.'
annotation = factories.Annotation(username='bobo')
assert ('bobo' in annotation['user'])
assert ('username' not in annotation) | A custom username should be used in the user field. | h/test/factories_test.py | test_custom_user | noscripter/h | 0 | python | def test_custom_user(self):
annotation = factories.Annotation(username='bobo')
assert ('bobo' in annotation['user'])
assert ('username' not in annotation) | def test_custom_user(self):
annotation = factories.Annotation(username='bobo')
assert ('bobo' in annotation['user'])
assert ('username' not in annotation)<|docstring|>A custom username should be used in the user field.<|endoftext|> |
4cb6bfdabe26d7ab05ef064a4983a160d4094033480cca9b15f1240c7c1c28df | def test_created_date(self):
'Annotations should have a created date from the current time.'
before = datetime.datetime.now()
annotation = factories.Annotation()
after = datetime.datetime.now()
created = datetime.datetime.strptime(annotation['created'], '%Y-%m-%dT%H:%M:%S.%f')
assert (before < created < after) | Annotations should have a created date from the current time. | h/test/factories_test.py | test_created_date | noscripter/h | 0 | python | def test_created_date(self):
before = datetime.datetime.now()
annotation = factories.Annotation()
after = datetime.datetime.now()
created = datetime.datetime.strptime(annotation['created'], '%Y-%m-%dT%H:%M:%S.%f')
assert (before < created < after) | def test_created_date(self):
before = datetime.datetime.now()
annotation = factories.Annotation()
after = datetime.datetime.now()
created = datetime.datetime.strptime(annotation['created'], '%Y-%m-%dT%H:%M:%S.%f')
assert (before < created < after)<|docstring|>Annotations should have a created date from the current time.<|endoftext|> |
7ebbb8804d4475c41202f394c7fec5f7811f4e4dd065e49d7de49a7579b8ce73 | def test_updated_date(self):
'Annotations should have an updated date from the current time.'
before = datetime.datetime.now()
annotation = factories.Annotation()
after = datetime.datetime.now()
updated = datetime.datetime.strptime(annotation['updated'], '%Y-%m-%dT%H:%M:%S.%f')
assert (before < updated < after) | Annotations should have an updated date from the current time. | h/test/factories_test.py | test_updated_date | noscripter/h | 0 | python | def test_updated_date(self):
before = datetime.datetime.now()
annotation = factories.Annotation()
after = datetime.datetime.now()
updated = datetime.datetime.strptime(annotation['updated'], '%Y-%m-%dT%H:%M:%S.%f')
assert (before < updated < after) | def test_updated_date(self):
before = datetime.datetime.now()
annotation = factories.Annotation()
after = datetime.datetime.now()
updated = datetime.datetime.strptime(annotation['updated'], '%Y-%m-%dT%H:%M:%S.%f')
assert (before < updated < after)<|docstring|>Annotations should have an updated date from the current time.<|endoftext|> |
92a5fe42b274be6e49c8371458252b126cce30ba945959d4a728469d2e774ce2 | def test_tags(self):
'It should be possible to choose the number of tags with num_tags.'
annotation = factories.Annotation(num_tags=20)
assert (len(annotation['tags']) == 20)
assert ('num_tags' not in annotation) | It should be possible to choose the number of tags with num_tags. | h/test/factories_test.py | test_tags | noscripter/h | 0 | python | def test_tags(self):
annotation = factories.Annotation(num_tags=20)
assert (len(annotation['tags']) == 20)
assert ('num_tags' not in annotation) | def test_tags(self):
annotation = factories.Annotation(num_tags=20)
assert (len(annotation['tags']) == 20)
assert ('num_tags' not in annotation)<|docstring|>It should be possible to choose the number of tags with num_tags.<|endoftext|> |
81431159a60cdfb2ae1ef9e2736651eebf3f9f6bb71ef673196303ab8141317d | @property
def is_completed(self):
'\n check if the current project has been completed and closed\n '
if (self.end_date is None):
return False
return (self.end_date < datetime.now().date()) | check if the current project has been completed and closed | projclock/tracker/models.py | is_completed | Allaye/Trak-r | 0 | python | @property
def is_completed(self):
'\n \n '
if (self.end_date is None):
return False
return (self.end_date < datetime.now().date()) | @property
def is_completed(self):
'\n \n '
if (self.end_date is None):
return False
return (self.end_date < datetime.now().date())<|docstring|>check if the current project has been completed and closed<|endoftext|> |
93adca22f4c52a0a702040598d97aad3c97ee20fea1757cc1a2656977b57906b | def __str__(self):
'\n convert to a string representation\n\n Returns:\n string: string representation of the object\n " <title> : <project> : <user> : <startdate> : <enddate>"\n '
if (self.end_time is None):
return f"{self.description} : {self.project.title} : {self.user.username} : {self.start_time} : {'Activity in progress'}"
else:
return f'{self.description} : {self.project.title} : {self.user.username} : {self.start_time} : {self.end_time}' | convert to a string representation
Returns:
string: string representation of the object
" <title> : <project> : <user> : <startdate> : <enddate>" | projclock/tracker/models.py | __str__ | Allaye/Trak-r | 0 | python | def __str__(self):
'\n convert to a string representation\n\n Returns:\n string: string representation of the object\n " <title> : <project> : <user> : <startdate> : <enddate>"\n '
if (self.end_time is None):
return f"{self.description} : {self.project.title} : {self.user.username} : {self.start_time} : {'Activity in progress'}"
else:
return f'{self.description} : {self.project.title} : {self.user.username} : {self.start_time} : {self.end_time}' | def __str__(self):
'\n convert to a string representation\n\n Returns:\n string: string representation of the object\n " <title> : <project> : <user> : <startdate> : <enddate>"\n '
if (self.end_time is None):
return f"{self.description} : {self.project.title} : {self.user.username} : {self.start_time} : {'Activity in progress'}"
else:
return f'{self.description} : {self.project.title} : {self.user.username} : {self.start_time} : {self.end_time}'<|docstring|>convert to a string representation
Returns:
string: string representation of the object
" <title> : <project> : <user> : <startdate> : <enddate>"<|endoftext|> |
7d039e16e901dd96a3f06b37c01bfce6f8d0444fd0867ae17b529aa86902c77f | @property
def is_running(self):
'\n check if the activity is running\n\n Returns:\n bool: True if the activity is running, False otherwise\n '
return (self.end_time is None) | check if the activity is running
Returns:
bool: True if the activity is running, False otherwise | projclock/tracker/models.py | is_running | Allaye/Trak-r | 0 | python | @property
def is_running(self):
'\n check if the activity is running\n\n Returns:\n bool: True if the activity is running, False otherwise\n '
return (self.end_time is None) | @property
def is_running(self):
'\n check if the activity is running\n\n Returns:\n bool: True if the activity is running, False otherwise\n '
return (self.end_time is None)<|docstring|>check if the activity is running
Returns:
bool: True if the activity is running, False otherwise<|endoftext|> |
1e95891ff6faaa007e7e4d7179f5c1537f1769a426e3e06ab81ce5d0d0b8738e | @cached_property
def duration(self):
'\n get the duration of the activity\n\n Returns:\n timedelta: duration of the activity\n '
if self.is_running:
sec = (datetime.now(timezone.utc) - self.start_time)
return str(timedelta(seconds=round(sec.total_seconds())))
else:
sec = (self.end_time - self.start_time)
return str(timedelta(seconds=round(sec.total_seconds()))) | get the duration of the activity
Returns:
timedelta: duration of the activity | projclock/tracker/models.py | duration | Allaye/Trak-r | 0 | python | @cached_property
def duration(self):
'\n get the duration of the activity\n\n Returns:\n timedelta: duration of the activity\n '
if self.is_running:
sec = (datetime.now(timezone.utc) - self.start_time)
return str(timedelta(seconds=round(sec.total_seconds())))
else:
sec = (self.end_time - self.start_time)
return str(timedelta(seconds=round(sec.total_seconds()))) | @cached_property
def duration(self):
'\n get the duration of the activity\n\n Returns:\n timedelta: duration of the activity\n '
if self.is_running:
sec = (datetime.now(timezone.utc) - self.start_time)
return str(timedelta(seconds=round(sec.total_seconds())))
else:
sec = (self.end_time - self.start_time)
return str(timedelta(seconds=round(sec.total_seconds())))<|docstring|>get the duration of the activity
Returns:
timedelta: duration of the activity<|endoftext|> |
d25c2aee66de158e4e5ac47f1056bc5785d7cb95ce1c211ee8cbcb1b777bc201 | def testProcess(self):
'Tests the Process function on a MacOS Notification Center db.'
plugin = mac_notificationcenter.MacNotificationCenterPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(['mac_notificationcenter.db'], plugin)
self.assertEqual(6, storage_writer.number_of_events)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2018-05-02 10:59:18.930156')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.body, 'KeePassXC can now be run')
self.assertEqual(event_data.bundle_name, 'com.google.santagui')
expected_message = 'Title: Santa registered by: com.google.santagui. Presented: Yes, Content: KeePassXC can now be run'
expected_short_message = 'Title: Santa, Content: KeePassXC can now be run'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message)
event = events[2]
self.CheckTimestamp(event.timestamp, '2018-05-02 11:13:21.531085')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.title, 'Drive File Stream')
self.assertEqual(event_data.bundle_name, 'com.google.drivefs')
expected_message = 'Title: Drive File Stream registered by: com.google.drivefs. Presented: Yes, Content: Drive File Stream is loading your files…'
expected_short_message = 'Title: Drive File Stream, Content: Drive File Stream is loading your files…'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message)
event = events[5]
self.CheckTimestamp(event.timestamp, '2018-05-16 16:38:04.686080')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.body, 'PyCharm can now be run')
self.assertEqual(event_data.bundle_name, 'com.google.santagui')
expected_message = 'Title: Santa registered by: com.google.santagui. Presented: Yes, Content: PyCharm can now be run'
expected_short_message = 'Title: Santa, Content: PyCharm can now be run'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message) | Tests the Process function on a MacOS Notification Center db. | tests/parsers/sqlite_plugins/mac_notificationcenter.py | testProcess | chjs207/filehistory_preprocessor | 27 | python | def testProcess(self):
plugin = mac_notificationcenter.MacNotificationCenterPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(['mac_notificationcenter.db'], plugin)
self.assertEqual(6, storage_writer.number_of_events)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2018-05-02 10:59:18.930156')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.body, 'KeePassXC can now be run')
self.assertEqual(event_data.bundle_name, 'com.google.santagui')
expected_message = 'Title: Santa registered by: com.google.santagui. Presented: Yes, Content: KeePassXC can now be run'
expected_short_message = 'Title: Santa, Content: KeePassXC can now be run'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message)
event = events[2]
self.CheckTimestamp(event.timestamp, '2018-05-02 11:13:21.531085')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.title, 'Drive File Stream')
self.assertEqual(event_data.bundle_name, 'com.google.drivefs')
expected_message = 'Title: Drive File Stream registered by: com.google.drivefs. Presented: Yes, Content: Drive File Stream is loading your files…'
expected_short_message = 'Title: Drive File Stream, Content: Drive File Stream is loading your files…'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message)
event = events[5]
self.CheckTimestamp(event.timestamp, '2018-05-16 16:38:04.686080')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.body, 'PyCharm can now be run')
self.assertEqual(event_data.bundle_name, 'com.google.santagui')
expected_message = 'Title: Santa registered by: com.google.santagui. Presented: Yes, Content: PyCharm can now be run'
expected_short_message = 'Title: Santa, Content: PyCharm can now be run'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message) | def testProcess(self):
plugin = mac_notificationcenter.MacNotificationCenterPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(['mac_notificationcenter.db'], plugin)
self.assertEqual(6, storage_writer.number_of_events)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2018-05-02 10:59:18.930156')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.body, 'KeePassXC can now be run')
self.assertEqual(event_data.bundle_name, 'com.google.santagui')
expected_message = 'Title: Santa registered by: com.google.santagui. Presented: Yes, Content: KeePassXC can now be run'
expected_short_message = 'Title: Santa, Content: KeePassXC can now be run'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message)
event = events[2]
self.CheckTimestamp(event.timestamp, '2018-05-02 11:13:21.531085')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.title, 'Drive File Stream')
self.assertEqual(event_data.bundle_name, 'com.google.drivefs')
expected_message = 'Title: Drive File Stream registered by: com.google.drivefs. Presented: Yes, Content: Drive File Stream is loading your files…'
expected_short_message = 'Title: Drive File Stream, Content: Drive File Stream is loading your files…'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message)
event = events[5]
self.CheckTimestamp(event.timestamp, '2018-05-16 16:38:04.686080')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_CREATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.body, 'PyCharm can now be run')
self.assertEqual(event_data.bundle_name, 'com.google.santagui')
expected_message = 'Title: Santa registered by: com.google.santagui. Presented: Yes, Content: PyCharm can now be run'
expected_short_message = 'Title: Santa, Content: PyCharm can now be run'
self._TestGetMessageStrings(event_data, expected_message, expected_short_message)<|docstring|>Tests the Process function on a MacOS Notification Center db.<|endoftext|> |
9f0caafe27a535214b6d875500853a6e502260dd293ad4de51b50086a339b164 | def get_pygments_style_colors(style, *, fallbacks):
'Get background/foreground colors for given pygments style.'
background = style.background_color
text_colors = style.style_for_token(Text)
foreground = text_colors['color']
if (not background):
background = fallbacks['background']
if (not foreground):
foreground = fallbacks['foreground']
else:
foreground = f'#{foreground}'
return {'background': background, 'foreground': foreground} | Get background/foreground colors for given pygments style. | src/furo/code.py | get_pygments_style_colors | sethmlarson/furo | 0 | python | def get_pygments_style_colors(style, *, fallbacks):
background = style.background_color
text_colors = style.style_for_token(Text)
foreground = text_colors['color']
if (not background):
background = fallbacks['background']
if (not foreground):
foreground = fallbacks['foreground']
else:
foreground = f'#{foreground}'
return {'background': background, 'foreground': foreground} | def get_pygments_style_colors(style, *, fallbacks):
background = style.background_color
text_colors = style.style_for_token(Text)
foreground = text_colors['color']
if (not background):
background = fallbacks['background']
if (not foreground):
foreground = fallbacks['foreground']
else:
foreground = f'#{foreground}'
return {'background': background, 'foreground': foreground}<|docstring|>Get background/foreground colors for given pygments style.<|endoftext|> |
9980361ac7af9424504eb06be881278ab083e2fe7255e379c4b2264a69b8cbc0 | def generate_scripts():
'\n Generate the Lua sql statement of the Event-Loop from scratch and save it.\n '
logging.basicConfig(format='%(asctime)s - %(module)s - %(message)s', level=logging.DEBUG)
BundleLuaScripts.save_statement() | Generate the Lua sql statement of the Event-Loop from scratch and save it. | exasol_advanced_analytics_framework/deployment/regenerate_scripts.py | generate_scripts | exasol/advanced-analytics-framework | 0 | python | def generate_scripts():
'\n \n '
logging.basicConfig(format='%(asctime)s - %(module)s - %(message)s', level=logging.DEBUG)
BundleLuaScripts.save_statement() | def generate_scripts():
'\n \n '
logging.basicConfig(format='%(asctime)s - %(module)s - %(message)s', level=logging.DEBUG)
BundleLuaScripts.save_statement()<|docstring|>Generate the Lua sql statement of the Event-Loop from scratch and save it.<|endoftext|> |
535d4e34e5b488ee92f08223dc7d3fcdb79dde68d421232ae6887d0038d770c9 | def remove_pg_pins(hier_graph_dict: dict, circuit_name, pg_pins):
'\n removes power pins to be sent as signal by recursively finding all connections to power pins \n and removing them from subcircuit defination and instance calls\n for each circuit different power connection creates an extra subcircuit\n Required by PnR as it does not make power connections as ports\n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n G : networkx graph\n graph of circuit.\n pg_pins : list\n graph of circuit.\n Returns\n -------\n None.\n\n '
G = hier_graph_dict[circuit_name]['graph']
logger.debug(f'checking pg ports in {circuit_name} {pg_pins}')
for (node, attr) in G.nodes(data=True):
if (('sub_graph' not in attr) or (attr['inst_type'] == 'net') or (not attr['connection'])):
continue
elif (len((set(attr['connection'].values()) & set(pg_pins))) > 0):
logger.debug(f"node: {node} connections {attr['connection']} {attr['ports']}")
pg_conn = {}
for (k, v) in attr['connection'].items():
if ((v in pg_pins) and (k not in pg_pins)):
pg_conn[k] = v
if pg_conn:
logger.debug(f'removing power pin connected as signal net {pg_conn} in {node}')
for (k, v) in pg_conn.items():
del attr['connection'][k]
del attr['edge_weight'][attr['ports'].index(v)]
attr['ports'].remove(v)
updated_name = modify_pg_conn_subckt(hier_graph_dict, attr['inst_type'], pg_conn)
attr['inst_type'] = updated_name
remove_pg_pins(hier_graph_dict, updated_name, pg_pins) | removes power pins to be sent as signal by recursively finding all connections to power pins
and removing them from subcircuit defination and instance calls
for each circuit different power connection creates an extra subcircuit
Required by PnR as it does not make power connections as ports
Parameters
----------
hier_graph_dict : dict
dictionary of all circuit in spice file
circuit_name : str
name of circuit to be processed.
G : networkx graph
graph of circuit.
pg_pins : list
graph of circuit.
Returns
-------
None. | align/compiler/preprocess.py | remove_pg_pins | mabrains/ALIGN-public | 0 | python | def remove_pg_pins(hier_graph_dict: dict, circuit_name, pg_pins):
'\n removes power pins to be sent as signal by recursively finding all connections to power pins \n and removing them from subcircuit defination and instance calls\n for each circuit different power connection creates an extra subcircuit\n Required by PnR as it does not make power connections as ports\n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n G : networkx graph\n graph of circuit.\n pg_pins : list\n graph of circuit.\n Returns\n -------\n None.\n\n '
G = hier_graph_dict[circuit_name]['graph']
logger.debug(f'checking pg ports in {circuit_name} {pg_pins}')
for (node, attr) in G.nodes(data=True):
if (('sub_graph' not in attr) or (attr['inst_type'] == 'net') or (not attr['connection'])):
continue
elif (len((set(attr['connection'].values()) & set(pg_pins))) > 0):
logger.debug(f"node: {node} connections {attr['connection']} {attr['ports']}")
pg_conn = {}
for (k, v) in attr['connection'].items():
if ((v in pg_pins) and (k not in pg_pins)):
pg_conn[k] = v
if pg_conn:
logger.debug(f'removing power pin connected as signal net {pg_conn} in {node}')
for (k, v) in pg_conn.items():
del attr['connection'][k]
del attr['edge_weight'][attr['ports'].index(v)]
attr['ports'].remove(v)
updated_name = modify_pg_conn_subckt(hier_graph_dict, attr['inst_type'], pg_conn)
attr['inst_type'] = updated_name
remove_pg_pins(hier_graph_dict, updated_name, pg_pins) | def remove_pg_pins(hier_graph_dict: dict, circuit_name, pg_pins):
'\n removes power pins to be sent as signal by recursively finding all connections to power pins \n and removing them from subcircuit defination and instance calls\n for each circuit different power connection creates an extra subcircuit\n Required by PnR as it does not make power connections as ports\n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n G : networkx graph\n graph of circuit.\n pg_pins : list\n graph of circuit.\n Returns\n -------\n None.\n\n '
G = hier_graph_dict[circuit_name]['graph']
logger.debug(f'checking pg ports in {circuit_name} {pg_pins}')
for (node, attr) in G.nodes(data=True):
if (('sub_graph' not in attr) or (attr['inst_type'] == 'net') or (not attr['connection'])):
continue
elif (len((set(attr['connection'].values()) & set(pg_pins))) > 0):
logger.debug(f"node: {node} connections {attr['connection']} {attr['ports']}")
pg_conn = {}
for (k, v) in attr['connection'].items():
if ((v in pg_pins) and (k not in pg_pins)):
pg_conn[k] = v
if pg_conn:
logger.debug(f'removing power pin connected as signal net {pg_conn} in {node}')
for (k, v) in pg_conn.items():
del attr['connection'][k]
del attr['edge_weight'][attr['ports'].index(v)]
attr['ports'].remove(v)
updated_name = modify_pg_conn_subckt(hier_graph_dict, attr['inst_type'], pg_conn)
attr['inst_type'] = updated_name
remove_pg_pins(hier_graph_dict, updated_name, pg_pins)<|docstring|>removes power pins to be sent as signal by recursively finding all connections to power pins
and removing them from subcircuit defination and instance calls
for each circuit different power connection creates an extra subcircuit
Required by PnR as it does not make power connections as ports
Parameters
----------
hier_graph_dict : dict
dictionary of all circuit in spice file
circuit_name : str
name of circuit to be processed.
G : networkx graph
graph of circuit.
pg_pins : list
graph of circuit.
Returns
-------
None.<|endoftext|> |
4ba5ca8699a3f8acfa2617b78f09bfbd930ad11701dce29519e025455efc6f56 | def modify_pg_conn_subckt(hier_graph_dict: dict, circuit_name, pg_conn):
'\n creates a new subcircuit by removing power pins from a subcircuit defination \n and change internal connections within the subcircuit\n \n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n pg_conn : dict\n ports to be modified and corresponding pg pin.\n Returns\n -------\n new subcircuit name\n\n '
new = copy.deepcopy(hier_graph_dict[circuit_name])
logger.debug(f'modifying subckt {circuit_name} {new} {pg_conn}')
for (k, v) in pg_conn.items():
logger.debug(f'fixing port {k} to {v} for all inst in {circuit_name}')
new['ports'].remove(k)
del new['ports_weight'][k]
if (v in new['graph'].nodes()):
old_edge_wt = list(copy.deepcopy(new['graph'].edges(v, data=True)))
new['graph'] = nx.relabel_nodes(new['graph'], {k: v}, copy=False)
for (n1, n2, v1) in new['graph'].edges(v, data=True):
for (n11, n21, v11) in old_edge_wt:
if ((n1 == n11) and (n2 == n21)):
v1['weight'] = (v1['weight'] | v11['weight'])
logger.debug(f"updated weights {old_edge_wt} {new['graph'].edges(v, data=True)}")
else:
new['graph'] = nx.relabel_nodes(new['graph'], {k: v}, copy=False)
for (node, attr) in new['graph'].nodes(data=True):
if (attr['inst_type'] == 'net'):
continue
attr['ports'] = [(v if (x == k) else x) for x in attr['ports']]
if (('connection' in attr) and attr['connection']):
for (a, b) in attr['connection'].items():
if (b == k):
attr['connection'][a] = v
logger.debug(f'updated attributes of {node}: {attr}')
i = 1
updated_ckt_name = ((circuit_name + 'pg') + str(i))
while (updated_ckt_name in hier_graph_dict.keys()):
if (hier_graph_dict[updated_ckt_name]['ports'] == new['ports']):
break
else:
i = (i + 1)
updated_ckt_name = ((circuit_name + 'pg') + str(i))
hier_graph_dict[updated_ckt_name] = new
return updated_ckt_name | creates a new subcircuit by removing power pins from a subcircuit defination
and change internal connections within the subcircuit
Parameters
----------
hier_graph_dict : dict
dictionary of all circuit in spice file
circuit_name : str
name of circuit to be processed.
pg_conn : dict
ports to be modified and corresponding pg pin.
Returns
-------
new subcircuit name | align/compiler/preprocess.py | modify_pg_conn_subckt | mabrains/ALIGN-public | 0 | python | def modify_pg_conn_subckt(hier_graph_dict: dict, circuit_name, pg_conn):
'\n creates a new subcircuit by removing power pins from a subcircuit defination \n and change internal connections within the subcircuit\n \n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n pg_conn : dict\n ports to be modified and corresponding pg pin.\n Returns\n -------\n new subcircuit name\n\n '
new = copy.deepcopy(hier_graph_dict[circuit_name])
logger.debug(f'modifying subckt {circuit_name} {new} {pg_conn}')
for (k, v) in pg_conn.items():
logger.debug(f'fixing port {k} to {v} for all inst in {circuit_name}')
new['ports'].remove(k)
del new['ports_weight'][k]
if (v in new['graph'].nodes()):
old_edge_wt = list(copy.deepcopy(new['graph'].edges(v, data=True)))
new['graph'] = nx.relabel_nodes(new['graph'], {k: v}, copy=False)
for (n1, n2, v1) in new['graph'].edges(v, data=True):
for (n11, n21, v11) in old_edge_wt:
if ((n1 == n11) and (n2 == n21)):
v1['weight'] = (v1['weight'] | v11['weight'])
logger.debug(f"updated weights {old_edge_wt} {new['graph'].edges(v, data=True)}")
else:
new['graph'] = nx.relabel_nodes(new['graph'], {k: v}, copy=False)
for (node, attr) in new['graph'].nodes(data=True):
if (attr['inst_type'] == 'net'):
continue
attr['ports'] = [(v if (x == k) else x) for x in attr['ports']]
if (('connection' in attr) and attr['connection']):
for (a, b) in attr['connection'].items():
if (b == k):
attr['connection'][a] = v
logger.debug(f'updated attributes of {node}: {attr}')
i = 1
updated_ckt_name = ((circuit_name + 'pg') + str(i))
while (updated_ckt_name in hier_graph_dict.keys()):
if (hier_graph_dict[updated_ckt_name]['ports'] == new['ports']):
break
else:
i = (i + 1)
updated_ckt_name = ((circuit_name + 'pg') + str(i))
hier_graph_dict[updated_ckt_name] = new
return updated_ckt_name | def modify_pg_conn_subckt(hier_graph_dict: dict, circuit_name, pg_conn):
'\n creates a new subcircuit by removing power pins from a subcircuit defination \n and change internal connections within the subcircuit\n \n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n pg_conn : dict\n ports to be modified and corresponding pg pin.\n Returns\n -------\n new subcircuit name\n\n '
new = copy.deepcopy(hier_graph_dict[circuit_name])
logger.debug(f'modifying subckt {circuit_name} {new} {pg_conn}')
for (k, v) in pg_conn.items():
logger.debug(f'fixing port {k} to {v} for all inst in {circuit_name}')
new['ports'].remove(k)
del new['ports_weight'][k]
if (v in new['graph'].nodes()):
old_edge_wt = list(copy.deepcopy(new['graph'].edges(v, data=True)))
new['graph'] = nx.relabel_nodes(new['graph'], {k: v}, copy=False)
for (n1, n2, v1) in new['graph'].edges(v, data=True):
for (n11, n21, v11) in old_edge_wt:
if ((n1 == n11) and (n2 == n21)):
v1['weight'] = (v1['weight'] | v11['weight'])
logger.debug(f"updated weights {old_edge_wt} {new['graph'].edges(v, data=True)}")
else:
new['graph'] = nx.relabel_nodes(new['graph'], {k: v}, copy=False)
for (node, attr) in new['graph'].nodes(data=True):
if (attr['inst_type'] == 'net'):
continue
attr['ports'] = [(v if (x == k) else x) for x in attr['ports']]
if (('connection' in attr) and attr['connection']):
for (a, b) in attr['connection'].items():
if (b == k):
attr['connection'][a] = v
logger.debug(f'updated attributes of {node}: {attr}')
i = 1
updated_ckt_name = ((circuit_name + 'pg') + str(i))
while (updated_ckt_name in hier_graph_dict.keys()):
if (hier_graph_dict[updated_ckt_name]['ports'] == new['ports']):
break
else:
i = (i + 1)
updated_ckt_name = ((circuit_name + 'pg') + str(i))
hier_graph_dict[updated_ckt_name] = new
return updated_ckt_name<|docstring|>creates a new subcircuit by removing power pins from a subcircuit defination
and change internal connections within the subcircuit
Parameters
----------
hier_graph_dict : dict
dictionary of all circuit in spice file
circuit_name : str
name of circuit to be processed.
pg_conn : dict
ports to be modified and corresponding pg pin.
Returns
-------
new subcircuit name<|endoftext|> |
1d6a9cef477b2720bdc5324953deadf809c21b08b0ae02dafacaaefd6a851162 | def preprocess_stack_parallel(hier_graph_dict: dict, circuit_name, G):
'\n Preprocess the input graph by reducing parallel caps, series resistance, identify stacking, adding parallel transistors.\n\n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n G : networkx graph\n graph of circuit.\n\n Returns\n -------\n None.\n\n '
logger.debug(f'no of nodes: {len(G)}')
add_parallel_caps(G)
add_series_res(G)
add_stacked_transistor(G)
add_parallel_transistor(G)
initial_size = len(G)
delta = 1
while (delta > 0):
logger.debug(f'CHECKING stacked transistors {circuit_name} {G}')
add_stacked_transistor(G)
add_parallel_transistor(G)
delta = (initial_size - len(G))
initial_size = len(G)
attributes = [attr for (node, attr) in G.nodes(data=True) if ('net' not in attr['inst_type'])]
if (len(attributes) == 1):
if (('sub_graph' in attributes[0].keys()) and (attributes[0]['sub_graph'] is not None)):
logger.debug(f"sub_graph nodes {attributes[0]['sub_graph'].nodes()}")
stacked_ckt = preprocess_stack_parallel(hier_graph_dict, attributes[0]['real_inst_type'], attributes[0]['sub_graph'])
if (stacked_ckt == None):
return None
for ckt in hier_graph_dict.values():
for (node, attr) in ckt['graph'].nodes(data=True):
if (('net' not in attr['inst_type']) and (attr['inst_type'] == circuit_name)):
logger.debug(f'updating instance {node} {attr} with stacked device {attributes}')
attr['inst_type'] = attributes[0]['inst_type']
attr['real_inst_type'] = attributes[0]['real_inst_type']
attr['values'] = {**attributes[0]['values'], **attr['values']}
attr['sub_graph'] = None
attr['ports'] = [attr['connection'][port] for port in attributes[0]['ports'] if (port in attr['connection'])]
attr['edge_weight'] = attributes[0]['edge_weight']
attr['connection'] = None
return circuit_name
else:
return None | Preprocess the input graph by reducing parallel caps, series resistance, identify stacking, adding parallel transistors.
Parameters
----------
hier_graph_dict : dict
dictionary of all circuit in spice file
circuit_name : str
name of circuit to be processed.
G : networkx graph
graph of circuit.
Returns
-------
None. | align/compiler/preprocess.py | preprocess_stack_parallel | mabrains/ALIGN-public | 0 | python | def preprocess_stack_parallel(hier_graph_dict: dict, circuit_name, G):
'\n Preprocess the input graph by reducing parallel caps, series resistance, identify stacking, adding parallel transistors.\n\n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n G : networkx graph\n graph of circuit.\n\n Returns\n -------\n None.\n\n '
logger.debug(f'no of nodes: {len(G)}')
add_parallel_caps(G)
add_series_res(G)
add_stacked_transistor(G)
add_parallel_transistor(G)
initial_size = len(G)
delta = 1
while (delta > 0):
logger.debug(f'CHECKING stacked transistors {circuit_name} {G}')
add_stacked_transistor(G)
add_parallel_transistor(G)
delta = (initial_size - len(G))
initial_size = len(G)
attributes = [attr for (node, attr) in G.nodes(data=True) if ('net' not in attr['inst_type'])]
if (len(attributes) == 1):
if (('sub_graph' in attributes[0].keys()) and (attributes[0]['sub_graph'] is not None)):
logger.debug(f"sub_graph nodes {attributes[0]['sub_graph'].nodes()}")
stacked_ckt = preprocess_stack_parallel(hier_graph_dict, attributes[0]['real_inst_type'], attributes[0]['sub_graph'])
if (stacked_ckt == None):
return None
for ckt in hier_graph_dict.values():
for (node, attr) in ckt['graph'].nodes(data=True):
if (('net' not in attr['inst_type']) and (attr['inst_type'] == circuit_name)):
logger.debug(f'updating instance {node} {attr} with stacked device {attributes}')
attr['inst_type'] = attributes[0]['inst_type']
attr['real_inst_type'] = attributes[0]['real_inst_type']
attr['values'] = {**attributes[0]['values'], **attr['values']}
attr['sub_graph'] = None
attr['ports'] = [attr['connection'][port] for port in attributes[0]['ports'] if (port in attr['connection'])]
attr['edge_weight'] = attributes[0]['edge_weight']
attr['connection'] = None
return circuit_name
else:
return None | def preprocess_stack_parallel(hier_graph_dict: dict, circuit_name, G):
'\n Preprocess the input graph by reducing parallel caps, series resistance, identify stacking, adding parallel transistors.\n\n Parameters\n ----------\n hier_graph_dict : dict\n dictionary of all circuit in spice file\n circuit_name : str\n name of circuit to be processed.\n G : networkx graph\n graph of circuit.\n\n Returns\n -------\n None.\n\n '
logger.debug(f'no of nodes: {len(G)}')
add_parallel_caps(G)
add_series_res(G)
add_stacked_transistor(G)
add_parallel_transistor(G)
initial_size = len(G)
delta = 1
while (delta > 0):
logger.debug(f'CHECKING stacked transistors {circuit_name} {G}')
add_stacked_transistor(G)
add_parallel_transistor(G)
delta = (initial_size - len(G))
initial_size = len(G)
attributes = [attr for (node, attr) in G.nodes(data=True) if ('net' not in attr['inst_type'])]
if (len(attributes) == 1):
if (('sub_graph' in attributes[0].keys()) and (attributes[0]['sub_graph'] is not None)):
logger.debug(f"sub_graph nodes {attributes[0]['sub_graph'].nodes()}")
stacked_ckt = preprocess_stack_parallel(hier_graph_dict, attributes[0]['real_inst_type'], attributes[0]['sub_graph'])
if (stacked_ckt == None):
return None
for ckt in hier_graph_dict.values():
for (node, attr) in ckt['graph'].nodes(data=True):
if (('net' not in attr['inst_type']) and (attr['inst_type'] == circuit_name)):
logger.debug(f'updating instance {node} {attr} with stacked device {attributes}')
attr['inst_type'] = attributes[0]['inst_type']
attr['real_inst_type'] = attributes[0]['real_inst_type']
attr['values'] = {**attributes[0]['values'], **attr['values']}
attr['sub_graph'] = None
attr['ports'] = [attr['connection'][port] for port in attributes[0]['ports'] if (port in attr['connection'])]
attr['edge_weight'] = attributes[0]['edge_weight']
attr['connection'] = None
return circuit_name
else:
return None<|docstring|>Preprocess the input graph by reducing parallel caps, series resistance, identify stacking, adding parallel transistors.
Parameters
----------
hier_graph_dict : dict
dictionary of all circuit in spice file
circuit_name : str
name of circuit to be processed.
G : networkx graph
graph of circuit.
Returns
-------
None.<|endoftext|> |
f0eef3b2173d9098eb4fdcf727e8b6d9d99e4d9b470e20f8e067313d29fe7614 | def add_stacked_transistor(G):
'\n Reduce stacked transistors\n Parameters\n ----------\n G : networkx graph\n input graph\n\n Returns\n -------\n None.\n\n '
logger.debug(f'START reducing stacks in graph: {G.nodes(data=True)} {G.edges()} ')
logger.debug(f'initial size of graph: {len(G)}')
remove_nodes = []
modified_edges = {}
modified_nodes = {}
for (node, attr) in G.nodes(data=True):
if (('mos' in attr['inst_type']) and (node not in remove_nodes)):
for net in G.neighbors(node):
edge_wt = (G.get_edge_data(node, net)['weight'] & (~ 8))
if ((edge_wt == 4) and (len(list(G.neighbors(net))) == 2)):
for next_node in G.neighbors(net):
logger.debug(f' checking nodes: {node}, {next_node} {net} {modified_nodes} {remove_nodes} ')
if (len(({node, next_node} - (set(modified_nodes.keys()) | set(remove_nodes)))) != 2):
logger.debug(f'skipping {node} {next_node} as they are same or accessed before')
continue
elif ((not (next_node == node)) and (G.nodes[next_node]['inst_type'] == G.nodes[node]['inst_type']) and (G.get_edge_data(next_node, net)['weight'] == 1)):
common_nets = (set(G.neighbors(node)) & set(G.neighbors(next_node)))
source_net = [snet for snet in G.neighbors(next_node) if ((G.get_edge_data(next_node, snet)['weight'] & (~ 8)) == 4)]
gate_net = [gnet for gnet in G.neighbors(next_node) if (G.get_edge_data(next_node, gnet)['weight'] == 2)]
logger.debug(f'neighbor gate: {gate_net} source:{source_net},all neighbors: {list(G.edges(node, data=True))} {len(common_nets)}')
if (len(gate_net) == len(source_net) == 1):
source_net = source_net[0]
gate_net = gate_net[0]
logger.debug(f'source net: {source_net}, gate net: {gate_net}')
else:
continue
if ((gate_net in G.neighbors(node)) and (G.get_edge_data(node, gate_net)['weight'] == 2) and (len(common_nets) > 2)):
logger.debug(f'source net: {source_net}, gate net: {gate_net}')
else:
continue
logger.debug(f'check stack transistors: {node}, {next_node}, {gate_net}, {source_net},{common_nets}')
if (G.nodes[net]['net_type'] != 'external'):
if (G.get_edge_data(node, gate_net)['weight'] >= 2):
logger.debug(f'checking values {G.nodes[next_node]},{G.nodes[next_node]}')
if ('stack' in G.nodes[next_node]['values']):
stack = G.nodes[next_node]['values'].pop('stack')
else:
stack = 1
if ('stack' in G.nodes[node]['values']):
stack = (stack + G.nodes[node]['values'].pop('stack'))
else:
stack = (stack + 1)
if (G.nodes[next_node]['values'] == G.nodes[node]['values']):
modified_nodes[node] = stack
remove_nodes.append(net)
if G.has_edge(node, source_net):
wt = (G[next_node][source_net]['weight'] | G[node][source_net]['weight'])
else:
wt = G[next_node][source_net]['weight']
modified_edges[node] = [source_net, wt]
logger.debug(f'successfully modified node {modified_nodes}')
remove_nodes.append(next_node)
for (node, attr) in modified_edges.items():
G.add_edge(node, attr[0], weight=attr[1])
logger.debug(f"updating port names{G.nodes[node]['ports']} with {attr}")
G.nodes[node]['ports'][2] = attr[0]
for (node, attr) in modified_nodes.items():
G.nodes[node]['values']['stack'] = attr
for node in remove_nodes:
G.remove_node(node)
for (node, attr) in modified_nodes.items():
wt = [G.get_edge_data(node, net)['weight'] for net in G.neighbors(node)]
logger.debug(f'new neighbors of {node} {G.nodes[node]} {list(G.neighbors(node))} {wt}')
logger.debug(f'reduced_size after resolving stacked transistor: {len(G)} {G.nodes()}')
logger.debug('\n######################START CREATING HIERARCHY##########################\n') | Reduce stacked transistors
Parameters
----------
G : networkx graph
input graph
Returns
-------
None. | align/compiler/preprocess.py | add_stacked_transistor | mabrains/ALIGN-public | 0 | python | def add_stacked_transistor(G):
'\n Reduce stacked transistors\n Parameters\n ----------\n G : networkx graph\n input graph\n\n Returns\n -------\n None.\n\n '
logger.debug(f'START reducing stacks in graph: {G.nodes(data=True)} {G.edges()} ')
logger.debug(f'initial size of graph: {len(G)}')
remove_nodes = []
modified_edges = {}
modified_nodes = {}
for (node, attr) in G.nodes(data=True):
if (('mos' in attr['inst_type']) and (node not in remove_nodes)):
for net in G.neighbors(node):
edge_wt = (G.get_edge_data(node, net)['weight'] & (~ 8))
if ((edge_wt == 4) and (len(list(G.neighbors(net))) == 2)):
for next_node in G.neighbors(net):
logger.debug(f' checking nodes: {node}, {next_node} {net} {modified_nodes} {remove_nodes} ')
if (len(({node, next_node} - (set(modified_nodes.keys()) | set(remove_nodes)))) != 2):
logger.debug(f'skipping {node} {next_node} as they are same or accessed before')
continue
elif ((not (next_node == node)) and (G.nodes[next_node]['inst_type'] == G.nodes[node]['inst_type']) and (G.get_edge_data(next_node, net)['weight'] == 1)):
common_nets = (set(G.neighbors(node)) & set(G.neighbors(next_node)))
source_net = [snet for snet in G.neighbors(next_node) if ((G.get_edge_data(next_node, snet)['weight'] & (~ 8)) == 4)]
gate_net = [gnet for gnet in G.neighbors(next_node) if (G.get_edge_data(next_node, gnet)['weight'] == 2)]
logger.debug(f'neighbor gate: {gate_net} source:{source_net},all neighbors: {list(G.edges(node, data=True))} {len(common_nets)}')
if (len(gate_net) == len(source_net) == 1):
source_net = source_net[0]
gate_net = gate_net[0]
logger.debug(f'source net: {source_net}, gate net: {gate_net}')
else:
continue
if ((gate_net in G.neighbors(node)) and (G.get_edge_data(node, gate_net)['weight'] == 2) and (len(common_nets) > 2)):
logger.debug(f'source net: {source_net}, gate net: {gate_net}')
else:
continue
logger.debug(f'check stack transistors: {node}, {next_node}, {gate_net}, {source_net},{common_nets}')
if (G.nodes[net]['net_type'] != 'external'):
if (G.get_edge_data(node, gate_net)['weight'] >= 2):
logger.debug(f'checking values {G.nodes[next_node]},{G.nodes[next_node]}')
if ('stack' in G.nodes[next_node]['values']):
stack = G.nodes[next_node]['values'].pop('stack')
else:
stack = 1
if ('stack' in G.nodes[node]['values']):
stack = (stack + G.nodes[node]['values'].pop('stack'))
else:
stack = (stack + 1)
if (G.nodes[next_node]['values'] == G.nodes[node]['values']):
modified_nodes[node] = stack
remove_nodes.append(net)
if G.has_edge(node, source_net):
wt = (G[next_node][source_net]['weight'] | G[node][source_net]['weight'])
else:
wt = G[next_node][source_net]['weight']
modified_edges[node] = [source_net, wt]
logger.debug(f'successfully modified node {modified_nodes}')
remove_nodes.append(next_node)
for (node, attr) in modified_edges.items():
G.add_edge(node, attr[0], weight=attr[1])
logger.debug(f"updating port names{G.nodes[node]['ports']} with {attr}")
G.nodes[node]['ports'][2] = attr[0]
for (node, attr) in modified_nodes.items():
G.nodes[node]['values']['stack'] = attr
for node in remove_nodes:
G.remove_node(node)
for (node, attr) in modified_nodes.items():
wt = [G.get_edge_data(node, net)['weight'] for net in G.neighbors(node)]
logger.debug(f'new neighbors of {node} {G.nodes[node]} {list(G.neighbors(node))} {wt}')
logger.debug(f'reduced_size after resolving stacked transistor: {len(G)} {G.nodes()}')
logger.debug('\n######################START CREATING HIERARCHY##########################\n') | def add_stacked_transistor(G):
'\n Reduce stacked transistors\n Parameters\n ----------\n G : networkx graph\n input graph\n\n Returns\n -------\n None.\n\n '
logger.debug(f'START reducing stacks in graph: {G.nodes(data=True)} {G.edges()} ')
logger.debug(f'initial size of graph: {len(G)}')
remove_nodes = []
modified_edges = {}
modified_nodes = {}
for (node, attr) in G.nodes(data=True):
if (('mos' in attr['inst_type']) and (node not in remove_nodes)):
for net in G.neighbors(node):
edge_wt = (G.get_edge_data(node, net)['weight'] & (~ 8))
if ((edge_wt == 4) and (len(list(G.neighbors(net))) == 2)):
for next_node in G.neighbors(net):
logger.debug(f' checking nodes: {node}, {next_node} {net} {modified_nodes} {remove_nodes} ')
if (len(({node, next_node} - (set(modified_nodes.keys()) | set(remove_nodes)))) != 2):
logger.debug(f'skipping {node} {next_node} as they are same or accessed before')
continue
elif ((not (next_node == node)) and (G.nodes[next_node]['inst_type'] == G.nodes[node]['inst_type']) and (G.get_edge_data(next_node, net)['weight'] == 1)):
common_nets = (set(G.neighbors(node)) & set(G.neighbors(next_node)))
source_net = [snet for snet in G.neighbors(next_node) if ((G.get_edge_data(next_node, snet)['weight'] & (~ 8)) == 4)]
gate_net = [gnet for gnet in G.neighbors(next_node) if (G.get_edge_data(next_node, gnet)['weight'] == 2)]
logger.debug(f'neighbor gate: {gate_net} source:{source_net},all neighbors: {list(G.edges(node, data=True))} {len(common_nets)}')
if (len(gate_net) == len(source_net) == 1):
source_net = source_net[0]
gate_net = gate_net[0]
logger.debug(f'source net: {source_net}, gate net: {gate_net}')
else:
continue
if ((gate_net in G.neighbors(node)) and (G.get_edge_data(node, gate_net)['weight'] == 2) and (len(common_nets) > 2)):
logger.debug(f'source net: {source_net}, gate net: {gate_net}')
else:
continue
logger.debug(f'check stack transistors: {node}, {next_node}, {gate_net}, {source_net},{common_nets}')
if (G.nodes[net]['net_type'] != 'external'):
if (G.get_edge_data(node, gate_net)['weight'] >= 2):
logger.debug(f'checking values {G.nodes[next_node]},{G.nodes[next_node]}')
if ('stack' in G.nodes[next_node]['values']):
stack = G.nodes[next_node]['values'].pop('stack')
else:
stack = 1
if ('stack' in G.nodes[node]['values']):
stack = (stack + G.nodes[node]['values'].pop('stack'))
else:
stack = (stack + 1)
if (G.nodes[next_node]['values'] == G.nodes[node]['values']):
modified_nodes[node] = stack
remove_nodes.append(net)
if G.has_edge(node, source_net):
wt = (G[next_node][source_net]['weight'] | G[node][source_net]['weight'])
else:
wt = G[next_node][source_net]['weight']
modified_edges[node] = [source_net, wt]
logger.debug(f'successfully modified node {modified_nodes}')
remove_nodes.append(next_node)
for (node, attr) in modified_edges.items():
G.add_edge(node, attr[0], weight=attr[1])
logger.debug(f"updating port names{G.nodes[node]['ports']} with {attr}")
G.nodes[node]['ports'][2] = attr[0]
for (node, attr) in modified_nodes.items():
G.nodes[node]['values']['stack'] = attr
for node in remove_nodes:
G.remove_node(node)
for (node, attr) in modified_nodes.items():
wt = [G.get_edge_data(node, net)['weight'] for net in G.neighbors(node)]
logger.debug(f'new neighbors of {node} {G.nodes[node]} {list(G.neighbors(node))} {wt}')
logger.debug(f'reduced_size after resolving stacked transistor: {len(G)} {G.nodes()}')
logger.debug('\n######################START CREATING HIERARCHY##########################\n')<|docstring|>Reduce stacked transistors
Parameters
----------
G : networkx graph
input graph
Returns
-------
None.<|endoftext|> |
65e73db27d3eccdb25cfeb45ab4c013d8e032c4983052b001128a366b44656d7 | @abstractmethod
def get_data(self) -> Union[(str, Dict[(str, str)])]:
'Getter for "abstract" attribute subclasses define, `data`.'
pass | Getter for "abstract" attribute subclasses define, `data`. | qiskit/providers/ibmq/api/clients/websocket.py | get_data | Zoufalc/qiskit-ibmq-provider | 1 | python | @abstractmethod
def get_data(self) -> Union[(str, Dict[(str, str)])]:
pass | @abstractmethod
def get_data(self) -> Union[(str, Dict[(str, str)])]:
pass<|docstring|>Getter for "abstract" attribute subclasses define, `data`.<|endoftext|> |
4ba437f1541750134191f7d7f9fadf5a3a24224d3b63bb2ae7b5aa6780d1de27 | def as_json(self) -> str:
'Return a json representation of the message.'
return json.dumps({'type': self.type_, 'data': self.get_data()}) | Return a json representation of the message. | qiskit/providers/ibmq/api/clients/websocket.py | as_json | Zoufalc/qiskit-ibmq-provider | 1 | python | def as_json(self) -> str:
return json.dumps({'type': self.type_, 'data': self.get_data()}) | def as_json(self) -> str:
return json.dumps({'type': self.type_, 'data': self.get_data()})<|docstring|>Return a json representation of the message.<|endoftext|> |
516a938d8e1c6d36a0fe48d71d1f9c311223eb598ac0ee2bb36886e4caa53f60 | @classmethod
def from_bytes(cls, json_string: bytes) -> 'WebsocketResponseMethod':
'Instantiate a message from a bytes response.'
try:
parsed_dict = json.loads(json_string.decode('utf8'))
except (ValueError, AttributeError) as ex:
raise WebsocketIBMQProtocolError('Unable to parse message') from ex
return cls(parsed_dict['type'], parsed_dict.get('data', None)) | Instantiate a message from a bytes response. | qiskit/providers/ibmq/api/clients/websocket.py | from_bytes | Zoufalc/qiskit-ibmq-provider | 1 | python | @classmethod
def from_bytes(cls, json_string: bytes) -> 'WebsocketResponseMethod':
try:
parsed_dict = json.loads(json_string.decode('utf8'))
except (ValueError, AttributeError) as ex:
raise WebsocketIBMQProtocolError('Unable to parse message') from ex
return cls(parsed_dict['type'], parsed_dict.get('data', None)) | @classmethod
def from_bytes(cls, json_string: bytes) -> 'WebsocketResponseMethod':
try:
parsed_dict = json.loads(json_string.decode('utf8'))
except (ValueError, AttributeError) as ex:
raise WebsocketIBMQProtocolError('Unable to parse message') from ex
return cls(parsed_dict['type'], parsed_dict.get('data', None))<|docstring|>Instantiate a message from a bytes response.<|endoftext|> |
8ab145423e1dc80eb4d6a666848ff52b7379d2ede39ba8f9fee8c6f16e7d56cb | @asyncio.coroutine
def _connect(self, url: str) -> Generator[(Any, None, WebSocketClientProtocol)]:
'Authenticate against the websocket server, returning the connection.\n\n Returns:\n an open websocket connection.\n\n Raises:\n WebsocketError: if the connection to the websocket server could\n not be established.\n WebsocketAuthenticationError: if the connection to the websocket\n was established, but the authentication failed.\n WebsocketIBMQProtocolError: if the connection to the websocket\n server was established, but the answer was unexpected.\n '
try:
logger.debug('Starting new websocket connection: %s', url)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
websocket = (yield from connect(url))
except (SSLError, InvalidURI) as ex:
raise ex
except Exception as ex:
raise WebsocketError('Could not connect to server') from ex
try:
auth_request = self._authentication_message()
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
(yield from websocket.send(auth_request.as_json()))
auth_response_raw = (yield from websocket.recv())
auth_response = WebsocketResponseMethod.from_bytes(auth_response_raw)
if (auth_response.type_ != 'authenticated'):
raise WebsocketIBMQProtocolError(auth_response.as_json())
except ConnectionClosed as ex:
(yield from websocket.close())
raise WebsocketAuthenticationError('Error during websocket authentication') from ex
return websocket | Authenticate against the websocket server, returning the connection.
Returns:
an open websocket connection.
Raises:
WebsocketError: if the connection to the websocket server could
not be established.
WebsocketAuthenticationError: if the connection to the websocket
was established, but the authentication failed.
WebsocketIBMQProtocolError: if the connection to the websocket
server was established, but the answer was unexpected. | qiskit/providers/ibmq/api/clients/websocket.py | _connect | Zoufalc/qiskit-ibmq-provider | 1 | python | @asyncio.coroutine
def _connect(self, url: str) -> Generator[(Any, None, WebSocketClientProtocol)]:
'Authenticate against the websocket server, returning the connection.\n\n Returns:\n an open websocket connection.\n\n Raises:\n WebsocketError: if the connection to the websocket server could\n not be established.\n WebsocketAuthenticationError: if the connection to the websocket\n was established, but the authentication failed.\n WebsocketIBMQProtocolError: if the connection to the websocket\n server was established, but the answer was unexpected.\n '
try:
logger.debug('Starting new websocket connection: %s', url)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
websocket = (yield from connect(url))
except (SSLError, InvalidURI) as ex:
raise ex
except Exception as ex:
raise WebsocketError('Could not connect to server') from ex
try:
auth_request = self._authentication_message()
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
(yield from websocket.send(auth_request.as_json()))
auth_response_raw = (yield from websocket.recv())
auth_response = WebsocketResponseMethod.from_bytes(auth_response_raw)
if (auth_response.type_ != 'authenticated'):
raise WebsocketIBMQProtocolError(auth_response.as_json())
except ConnectionClosed as ex:
(yield from websocket.close())
raise WebsocketAuthenticationError('Error during websocket authentication') from ex
return websocket | @asyncio.coroutine
def _connect(self, url: str) -> Generator[(Any, None, WebSocketClientProtocol)]:
'Authenticate against the websocket server, returning the connection.\n\n Returns:\n an open websocket connection.\n\n Raises:\n WebsocketError: if the connection to the websocket server could\n not be established.\n WebsocketAuthenticationError: if the connection to the websocket\n was established, but the authentication failed.\n WebsocketIBMQProtocolError: if the connection to the websocket\n server was established, but the answer was unexpected.\n '
try:
logger.debug('Starting new websocket connection: %s', url)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
websocket = (yield from connect(url))
except (SSLError, InvalidURI) as ex:
raise ex
except Exception as ex:
raise WebsocketError('Could not connect to server') from ex
try:
auth_request = self._authentication_message()
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
(yield from websocket.send(auth_request.as_json()))
auth_response_raw = (yield from websocket.recv())
auth_response = WebsocketResponseMethod.from_bytes(auth_response_raw)
if (auth_response.type_ != 'authenticated'):
raise WebsocketIBMQProtocolError(auth_response.as_json())
except ConnectionClosed as ex:
(yield from websocket.close())
raise WebsocketAuthenticationError('Error during websocket authentication') from ex
return websocket<|docstring|>Authenticate against the websocket server, returning the connection.
Returns:
an open websocket connection.
Raises:
WebsocketError: if the connection to the websocket server could
not be established.
WebsocketAuthenticationError: if the connection to the websocket
was established, but the authentication failed.
WebsocketIBMQProtocolError: if the connection to the websocket
server was established, but the answer was unexpected.<|endoftext|> |
86ec73adb0dacb42d4099a9b13c6494d5901a482b0dc4579849149318785405b | @asyncio.coroutine
def get_job_status(self, job_id: str, timeout: Optional[float]=None, retries: int=5, backoff_factor: float=0.5, status_deque: Optional[deque]=None) -> Generator[(Any, None, Dict[(str, str)])]:
'Return the status of a job.\n\n Reads status messages from the API, which are issued at regular\n intervals. When a final state is reached, the server\n closes the socket. If the websocket connection is closed without\n a reason, the exponential backoff algorithm is used as a basis to\n reestablish connections. The algorithm takes effect when a\n connection closes, it is given by:\n\n 1. When a connection closes, sleep for a calculated backoff\n time.\n 2. Try to retrieve another socket and increment a retry\n counter.\n 3. Attempt to get the job status.\n - If the connection is closed, go back to step 1.\n - If the job status is read successfully, reset the retry\n counter.\n 4. Continue until the job status is complete or the maximum\n number of retries is met.\n\n Args:\n job_id: id of the job.\n timeout: timeout, in seconds.\n retries: max number of retries.\n backoff_factor: backoff factor used to calculate the\n time to wait between retries.\n status_deque: deque used to share the latest status.\n\n Returns:\n the API response for the status of a job, as a dict that\n contains at least the keys ``status`` and ``id``.\n\n Raises:\n WebsocketError: if the websocket connection ended unexpectedly.\n WebsocketTimeoutError: if the timeout has been reached.\n '
url = '{}/jobs/{}/status'.format(self.websocket_url, job_id)
original_timeout = timeout
start_time = time.time()
attempt_retry = True
current_retry_attempt = 0
last_status = None
websocket = None
while (current_retry_attempt <= retries):
try:
websocket = (yield from self._connect(url))
while True:
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
if timeout:
response_raw = (yield from asyncio.wait_for(websocket.recv(), timeout=timeout))
timeout = (original_timeout - (time.time() - start_time))
else:
response_raw = (yield from websocket.recv())
logger.debug('Received message from websocket: %s', response_raw)
response = WebsocketResponseMethod.from_bytes(response_raw)
last_status = response.data
current_retry_attempt = 0
job_status = response.data.get('status')
if (job_status and (ApiJobStatus(job_status) in API_JOB_FINAL_STATES)):
return last_status
if (timeout and (timeout <= 0)):
raise WebsocketTimeoutError('Timeout reached')
if (status_deque is not None):
status_deque.append(last_status)
except (futures.TimeoutError, asyncio.TimeoutError):
raise WebsocketTimeoutError('Timeout reached') from None
except ConnectionClosed as ex:
message = 'Unexpected error'
if (ex.code == 4001):
message = 'Internal server error'
elif (ex.code == 4002):
return last_status
elif (ex.code == 4003):
attempt_retry = False
message = 'Job id not found'
raise WebsocketError('Connection with websocket closed unexpectedly: {}(status_code={})'.format(message, ex.code)) from ex
except WebsocketError as ex:
logger.info('A websocket error occurred: %s', ex)
if isinstance(ex, (WebsocketTimeoutError, WebsocketIBMQProtocolError)):
raise ex
current_retry_attempt = (current_retry_attempt + 1)
if ((current_retry_attempt > retries) or (not attempt_retry)):
raise ex
backoff_time = self._backoff_time(backoff_factor, current_retry_attempt)
logger.info('Retrying get_job_status via websocket after %s seconds: Attempt #%s.', backoff_time, current_retry_attempt)
(yield from asyncio.sleep(backoff_time))
continue
finally:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
if (websocket is not None):
(yield from websocket.close())
raise WebsocketError('Failed to establish a websocket connection after {} retries.'.format(retries)) | Return the status of a job.
Reads status messages from the API, which are issued at regular
intervals. When a final state is reached, the server
closes the socket. If the websocket connection is closed without
a reason, the exponential backoff algorithm is used as a basis to
reestablish connections. The algorithm takes effect when a
connection closes, it is given by:
1. When a connection closes, sleep for a calculated backoff
time.
2. Try to retrieve another socket and increment a retry
counter.
3. Attempt to get the job status.
- If the connection is closed, go back to step 1.
- If the job status is read successfully, reset the retry
counter.
4. Continue until the job status is complete or the maximum
number of retries is met.
Args:
job_id: id of the job.
timeout: timeout, in seconds.
retries: max number of retries.
backoff_factor: backoff factor used to calculate the
time to wait between retries.
status_deque: deque used to share the latest status.
Returns:
the API response for the status of a job, as a dict that
contains at least the keys ``status`` and ``id``.
Raises:
WebsocketError: if the websocket connection ended unexpectedly.
WebsocketTimeoutError: if the timeout has been reached. | qiskit/providers/ibmq/api/clients/websocket.py | get_job_status | Zoufalc/qiskit-ibmq-provider | 1 | python | @asyncio.coroutine
def get_job_status(self, job_id: str, timeout: Optional[float]=None, retries: int=5, backoff_factor: float=0.5, status_deque: Optional[deque]=None) -> Generator[(Any, None, Dict[(str, str)])]:
'Return the status of a job.\n\n Reads status messages from the API, which are issued at regular\n intervals. When a final state is reached, the server\n closes the socket. If the websocket connection is closed without\n a reason, the exponential backoff algorithm is used as a basis to\n reestablish connections. The algorithm takes effect when a\n connection closes, it is given by:\n\n 1. When a connection closes, sleep for a calculated backoff\n time.\n 2. Try to retrieve another socket and increment a retry\n counter.\n 3. Attempt to get the job status.\n - If the connection is closed, go back to step 1.\n - If the job status is read successfully, reset the retry\n counter.\n 4. Continue until the job status is complete or the maximum\n number of retries is met.\n\n Args:\n job_id: id of the job.\n timeout: timeout, in seconds.\n retries: max number of retries.\n backoff_factor: backoff factor used to calculate the\n time to wait between retries.\n status_deque: deque used to share the latest status.\n\n Returns:\n the API response for the status of a job, as a dict that\n contains at least the keys ``status`` and ``id``.\n\n Raises:\n WebsocketError: if the websocket connection ended unexpectedly.\n WebsocketTimeoutError: if the timeout has been reached.\n '
url = '{}/jobs/{}/status'.format(self.websocket_url, job_id)
original_timeout = timeout
start_time = time.time()
attempt_retry = True
current_retry_attempt = 0
last_status = None
websocket = None
while (current_retry_attempt <= retries):
try:
websocket = (yield from self._connect(url))
while True:
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
if timeout:
response_raw = (yield from asyncio.wait_for(websocket.recv(), timeout=timeout))
timeout = (original_timeout - (time.time() - start_time))
else:
response_raw = (yield from websocket.recv())
logger.debug('Received message from websocket: %s', response_raw)
response = WebsocketResponseMethod.from_bytes(response_raw)
last_status = response.data
current_retry_attempt = 0
job_status = response.data.get('status')
if (job_status and (ApiJobStatus(job_status) in API_JOB_FINAL_STATES)):
return last_status
if (timeout and (timeout <= 0)):
raise WebsocketTimeoutError('Timeout reached')
if (status_deque is not None):
status_deque.append(last_status)
except (futures.TimeoutError, asyncio.TimeoutError):
raise WebsocketTimeoutError('Timeout reached') from None
except ConnectionClosed as ex:
message = 'Unexpected error'
if (ex.code == 4001):
message = 'Internal server error'
elif (ex.code == 4002):
return last_status
elif (ex.code == 4003):
attempt_retry = False
message = 'Job id not found'
raise WebsocketError('Connection with websocket closed unexpectedly: {}(status_code={})'.format(message, ex.code)) from ex
except WebsocketError as ex:
logger.info('A websocket error occurred: %s', ex)
if isinstance(ex, (WebsocketTimeoutError, WebsocketIBMQProtocolError)):
raise ex
current_retry_attempt = (current_retry_attempt + 1)
if ((current_retry_attempt > retries) or (not attempt_retry)):
raise ex
backoff_time = self._backoff_time(backoff_factor, current_retry_attempt)
logger.info('Retrying get_job_status via websocket after %s seconds: Attempt #%s.', backoff_time, current_retry_attempt)
(yield from asyncio.sleep(backoff_time))
continue
finally:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
if (websocket is not None):
(yield from websocket.close())
raise WebsocketError('Failed to establish a websocket connection after {} retries.'.format(retries)) | @asyncio.coroutine
def get_job_status(self, job_id: str, timeout: Optional[float]=None, retries: int=5, backoff_factor: float=0.5, status_deque: Optional[deque]=None) -> Generator[(Any, None, Dict[(str, str)])]:
'Return the status of a job.\n\n Reads status messages from the API, which are issued at regular\n intervals. When a final state is reached, the server\n closes the socket. If the websocket connection is closed without\n a reason, the exponential backoff algorithm is used as a basis to\n reestablish connections. The algorithm takes effect when a\n connection closes, it is given by:\n\n 1. When a connection closes, sleep for a calculated backoff\n time.\n 2. Try to retrieve another socket and increment a retry\n counter.\n 3. Attempt to get the job status.\n - If the connection is closed, go back to step 1.\n - If the job status is read successfully, reset the retry\n counter.\n 4. Continue until the job status is complete or the maximum\n number of retries is met.\n\n Args:\n job_id: id of the job.\n timeout: timeout, in seconds.\n retries: max number of retries.\n backoff_factor: backoff factor used to calculate the\n time to wait between retries.\n status_deque: deque used to share the latest status.\n\n Returns:\n the API response for the status of a job, as a dict that\n contains at least the keys ``status`` and ``id``.\n\n Raises:\n WebsocketError: if the websocket connection ended unexpectedly.\n WebsocketTimeoutError: if the timeout has been reached.\n '
url = '{}/jobs/{}/status'.format(self.websocket_url, job_id)
original_timeout = timeout
start_time = time.time()
attempt_retry = True
current_retry_attempt = 0
last_status = None
websocket = None
while (current_retry_attempt <= retries):
try:
websocket = (yield from self._connect(url))
while True:
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
if timeout:
response_raw = (yield from asyncio.wait_for(websocket.recv(), timeout=timeout))
timeout = (original_timeout - (time.time() - start_time))
else:
response_raw = (yield from websocket.recv())
logger.debug('Received message from websocket: %s', response_raw)
response = WebsocketResponseMethod.from_bytes(response_raw)
last_status = response.data
current_retry_attempt = 0
job_status = response.data.get('status')
if (job_status and (ApiJobStatus(job_status) in API_JOB_FINAL_STATES)):
return last_status
if (timeout and (timeout <= 0)):
raise WebsocketTimeoutError('Timeout reached')
if (status_deque is not None):
status_deque.append(last_status)
except (futures.TimeoutError, asyncio.TimeoutError):
raise WebsocketTimeoutError('Timeout reached') from None
except ConnectionClosed as ex:
message = 'Unexpected error'
if (ex.code == 4001):
message = 'Internal server error'
elif (ex.code == 4002):
return last_status
elif (ex.code == 4003):
attempt_retry = False
message = 'Job id not found'
raise WebsocketError('Connection with websocket closed unexpectedly: {}(status_code={})'.format(message, ex.code)) from ex
except WebsocketError as ex:
logger.info('A websocket error occurred: %s', ex)
if isinstance(ex, (WebsocketTimeoutError, WebsocketIBMQProtocolError)):
raise ex
current_retry_attempt = (current_retry_attempt + 1)
if ((current_retry_attempt > retries) or (not attempt_retry)):
raise ex
backoff_time = self._backoff_time(backoff_factor, current_retry_attempt)
logger.info('Retrying get_job_status via websocket after %s seconds: Attempt #%s.', backoff_time, current_retry_attempt)
(yield from asyncio.sleep(backoff_time))
continue
finally:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
if (websocket is not None):
(yield from websocket.close())
raise WebsocketError('Failed to establish a websocket connection after {} retries.'.format(retries))<|docstring|>Return the status of a job.
Reads status messages from the API, which are issued at regular
intervals. When a final state is reached, the server
closes the socket. If the websocket connection is closed without
a reason, the exponential backoff algorithm is used as a basis to
reestablish connections. The algorithm takes effect when a
connection closes, it is given by:
1. When a connection closes, sleep for a calculated backoff
time.
2. Try to retrieve another socket and increment a retry
counter.
3. Attempt to get the job status.
- If the connection is closed, go back to step 1.
- If the job status is read successfully, reset the retry
counter.
4. Continue until the job status is complete or the maximum
number of retries is met.
Args:
job_id: id of the job.
timeout: timeout, in seconds.
retries: max number of retries.
backoff_factor: backoff factor used to calculate the
time to wait between retries.
status_deque: deque used to share the latest status.
Returns:
the API response for the status of a job, as a dict that
contains at least the keys ``status`` and ``id``.
Raises:
WebsocketError: if the websocket connection ended unexpectedly.
WebsocketTimeoutError: if the timeout has been reached.<|endoftext|> |
087b390070765f06ce5e1e2b53940f82246b3f1567c69be0c57c2dac8b43fd5d | def _backoff_time(self, backoff_factor: float, current_retry_attempt: int) -> float:
'Calculate the backoff time to sleep for.\n\n Exponential backoff time formula:\n {backoff_factor} * (2 ** (current_retry_attempt - 1))\n\n Args:\n backoff_factor: backoff factor, in seconds.\n current_retry_attempt: current number of retry attempts.\n\n Returns:\n The number of seconds to sleep for, before a retry attempt is made.\n '
backoff_time = (backoff_factor * (2 ** (current_retry_attempt - 1)))
return min(self.BACKOFF_MAX, backoff_time) | Calculate the backoff time to sleep for.
Exponential backoff time formula:
{backoff_factor} * (2 ** (current_retry_attempt - 1))
Args:
backoff_factor: backoff factor, in seconds.
current_retry_attempt: current number of retry attempts.
Returns:
The number of seconds to sleep for, before a retry attempt is made. | qiskit/providers/ibmq/api/clients/websocket.py | _backoff_time | Zoufalc/qiskit-ibmq-provider | 1 | python | def _backoff_time(self, backoff_factor: float, current_retry_attempt: int) -> float:
'Calculate the backoff time to sleep for.\n\n Exponential backoff time formula:\n {backoff_factor} * (2 ** (current_retry_attempt - 1))\n\n Args:\n backoff_factor: backoff factor, in seconds.\n current_retry_attempt: current number of retry attempts.\n\n Returns:\n The number of seconds to sleep for, before a retry attempt is made.\n '
backoff_time = (backoff_factor * (2 ** (current_retry_attempt - 1)))
return min(self.BACKOFF_MAX, backoff_time) | def _backoff_time(self, backoff_factor: float, current_retry_attempt: int) -> float:
'Calculate the backoff time to sleep for.\n\n Exponential backoff time formula:\n {backoff_factor} * (2 ** (current_retry_attempt - 1))\n\n Args:\n backoff_factor: backoff factor, in seconds.\n current_retry_attempt: current number of retry attempts.\n\n Returns:\n The number of seconds to sleep for, before a retry attempt is made.\n '
backoff_time = (backoff_factor * (2 ** (current_retry_attempt - 1)))
return min(self.BACKOFF_MAX, backoff_time)<|docstring|>Calculate the backoff time to sleep for.
Exponential backoff time formula:
{backoff_factor} * (2 ** (current_retry_attempt - 1))
Args:
backoff_factor: backoff factor, in seconds.
current_retry_attempt: current number of retry attempts.
Returns:
The number of seconds to sleep for, before a retry attempt is made.<|endoftext|> |
0c4ee9912f6b38fa6011139e914df15d2a3fd32a6ccbd3e7bef7a8cbea91f971 | def _authentication_message(self) -> 'WebsocketAuthenticationMessage':
'Return the message used for authenticating against the server.'
return WebsocketAuthenticationMessage(type_='authentication', data=self.access_token) | Return the message used for authenticating against the server. | qiskit/providers/ibmq/api/clients/websocket.py | _authentication_message | Zoufalc/qiskit-ibmq-provider | 1 | python | def _authentication_message(self) -> 'WebsocketAuthenticationMessage':
return WebsocketAuthenticationMessage(type_='authentication', data=self.access_token) | def _authentication_message(self) -> 'WebsocketAuthenticationMessage':
return WebsocketAuthenticationMessage(type_='authentication', data=self.access_token)<|docstring|>Return the message used for authenticating against the server.<|endoftext|> |
e869cce975ec200a8ed16614cd212b095b27ac35982588b4cc7f1c32e5204b25 | def normalization(img, img_size=(256, 256)):
'\n 归一化\n '
(h, w) = (np.shape(img)[0], np.shape(img)[1])
if (w > h):
gap = (w - h)
fill = np.zeros([1, w], np.uint8)
for i in range((gap // 2)):
img = np.concatenate((img, fill), axis=0)
for i in range((gap // 2)):
img = np.concatenate((fill, img), axis=0)
elif (w < h):
gap = (h - w)
fill = np.zeros([h, 1], np.uint8)
for i in range((gap // 2)):
img = np.concatenate((img, fill), axis=1)
for i in range((gap // 2)):
img = np.concatenate((fill, img), axis=1)
else:
pass
img_new = cv2.resize(img, img_size, interpolation=cv2.INTER_LINEAR)
return img_new | 归一化 | MachineLearning/TensorFlow/image-clssifier/unet/data_factory.py | normalization | YangXiaoo/NoteBook | 58 | python | def normalization(img, img_size=(256, 256)):
'\n \n '
(h, w) = (np.shape(img)[0], np.shape(img)[1])
if (w > h):
gap = (w - h)
fill = np.zeros([1, w], np.uint8)
for i in range((gap // 2)):
img = np.concatenate((img, fill), axis=0)
for i in range((gap // 2)):
img = np.concatenate((fill, img), axis=0)
elif (w < h):
gap = (h - w)
fill = np.zeros([h, 1], np.uint8)
for i in range((gap // 2)):
img = np.concatenate((img, fill), axis=1)
for i in range((gap // 2)):
img = np.concatenate((fill, img), axis=1)
else:
pass
img_new = cv2.resize(img, img_size, interpolation=cv2.INTER_LINEAR)
return img_new | def normalization(img, img_size=(256, 256)):
'\n \n '
(h, w) = (np.shape(img)[0], np.shape(img)[1])
if (w > h):
gap = (w - h)
fill = np.zeros([1, w], np.uint8)
for i in range((gap // 2)):
img = np.concatenate((img, fill), axis=0)
for i in range((gap // 2)):
img = np.concatenate((fill, img), axis=0)
elif (w < h):
gap = (h - w)
fill = np.zeros([h, 1], np.uint8)
for i in range((gap // 2)):
img = np.concatenate((img, fill), axis=1)
for i in range((gap // 2)):
img = np.concatenate((fill, img), axis=1)
else:
pass
img_new = cv2.resize(img, img_size, interpolation=cv2.INTER_LINEAR)
return img_new<|docstring|>归一化<|endoftext|> |
40b019ca8489a1b5f9b2b5d3481585cb6d8fd0e7a5923c5396e7ed89b75d2fc3 | def _get_pic_map(pic_files):
'\n pic_files: [pic_path, ]\n \n return: {pic_id : [file_path,]}\n '
file_dict = {}
for f in pic_files:
pic_basename = os.path.basename(f)
file_dict[pic_basename] = f
return file_dict | pic_files: [pic_path, ]
return: {pic_id : [file_path,]} | MachineLearning/TensorFlow/image-clssifier/unet/data_factory.py | _get_pic_map | YangXiaoo/NoteBook | 58 | python | def _get_pic_map(pic_files):
'\n pic_files: [pic_path, ]\n \n return: {pic_id : [file_path,]}\n '
file_dict = {}
for f in pic_files:
pic_basename = os.path.basename(f)
file_dict[pic_basename] = f
return file_dict | def _get_pic_map(pic_files):
'\n pic_files: [pic_path, ]\n \n return: {pic_id : [file_path,]}\n '
file_dict = {}
for f in pic_files:
pic_basename = os.path.basename(f)
file_dict[pic_basename] = f
return file_dict<|docstring|>pic_files: [pic_path, ]
return: {pic_id : [file_path,]}<|endoftext|> |
5cb84e357a7f8237f773fde64b73d716159f87db383c3b4fa60987a3076e7ae9 | def create_train_data(train_path, labels_path, output_train_data, output_labels_data, height, width):
'\n 创建测试集数据\n '
print('[INFO] Creating train datasets.')
train_files = sorted(get_files(train_path))
labels_files = sorted(get_files(labels_path))
len_train = len(train_files)
len_labels = len(labels_files)
assert (len_train == len_labels), '训练集与标签数量不一致'
label_dict = _get_pic_map(labels_files)
img_data = np.ndarray((len_train, height, width, 1), dtype=np.uint8)
img_labels = np.ndarray((len_labels, height, width, 1), dtype=np.uint8)
for i in range(len(train_files)):
train_basename = os.path.basename(train_files[i])
if (train_basename not in label_dict):
print(('[Warning] Skip %s cause there is no corresponding label.' % train_basename))
continue
label_pic = label_dict[train_basename]
img_train = cv2.imread(train_files[i], 0)
img_label = cv2.imread(label_pic, 0)
img_train = normalization(img_train, (height, width))
img_label = normalization(img_label, (height, width))
img = np.array(img_train)
label = np.array(img_label)
img_data[i] = np.reshape(img, (height, width, 1))
img_labels[i] = np.reshape(label, (height, width, 1))
if (not (i % 10)):
print(('[INFO] processed: %s' % i))
np.save(output_train_data, img_data)
np.save(output_labels_data, img_labels)
print('[INFO] Train data created successfully!')
return None | 创建测试集数据 | MachineLearning/TensorFlow/image-clssifier/unet/data_factory.py | create_train_data | YangXiaoo/NoteBook | 58 | python | def create_train_data(train_path, labels_path, output_train_data, output_labels_data, height, width):
'\n \n '
print('[INFO] Creating train datasets.')
train_files = sorted(get_files(train_path))
labels_files = sorted(get_files(labels_path))
len_train = len(train_files)
len_labels = len(labels_files)
assert (len_train == len_labels), '训练集与标签数量不一致'
label_dict = _get_pic_map(labels_files)
img_data = np.ndarray((len_train, height, width, 1), dtype=np.uint8)
img_labels = np.ndarray((len_labels, height, width, 1), dtype=np.uint8)
for i in range(len(train_files)):
train_basename = os.path.basename(train_files[i])
if (train_basename not in label_dict):
print(('[Warning] Skip %s cause there is no corresponding label.' % train_basename))
continue
label_pic = label_dict[train_basename]
img_train = cv2.imread(train_files[i], 0)
img_label = cv2.imread(label_pic, 0)
img_train = normalization(img_train, (height, width))
img_label = normalization(img_label, (height, width))
img = np.array(img_train)
label = np.array(img_label)
img_data[i] = np.reshape(img, (height, width, 1))
img_labels[i] = np.reshape(label, (height, width, 1))
if (not (i % 10)):
print(('[INFO] processed: %s' % i))
np.save(output_train_data, img_data)
np.save(output_labels_data, img_labels)
print('[INFO] Train data created successfully!')
return None | def create_train_data(train_path, labels_path, output_train_data, output_labels_data, height, width):
'\n \n '
print('[INFO] Creating train datasets.')
train_files = sorted(get_files(train_path))
labels_files = sorted(get_files(labels_path))
len_train = len(train_files)
len_labels = len(labels_files)
assert (len_train == len_labels), '训练集与标签数量不一致'
label_dict = _get_pic_map(labels_files)
img_data = np.ndarray((len_train, height, width, 1), dtype=np.uint8)
img_labels = np.ndarray((len_labels, height, width, 1), dtype=np.uint8)
for i in range(len(train_files)):
train_basename = os.path.basename(train_files[i])
if (train_basename not in label_dict):
print(('[Warning] Skip %s cause there is no corresponding label.' % train_basename))
continue
label_pic = label_dict[train_basename]
img_train = cv2.imread(train_files[i], 0)
img_label = cv2.imread(label_pic, 0)
img_train = normalization(img_train, (height, width))
img_label = normalization(img_label, (height, width))
img = np.array(img_train)
label = np.array(img_label)
img_data[i] = np.reshape(img, (height, width, 1))
img_labels[i] = np.reshape(label, (height, width, 1))
if (not (i % 10)):
print(('[INFO] processed: %s' % i))
np.save(output_train_data, img_data)
np.save(output_labels_data, img_labels)
print('[INFO] Train data created successfully!')
return None<|docstring|>创建测试集数据<|endoftext|> |
6759ebc58a9783ef9ff193af8de844697eb4ac13252e90b23f7cf97f957faf68 | def load_train_data(train_data_path, labels_data_path):
'\n 加载训练集数据\n '
imgs_train = np.load(train_data_path)
imgs_mask_train = np.load(labels_data_path)
imgs_train = imgs_train.astype('float32')
imgs_mask_train = imgs_mask_train.astype('float32')
imgs_train /= 255
mean = imgs_train.mean(axis=0)
imgs_train -= mean
imgs_mask_train /= 255
imgs_mask_train[(imgs_mask_train > 0.5)] = 1
imgs_mask_train[(imgs_mask_train <= 0.5)] = 0
print('[INFO] Data loaded successfully.')
return (imgs_train, imgs_mask_train) | 加载训练集数据 | MachineLearning/TensorFlow/image-clssifier/unet/data_factory.py | load_train_data | YangXiaoo/NoteBook | 58 | python | def load_train_data(train_data_path, labels_data_path):
'\n \n '
imgs_train = np.load(train_data_path)
imgs_mask_train = np.load(labels_data_path)
imgs_train = imgs_train.astype('float32')
imgs_mask_train = imgs_mask_train.astype('float32')
imgs_train /= 255
mean = imgs_train.mean(axis=0)
imgs_train -= mean
imgs_mask_train /= 255
imgs_mask_train[(imgs_mask_train > 0.5)] = 1
imgs_mask_train[(imgs_mask_train <= 0.5)] = 0
print('[INFO] Data loaded successfully.')
return (imgs_train, imgs_mask_train) | def load_train_data(train_data_path, labels_data_path):
'\n \n '
imgs_train = np.load(train_data_path)
imgs_mask_train = np.load(labels_data_path)
imgs_train = imgs_train.astype('float32')
imgs_mask_train = imgs_mask_train.astype('float32')
imgs_train /= 255
mean = imgs_train.mean(axis=0)
imgs_train -= mean
imgs_mask_train /= 255
imgs_mask_train[(imgs_mask_train > 0.5)] = 1
imgs_mask_train[(imgs_mask_train <= 0.5)] = 0
print('[INFO] Data loaded successfully.')
return (imgs_train, imgs_mask_train)<|docstring|>加载训练集数据<|endoftext|> |
cd2c5dff1352694818a917c08618664ae65044a153629e504fb89a54c0ebc6a3 | def create_test_data(data_path, output_path, normalization_pic_path, height, width, suffix='.png'):
'\n 创建测试集, 并将归一化后的数据保存到指定路径\n '
print('[INFO] Creating test datasets.')
files = sorted(get_files(data_path))
len_files = len(files)
img_data = np.ndarray((len_files, height, width, 1), dtype=np.uint8)
for i in range(len(files)):
file_path = files[i]
pic_name = os.path.basename(file_path)
img_test = cv2.imread(file_path, 0)
img_test = normalization(img_test, (height, width))
tmp_save_path = os.path.join(normalization_pic_path, (str(i) + suffix))
cv2.imwrite(tmp_save_path, img_test)
img = np.array(img_test)
img_data[i] = np.reshape(img, (height, width, 1))
if (not (i % 10)):
print(('[INFO] processed: %s' % i))
np.save(output_path, img_data)
print('[INFO] Test data created successfully.') | 创建测试集, 并将归一化后的数据保存到指定路径 | MachineLearning/TensorFlow/image-clssifier/unet/data_factory.py | create_test_data | YangXiaoo/NoteBook | 58 | python | def create_test_data(data_path, output_path, normalization_pic_path, height, width, suffix='.png'):
'\n \n '
print('[INFO] Creating test datasets.')
files = sorted(get_files(data_path))
len_files = len(files)
img_data = np.ndarray((len_files, height, width, 1), dtype=np.uint8)
for i in range(len(files)):
file_path = files[i]
pic_name = os.path.basename(file_path)
img_test = cv2.imread(file_path, 0)
img_test = normalization(img_test, (height, width))
tmp_save_path = os.path.join(normalization_pic_path, (str(i) + suffix))
cv2.imwrite(tmp_save_path, img_test)
img = np.array(img_test)
img_data[i] = np.reshape(img, (height, width, 1))
if (not (i % 10)):
print(('[INFO] processed: %s' % i))
np.save(output_path, img_data)
print('[INFO] Test data created successfully.') | def create_test_data(data_path, output_path, normalization_pic_path, height, width, suffix='.png'):
'\n \n '
print('[INFO] Creating test datasets.')
files = sorted(get_files(data_path))
len_files = len(files)
img_data = np.ndarray((len_files, height, width, 1), dtype=np.uint8)
for i in range(len(files)):
file_path = files[i]
pic_name = os.path.basename(file_path)
img_test = cv2.imread(file_path, 0)
img_test = normalization(img_test, (height, width))
tmp_save_path = os.path.join(normalization_pic_path, (str(i) + suffix))
cv2.imwrite(tmp_save_path, img_test)
img = np.array(img_test)
img_data[i] = np.reshape(img, (height, width, 1))
if (not (i % 10)):
print(('[INFO] processed: %s' % i))
np.save(output_path, img_data)
print('[INFO] Test data created successfully.')<|docstring|>创建测试集, 并将归一化后的数据保存到指定路径<|endoftext|> |
7e52d064061ed47a3903ed513144ac04c338bbb787b96f883e3f68d422de0147 | def load_test_data(data_path):
'\n 加载测试数据\n '
imgs_test = np.load(data_path)
imgs_test = imgs_test.astype('float32')
imgs_test /= 255
mean = imgs_test.mean(axis=0)
imgs_test -= mean
print('[INFO] Data loaded successfully.')
return imgs_test | 加载测试数据 | MachineLearning/TensorFlow/image-clssifier/unet/data_factory.py | load_test_data | YangXiaoo/NoteBook | 58 | python | def load_test_data(data_path):
'\n \n '
imgs_test = np.load(data_path)
imgs_test = imgs_test.astype('float32')
imgs_test /= 255
mean = imgs_test.mean(axis=0)
imgs_test -= mean
print('[INFO] Data loaded successfully.')
return imgs_test | def load_test_data(data_path):
'\n \n '
imgs_test = np.load(data_path)
imgs_test = imgs_test.astype('float32')
imgs_test /= 255
mean = imgs_test.mean(axis=0)
imgs_test -= mean
print('[INFO] Data loaded successfully.')
return imgs_test<|docstring|>加载测试数据<|endoftext|> |
cf635886944c64ee4af0d5e0954567698d67f3893fba9d343964f941330cbfec | def test_wait_for_db_ready(self):
'Test waiting for db when db is available'
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertTrue(True) | Test waiting for db when db is available | app/core/tests/test_commands.py | test_wait_for_db_ready | LondonAppDeveloper/starter-django-bootstrap-postgres | 2 | python | def test_wait_for_db_ready(self):
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertTrue(True) | def test_wait_for_db_ready(self):
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertTrue(True)<|docstring|>Test waiting for db when db is available<|endoftext|> |
13c2aa44b54171099dea76b9265246876f634cca78230f710d7c989a4605908a | @patch('time.sleep', return_value=None)
def test_wait_for_db(self, ts):
'Test waiting for db'
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = (([OperationalError] * 5) + [True])
call_command('wait_for_db')
self.assertGreaterEqual(gi.call_count, 5) | Test waiting for db | app/core/tests/test_commands.py | test_wait_for_db | LondonAppDeveloper/starter-django-bootstrap-postgres | 2 | python | @patch('time.sleep', return_value=None)
def test_wait_for_db(self, ts):
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = (([OperationalError] * 5) + [True])
call_command('wait_for_db')
self.assertGreaterEqual(gi.call_count, 5) | @patch('time.sleep', return_value=None)
def test_wait_for_db(self, ts):
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = (([OperationalError] * 5) + [True])
call_command('wait_for_db')
self.assertGreaterEqual(gi.call_count, 5)<|docstring|>Test waiting for db<|endoftext|> |
030251f9c2ed3474348831e1fae8fef2a3f47d3f485aa27d1c242b303ed3fed5 | def _default_module_name(testonly):
' Provide better defaults for package names.\n\n e.g. rather than angular/packages/core/testing we want @angular/core/testing\n\n TODO(alexeagle): we ought to supply a default module name for every library in the repo.\n But we short-circuit below in cases that are currently not working.\n '
pkg = native.package_name()
if testonly:
return None
if pkg.startswith('packages/bazel'):
return None
if pkg.startswith('packages/'):
return ('@angular/' + pkg[len('packages/'):])
return None | Provide better defaults for package names.
e.g. rather than angular/packages/core/testing we want @angular/core/testing
TODO(alexeagle): we ought to supply a default module name for every library in the repo.
But we short-circuit below in cases that are currently not working. | tools/defaults.bzl | _default_module_name | gustavguez/angular | 48 | python | def _default_module_name(testonly):
' Provide better defaults for package names.\n\n e.g. rather than angular/packages/core/testing we want @angular/core/testing\n\n TODO(alexeagle): we ought to supply a default module name for every library in the repo.\n But we short-circuit below in cases that are currently not working.\n '
pkg = native.package_name()
if testonly:
return None
if pkg.startswith('packages/bazel'):
return None
if pkg.startswith('packages/'):
return ('@angular/' + pkg[len('packages/'):])
return None | def _default_module_name(testonly):
' Provide better defaults for package names.\n\n e.g. rather than angular/packages/core/testing we want @angular/core/testing\n\n TODO(alexeagle): we ought to supply a default module name for every library in the repo.\n But we short-circuit below in cases that are currently not working.\n '
pkg = native.package_name()
if testonly:
return None
if pkg.startswith('packages/bazel'):
return None
if pkg.startswith('packages/'):
return ('@angular/' + pkg[len('packages/'):])
return None<|docstring|>Provide better defaults for package names.
e.g. rather than angular/packages/core/testing we want @angular/core/testing
TODO(alexeagle): we ought to supply a default module name for every library in the repo.
But we short-circuit below in cases that are currently not working.<|endoftext|> |
47e72ee74a084f5e0aef9c9b4cc4cc2190b8a4e122f20041b59077c2f022d552 | def ts_devserver(**kwargs):
'Default values for ts_devserver'
serving_path = kwargs.pop('serving_path', '/app_bundle.js')
_ts_devserver(serving_path=serving_path, **kwargs) | Default values for ts_devserver | tools/defaults.bzl | ts_devserver | gustavguez/angular | 48 | python | def ts_devserver(**kwargs):
serving_path = kwargs.pop('serving_path', '/app_bundle.js')
_ts_devserver(serving_path=serving_path, **kwargs) | def ts_devserver(**kwargs):
serving_path = kwargs.pop('serving_path', '/app_bundle.js')
_ts_devserver(serving_path=serving_path, **kwargs)<|docstring|>Default values for ts_devserver<|endoftext|> |
f12e2e4c9375674a074e7feb040c29adb3db954104342b0b4a75c88a01030fda | def ts_library(name, tsconfig=None, testonly=False, deps=[], module_name=None, **kwargs):
'Default values for ts_library'
deps = (deps + ['@npm//tslib'])
if testonly:
deps.append('@npm//@types/jasmine')
deps.append('@npm//@types/node')
deps.append('@npm//@types/events')
if ((not tsconfig) and testonly):
tsconfig = _DEFAULT_TSCONFIG_TEST
if (not module_name):
module_name = _default_module_name(testonly)
_ts_library(name=name, tsconfig=tsconfig, testonly=testonly, deps=deps, module_name=module_name, **kwargs)
native.filegroup(name=('%s_es5' % name), srcs=[(':%s' % name)], testonly=testonly, output_group='es5_sources') | Default values for ts_library | tools/defaults.bzl | ts_library | gustavguez/angular | 48 | python | def ts_library(name, tsconfig=None, testonly=False, deps=[], module_name=None, **kwargs):
deps = (deps + ['@npm//tslib'])
if testonly:
deps.append('@npm//@types/jasmine')
deps.append('@npm//@types/node')
deps.append('@npm//@types/events')
if ((not tsconfig) and testonly):
tsconfig = _DEFAULT_TSCONFIG_TEST
if (not module_name):
module_name = _default_module_name(testonly)
_ts_library(name=name, tsconfig=tsconfig, testonly=testonly, deps=deps, module_name=module_name, **kwargs)
native.filegroup(name=('%s_es5' % name), srcs=[(':%s' % name)], testonly=testonly, output_group='es5_sources') | def ts_library(name, tsconfig=None, testonly=False, deps=[], module_name=None, **kwargs):
deps = (deps + ['@npm//tslib'])
if testonly:
deps.append('@npm//@types/jasmine')
deps.append('@npm//@types/node')
deps.append('@npm//@types/events')
if ((not tsconfig) and testonly):
tsconfig = _DEFAULT_TSCONFIG_TEST
if (not module_name):
module_name = _default_module_name(testonly)
_ts_library(name=name, tsconfig=tsconfig, testonly=testonly, deps=deps, module_name=module_name, **kwargs)
native.filegroup(name=('%s_es5' % name), srcs=[(':%s' % name)], testonly=testonly, output_group='es5_sources')<|docstring|>Default values for ts_library<|endoftext|> |
13de8a53fa42c84a8c1dd0e318f3073b666a7e133cdff78037b98a4002cd4636 | def ng_module(name, tsconfig=None, entry_point=None, testonly=False, deps=[], module_name=None, bundle_dts=True, **kwargs):
'Default values for ng_module'
deps = (deps + ['@npm//tslib'])
if testonly:
deps.append('@npm//@types/jasmine')
deps.append('@npm//@types/node')
deps.append('@npm//@types/events')
if ((not tsconfig) and testonly):
tsconfig = _DEFAULT_TSCONFIG_TEST
if (not module_name):
module_name = _default_module_name(testonly)
if (not entry_point):
entry_point = 'public_api.ts'
_ng_module(name=name, flat_module_out_file=name, tsconfig=tsconfig, entry_point=entry_point, testonly=testonly, bundle_dts=bundle_dts, deps=deps, compiler=_INTERNAL_NG_MODULE_COMPILER, api_extractor=_INTERNAL_NG_MODULE_API_EXTRACTOR, ng_xi18n=_INTERNAL_NG_MODULE_XI18N, module_name=module_name, **kwargs) | Default values for ng_module | tools/defaults.bzl | ng_module | gustavguez/angular | 48 | python | def ng_module(name, tsconfig=None, entry_point=None, testonly=False, deps=[], module_name=None, bundle_dts=True, **kwargs):
deps = (deps + ['@npm//tslib'])
if testonly:
deps.append('@npm//@types/jasmine')
deps.append('@npm//@types/node')
deps.append('@npm//@types/events')
if ((not tsconfig) and testonly):
tsconfig = _DEFAULT_TSCONFIG_TEST
if (not module_name):
module_name = _default_module_name(testonly)
if (not entry_point):
entry_point = 'public_api.ts'
_ng_module(name=name, flat_module_out_file=name, tsconfig=tsconfig, entry_point=entry_point, testonly=testonly, bundle_dts=bundle_dts, deps=deps, compiler=_INTERNAL_NG_MODULE_COMPILER, api_extractor=_INTERNAL_NG_MODULE_API_EXTRACTOR, ng_xi18n=_INTERNAL_NG_MODULE_XI18N, module_name=module_name, **kwargs) | def ng_module(name, tsconfig=None, entry_point=None, testonly=False, deps=[], module_name=None, bundle_dts=True, **kwargs):
deps = (deps + ['@npm//tslib'])
if testonly:
deps.append('@npm//@types/jasmine')
deps.append('@npm//@types/node')
deps.append('@npm//@types/events')
if ((not tsconfig) and testonly):
tsconfig = _DEFAULT_TSCONFIG_TEST
if (not module_name):
module_name = _default_module_name(testonly)
if (not entry_point):
entry_point = 'public_api.ts'
_ng_module(name=name, flat_module_out_file=name, tsconfig=tsconfig, entry_point=entry_point, testonly=testonly, bundle_dts=bundle_dts, deps=deps, compiler=_INTERNAL_NG_MODULE_COMPILER, api_extractor=_INTERNAL_NG_MODULE_API_EXTRACTOR, ng_xi18n=_INTERNAL_NG_MODULE_XI18N, module_name=module_name, **kwargs)<|docstring|>Default values for ng_module<|endoftext|> |
a1318c6a601a954969e4c89dbfaef916d81dc64c8e8383605bd1eec5c9851e4a | def ng_package(name, readme_md=None, license_banner=None, deps=[], **kwargs):
'Default values for ng_package'
if (not readme_md):
readme_md = '//packages:README.md'
if (not license_banner):
license_banner = '//packages:license-banner.txt'
deps = (deps + ['@npm//tslib'])
visibility = kwargs.pop('visibility', None)
_ng_package(name=name, deps=deps, readme_md=readme_md, license_banner=license_banner, substitutions=PKG_GROUP_REPLACEMENTS, ng_packager=_INTERNAL_NG_PACKAGE_PACKAGER, terser_config_file=_INTERNAL_NG_PACKAGE_DEFALUT_TERSER_CONFIG_FILE, rollup_config_tmpl=_INTERNAL_NG_PACKAGE_DEFAULT_ROLLUP_CONFIG_TMPL, rollup=_INTERNAL_NG_PACKAGE_DEFAULT_ROLLUP, visibility=visibility, **kwargs)
pkg_tar(name=(name + '_archive'), srcs=[(':%s' % name)], extension='tar.gz', strip_prefix=('./%s' % name), tags=['manual'], visibility=visibility) | Default values for ng_package | tools/defaults.bzl | ng_package | gustavguez/angular | 48 | python | def ng_package(name, readme_md=None, license_banner=None, deps=[], **kwargs):
if (not readme_md):
readme_md = '//packages:README.md'
if (not license_banner):
license_banner = '//packages:license-banner.txt'
deps = (deps + ['@npm//tslib'])
visibility = kwargs.pop('visibility', None)
_ng_package(name=name, deps=deps, readme_md=readme_md, license_banner=license_banner, substitutions=PKG_GROUP_REPLACEMENTS, ng_packager=_INTERNAL_NG_PACKAGE_PACKAGER, terser_config_file=_INTERNAL_NG_PACKAGE_DEFALUT_TERSER_CONFIG_FILE, rollup_config_tmpl=_INTERNAL_NG_PACKAGE_DEFAULT_ROLLUP_CONFIG_TMPL, rollup=_INTERNAL_NG_PACKAGE_DEFAULT_ROLLUP, visibility=visibility, **kwargs)
pkg_tar(name=(name + '_archive'), srcs=[(':%s' % name)], extension='tar.gz', strip_prefix=('./%s' % name), tags=['manual'], visibility=visibility) | def ng_package(name, readme_md=None, license_banner=None, deps=[], **kwargs):
if (not readme_md):
readme_md = '//packages:README.md'
if (not license_banner):
license_banner = '//packages:license-banner.txt'
deps = (deps + ['@npm//tslib'])
visibility = kwargs.pop('visibility', None)
_ng_package(name=name, deps=deps, readme_md=readme_md, license_banner=license_banner, substitutions=PKG_GROUP_REPLACEMENTS, ng_packager=_INTERNAL_NG_PACKAGE_PACKAGER, terser_config_file=_INTERNAL_NG_PACKAGE_DEFALUT_TERSER_CONFIG_FILE, rollup_config_tmpl=_INTERNAL_NG_PACKAGE_DEFAULT_ROLLUP_CONFIG_TMPL, rollup=_INTERNAL_NG_PACKAGE_DEFAULT_ROLLUP, visibility=visibility, **kwargs)
pkg_tar(name=(name + '_archive'), srcs=[(':%s' % name)], extension='tar.gz', strip_prefix=('./%s' % name), tags=['manual'], visibility=visibility)<|docstring|>Default values for ng_package<|endoftext|> |
6fed38a20cdc9b12642bf2daeec898b1e2c5ad9a62086fa40006953f469cd531 | def pkg_npm(name, substitutions={}, **kwargs):
'Default values for pkg_npm'
visibility = kwargs.pop('visibility', None)
_pkg_npm(name=name, substitutions=dict(substitutions, **PKG_GROUP_REPLACEMENTS), visibility=visibility, **kwargs)
pkg_tar(name=(name + '_archive'), srcs=[(':%s' % name)], extension='tar.gz', strip_prefix=('./%s' % name), tags=['manual'], visibility=visibility) | Default values for pkg_npm | tools/defaults.bzl | pkg_npm | gustavguez/angular | 48 | python | def pkg_npm(name, substitutions={}, **kwargs):
visibility = kwargs.pop('visibility', None)
_pkg_npm(name=name, substitutions=dict(substitutions, **PKG_GROUP_REPLACEMENTS), visibility=visibility, **kwargs)
pkg_tar(name=(name + '_archive'), srcs=[(':%s' % name)], extension='tar.gz', strip_prefix=('./%s' % name), tags=['manual'], visibility=visibility) | def pkg_npm(name, substitutions={}, **kwargs):
visibility = kwargs.pop('visibility', None)
_pkg_npm(name=name, substitutions=dict(substitutions, **PKG_GROUP_REPLACEMENTS), visibility=visibility, **kwargs)
pkg_tar(name=(name + '_archive'), srcs=[(':%s' % name)], extension='tar.gz', strip_prefix=('./%s' % name), tags=['manual'], visibility=visibility)<|docstring|>Default values for pkg_npm<|endoftext|> |
01f99b2d8a952410ac99df3d7f4295b52257e17e3ba0606512336c9e21bfdf7c | def karma_web_test_suite(name, **kwargs):
'Default values for karma_web_test_suite'
bootstrap = kwargs.pop('bootstrap', ['//:web_test_bootstrap_scripts'])
deps = (kwargs.pop('deps', []) + ['@npm//karma-browserstack-launcher', '@npm//karma-sauce-launcher', '@npm//:node_modules/tslib/tslib.js', '//tools/rxjs:rxjs_umd_modules', '//packages/zone.js:npm_package'])
runtime_deps = (kwargs.pop('runtime_deps', []) + ['//tools/testing:browser'])
data = kwargs.pop('data', [])
tags = kwargs.pop('tags', [])
_karma_web_test_suite(name=name, runtime_deps=runtime_deps, bootstrap=bootstrap, deps=deps, browsers=['//dev-infra/browsers/firefox:firefox'], data=data, tags=tags, **kwargs)
_karma_web_test(name=('saucelabs_%s' % name), timeout='long', runtime_deps=runtime_deps, bootstrap=bootstrap, config_file='//:karma-js.conf.js', deps=deps, data=(data + ['//:browser-providers.conf.js', '//tools:jasmine-seed-generator.js']), karma='//tools/saucelabs:karma-saucelabs', tags=(tags + ['exclusive', 'manual', 'no-remote-exec', 'saucelabs']), configuration_env_vars=['KARMA_WEB_TEST_MODE'], **kwargs) | Default values for karma_web_test_suite | tools/defaults.bzl | karma_web_test_suite | gustavguez/angular | 48 | python | def karma_web_test_suite(name, **kwargs):
bootstrap = kwargs.pop('bootstrap', ['//:web_test_bootstrap_scripts'])
deps = (kwargs.pop('deps', []) + ['@npm//karma-browserstack-launcher', '@npm//karma-sauce-launcher', '@npm//:node_modules/tslib/tslib.js', '//tools/rxjs:rxjs_umd_modules', '//packages/zone.js:npm_package'])
runtime_deps = (kwargs.pop('runtime_deps', []) + ['//tools/testing:browser'])
data = kwargs.pop('data', [])
tags = kwargs.pop('tags', [])
_karma_web_test_suite(name=name, runtime_deps=runtime_deps, bootstrap=bootstrap, deps=deps, browsers=['//dev-infra/browsers/firefox:firefox'], data=data, tags=tags, **kwargs)
_karma_web_test(name=('saucelabs_%s' % name), timeout='long', runtime_deps=runtime_deps, bootstrap=bootstrap, config_file='//:karma-js.conf.js', deps=deps, data=(data + ['//:browser-providers.conf.js', '//tools:jasmine-seed-generator.js']), karma='//tools/saucelabs:karma-saucelabs', tags=(tags + ['exclusive', 'manual', 'no-remote-exec', 'saucelabs']), configuration_env_vars=['KARMA_WEB_TEST_MODE'], **kwargs) | def karma_web_test_suite(name, **kwargs):
bootstrap = kwargs.pop('bootstrap', ['//:web_test_bootstrap_scripts'])
deps = (kwargs.pop('deps', []) + ['@npm//karma-browserstack-launcher', '@npm//karma-sauce-launcher', '@npm//:node_modules/tslib/tslib.js', '//tools/rxjs:rxjs_umd_modules', '//packages/zone.js:npm_package'])
runtime_deps = (kwargs.pop('runtime_deps', []) + ['//tools/testing:browser'])
data = kwargs.pop('data', [])
tags = kwargs.pop('tags', [])
_karma_web_test_suite(name=name, runtime_deps=runtime_deps, bootstrap=bootstrap, deps=deps, browsers=['//dev-infra/browsers/firefox:firefox'], data=data, tags=tags, **kwargs)
_karma_web_test(name=('saucelabs_%s' % name), timeout='long', runtime_deps=runtime_deps, bootstrap=bootstrap, config_file='//:karma-js.conf.js', deps=deps, data=(data + ['//:browser-providers.conf.js', '//tools:jasmine-seed-generator.js']), karma='//tools/saucelabs:karma-saucelabs', tags=(tags + ['exclusive', 'manual', 'no-remote-exec', 'saucelabs']), configuration_env_vars=['KARMA_WEB_TEST_MODE'], **kwargs)<|docstring|>Default values for karma_web_test_suite<|endoftext|> |
de7161d1ab19296952da0a5c96d50853a64241db97ce700d6d7149aff8287739 | def protractor_web_test_suite(**kwargs):
'Default values for protractor_web_test_suite'
_protractor_web_test_suite(browsers=['//dev-infra/browsers/chromium:chromium'], **kwargs) | Default values for protractor_web_test_suite | tools/defaults.bzl | protractor_web_test_suite | gustavguez/angular | 48 | python | def protractor_web_test_suite(**kwargs):
_protractor_web_test_suite(browsers=['//dev-infra/browsers/chromium:chromium'], **kwargs) | def protractor_web_test_suite(**kwargs):
_protractor_web_test_suite(browsers=['//dev-infra/browsers/chromium:chromium'], **kwargs)<|docstring|>Default values for protractor_web_test_suite<|endoftext|> |
5d3ef1470d226041e1ae35589b8ab7c30318178324fbf6112ad6ac0ab07b7e7c | def ng_benchmark(**kwargs):
'Default values for ng_benchmark'
_ng_benchmark(**kwargs) | Default values for ng_benchmark | tools/defaults.bzl | ng_benchmark | gustavguez/angular | 48 | python | def ng_benchmark(**kwargs):
_ng_benchmark(**kwargs) | def ng_benchmark(**kwargs):
_ng_benchmark(**kwargs)<|docstring|>Default values for ng_benchmark<|endoftext|> |
b94be68d7dbed5b5a35f9cefae2aa10dd317f9ef287cd45999826a9b2d37444c | def nodejs_binary(data=[], **kwargs):
'Default values for nodejs_binary'
_nodejs_binary(configuration_env_vars=['angular_ivy_enabled'], data=(data + ['@npm//source-map-support']), **kwargs) | Default values for nodejs_binary | tools/defaults.bzl | nodejs_binary | gustavguez/angular | 48 | python | def nodejs_binary(data=[], **kwargs):
_nodejs_binary(configuration_env_vars=['angular_ivy_enabled'], data=(data + ['@npm//source-map-support']), **kwargs) | def nodejs_binary(data=[], **kwargs):
_nodejs_binary(configuration_env_vars=['angular_ivy_enabled'], data=(data + ['@npm//source-map-support']), **kwargs)<|docstring|>Default values for nodejs_binary<|endoftext|> |
9f7491c1434c28ce80ea177290571308afd26c6be5271c66400e8b437c4938b5 | def jasmine_node_test(bootstrap=[], **kwargs):
'Default values for jasmine_node_test\n\n Args:\n bootstrap: A list of labels of scripts to run before the entry_point.\n\n The labels can either be individual files or a filegroup that contain a single\n file.\n\n The label is automatically added to the deps of jasmine_node_test.\n If the label ends in `_es5` which by convention selects the es5 outputs\n of a ts_library rule, then corresponding ts_library target sans `_es5`\n is also added to the deps of jasmine_node_test.\n\n For example with,\n\n jasmine_node_test(\n name = "test",\n bootstrap = ["//tools/testing:node_es5"],\n deps = [":test_lib"],\n )\n\n the `//tools/testing:node` target will automatically get added to deps\n by this macro. This removes the need for duplicate deps on the\n target and makes the usage of this rule less verbose.'
deps = (kwargs.pop('deps', []) + ['@npm//chokidar', '@npm//domino', '@npm//jasmine-core', '@npm//reflect-metadata', '@npm//source-map-support', '@npm//tslib', '@npm//xhr2'])
configuration_env_vars = (kwargs.pop('configuration_env_vars', []) + ['angular_ivy_enabled'])
templated_args = kwargs.pop('templated_args', [])
for label in bootstrap:
deps += [label]
templated_args += [('--node_options=--require=$$(rlocation $(rootpath %s))' % label)]
if label.endswith('_es5'):
deps += [label[:(- 4)]]
_jasmine_node_test(deps=deps, configuration_env_vars=configuration_env_vars, templated_args=templated_args, **kwargs) | Default values for jasmine_node_test
Args:
bootstrap: A list of labels of scripts to run before the entry_point.
The labels can either be individual files or a filegroup that contain a single
file.
The label is automatically added to the deps of jasmine_node_test.
If the label ends in `_es5` which by convention selects the es5 outputs
of a ts_library rule, then corresponding ts_library target sans `_es5`
is also added to the deps of jasmine_node_test.
For example with,
jasmine_node_test(
name = "test",
bootstrap = ["//tools/testing:node_es5"],
deps = [":test_lib"],
)
the `//tools/testing:node` target will automatically get added to deps
by this macro. This removes the need for duplicate deps on the
target and makes the usage of this rule less verbose. | tools/defaults.bzl | jasmine_node_test | gustavguez/angular | 48 | python | def jasmine_node_test(bootstrap=[], **kwargs):
'Default values for jasmine_node_test\n\n Args:\n bootstrap: A list of labels of scripts to run before the entry_point.\n\n The labels can either be individual files or a filegroup that contain a single\n file.\n\n The label is automatically added to the deps of jasmine_node_test.\n If the label ends in `_es5` which by convention selects the es5 outputs\n of a ts_library rule, then corresponding ts_library target sans `_es5`\n is also added to the deps of jasmine_node_test.\n\n For example with,\n\n jasmine_node_test(\n name = "test",\n bootstrap = ["//tools/testing:node_es5"],\n deps = [":test_lib"],\n )\n\n the `//tools/testing:node` target will automatically get added to deps\n by this macro. This removes the need for duplicate deps on the\n target and makes the usage of this rule less verbose.'
deps = (kwargs.pop('deps', []) + ['@npm//chokidar', '@npm//domino', '@npm//jasmine-core', '@npm//reflect-metadata', '@npm//source-map-support', '@npm//tslib', '@npm//xhr2'])
configuration_env_vars = (kwargs.pop('configuration_env_vars', []) + ['angular_ivy_enabled'])
templated_args = kwargs.pop('templated_args', [])
for label in bootstrap:
deps += [label]
templated_args += [('--node_options=--require=$$(rlocation $(rootpath %s))' % label)]
if label.endswith('_es5'):
deps += [label[:(- 4)]]
_jasmine_node_test(deps=deps, configuration_env_vars=configuration_env_vars, templated_args=templated_args, **kwargs) | def jasmine_node_test(bootstrap=[], **kwargs):
'Default values for jasmine_node_test\n\n Args:\n bootstrap: A list of labels of scripts to run before the entry_point.\n\n The labels can either be individual files or a filegroup that contain a single\n file.\n\n The label is automatically added to the deps of jasmine_node_test.\n If the label ends in `_es5` which by convention selects the es5 outputs\n of a ts_library rule, then corresponding ts_library target sans `_es5`\n is also added to the deps of jasmine_node_test.\n\n For example with,\n\n jasmine_node_test(\n name = "test",\n bootstrap = ["//tools/testing:node_es5"],\n deps = [":test_lib"],\n )\n\n the `//tools/testing:node` target will automatically get added to deps\n by this macro. This removes the need for duplicate deps on the\n target and makes the usage of this rule less verbose.'
deps = (kwargs.pop('deps', []) + ['@npm//chokidar', '@npm//domino', '@npm//jasmine-core', '@npm//reflect-metadata', '@npm//source-map-support', '@npm//tslib', '@npm//xhr2'])
configuration_env_vars = (kwargs.pop('configuration_env_vars', []) + ['angular_ivy_enabled'])
templated_args = kwargs.pop('templated_args', [])
for label in bootstrap:
deps += [label]
templated_args += [('--node_options=--require=$$(rlocation $(rootpath %s))' % label)]
if label.endswith('_es5'):
deps += [label[:(- 4)]]
_jasmine_node_test(deps=deps, configuration_env_vars=configuration_env_vars, templated_args=templated_args, **kwargs)<|docstring|>Default values for jasmine_node_test
Args:
bootstrap: A list of labels of scripts to run before the entry_point.
The labels can either be individual files or a filegroup that contain a single
file.
The label is automatically added to the deps of jasmine_node_test.
If the label ends in `_es5` which by convention selects the es5 outputs
of a ts_library rule, then corresponding ts_library target sans `_es5`
is also added to the deps of jasmine_node_test.
For example with,
jasmine_node_test(
name = "test",
bootstrap = ["//tools/testing:node_es5"],
deps = [":test_lib"],
)
the `//tools/testing:node` target will automatically get added to deps
by this macro. This removes the need for duplicate deps on the
target and makes the usage of this rule less verbose.<|endoftext|> |
6e271a8c2863a5dfe64e94524be943a921423f12ca311f73a6839c7c55256cbf | def ng_rollup_bundle(deps=[], **kwargs):
'Default values for ng_rollup_bundle'
deps = (deps + ['@npm//tslib', '@npm//reflect-metadata'])
_ng_rollup_bundle(deps=deps, **kwargs) | Default values for ng_rollup_bundle | tools/defaults.bzl | ng_rollup_bundle | gustavguez/angular | 48 | python | def ng_rollup_bundle(deps=[], **kwargs):
deps = (deps + ['@npm//tslib', '@npm//reflect-metadata'])
_ng_rollup_bundle(deps=deps, **kwargs) | def ng_rollup_bundle(deps=[], **kwargs):
deps = (deps + ['@npm//tslib', '@npm//reflect-metadata'])
_ng_rollup_bundle(deps=deps, **kwargs)<|docstring|>Default values for ng_rollup_bundle<|endoftext|> |
dc20433b99520454b09d2dc584b386075b136d2ff787e0c7fe12042710fcf12c | def rollup_bundle(name, testonly=False, sourcemap='true', **kwargs):
'A drop in replacement for the rules nodejs [legacy rollup_bundle].\n\n Runs [rollup_bundle], [terser_minified] and [babel] for downleveling to es5\n to produce a number of output bundles.\n\n es2015 iife : "%{name}.es2015.js"\n es2015 iife minified : "%{name}.min.es2015.js"\n es2015 iife minified (debug) : "%{name}.min_debug.es2015.js"\n es5 iife : "%{name}.js"\n es5 iife minified : "%{name}.min.js"\n es5 iife minified (debug) : "%{name}.min_debug.js"\n es5 umd : "%{name}.es5umd.js"\n es5 umd minified : "%{name}.min.es5umd.js"\n es2015 umd : "%{name}.umd.js"\n es2015 umd minified : "%{name}.min.umd.js"\n\n ".js.map" files are also produced for each bundle.\n\n [legacy rollup_bundle]: https://github.com/bazelbuild/rules_nodejs/blob/0.38.3/internal/rollup/rollup_bundle.bzl\n [rollup_bundle]: https://bazelbuild.github.io/rules_nodejs/Rollup.html\n [terser_minified]: https://bazelbuild.github.io/rules_nodejs/Terser.html\n [babel]: https://babeljs.io/\n '
common_terser_args = {'args': ['--comments'], 'sourcemap': False}
_rollup_bundle(name=(name + '.es2015'), testonly=testonly, format='iife', sourcemap=sourcemap, **kwargs)
terser_minified(name=(name + '.min.es2015'), testonly=testonly, src=(name + '.es2015'), **common_terser_args)
native.filegroup(name=(name + '.min.es2015.js'), testonly=testonly, srcs=[(name + '.min.es2015')])
terser_minified(name=(name + '.min_debug.es2015'), testonly=testonly, src=(name + '.es2015'), **common_terser_args)
native.filegroup(name=(name + '.min_debug.es2015.js'), testonly=testonly, srcs=[(name + '.min_debug.es2015')])
tsc(name=name, testonly=testonly, outs=[(name + '.js')], args=[('$(execpath :%s.es2015.js)' % name), '--types', '--skipLibCheck', '--target', 'es5', '--lib', 'es2015,dom', '--allowJS', '--outFile', ('$(execpath :%s.js)' % name)], data=[(name + '.es2015.js')])
terser_minified(name=(name + '.min'), testonly=testonly, src=(name + ''), **common_terser_args)
native.filegroup(name=(name + '.min.js'), testonly=testonly, srcs=[(name + '.min')])
terser_minified(name=(name + '.min_debug'), testonly=testonly, src=(name + ''), debug=True, **common_terser_args)
native.filegroup(name=(name + '.min_debug.js'), testonly=testonly, srcs=[(name + '.min_debug')])
_rollup_bundle(name=(name + '.umd'), testonly=testonly, format='umd', sourcemap=sourcemap, **kwargs)
terser_minified(name=(name + '.min.umd'), testonly=testonly, src=(name + '.umd'), **common_terser_args)
native.filegroup(name=(name + '.min.umd.js'), testonly=testonly, srcs=[(name + '.min.umd')])
tsc(name=(name + '.es5umd'), testonly=testonly, outs=[(name + '.es5umd.js')], args=[('$(execpath :%s.umd.js)' % name), '--types', '--skipLibCheck', '--target', 'es5', '--lib', 'es2015,dom', '--allowJS', '--outFile', ('$(execpath :%s.es5umd.js)' % name)], data=[(name + '.umd.js')])
terser_minified(name=(name + '.min.es5umd'), testonly=testonly, src=(name + '.es5umd'), **common_terser_args)
native.filegroup(name=(name + '.min.es5umd.js'), testonly=testonly, srcs=[(name + '.min.es5umd')]) | A drop in replacement for the rules nodejs [legacy rollup_bundle].
Runs [rollup_bundle], [terser_minified] and [babel] for downleveling to es5
to produce a number of output bundles.
es2015 iife : "%{name}.es2015.js"
es2015 iife minified : "%{name}.min.es2015.js"
es2015 iife minified (debug) : "%{name}.min_debug.es2015.js"
es5 iife : "%{name}.js"
es5 iife minified : "%{name}.min.js"
es5 iife minified (debug) : "%{name}.min_debug.js"
es5 umd : "%{name}.es5umd.js"
es5 umd minified : "%{name}.min.es5umd.js"
es2015 umd : "%{name}.umd.js"
es2015 umd minified : "%{name}.min.umd.js"
".js.map" files are also produced for each bundle.
[legacy rollup_bundle]: https://github.com/bazelbuild/rules_nodejs/blob/0.38.3/internal/rollup/rollup_bundle.bzl
[rollup_bundle]: https://bazelbuild.github.io/rules_nodejs/Rollup.html
[terser_minified]: https://bazelbuild.github.io/rules_nodejs/Terser.html
[babel]: https://babeljs.io/ | tools/defaults.bzl | rollup_bundle | gustavguez/angular | 48 | python | def rollup_bundle(name, testonly=False, sourcemap='true', **kwargs):
'A drop in replacement for the rules nodejs [legacy rollup_bundle].\n\n Runs [rollup_bundle], [terser_minified] and [babel] for downleveling to es5\n to produce a number of output bundles.\n\n es2015 iife : "%{name}.es2015.js"\n es2015 iife minified : "%{name}.min.es2015.js"\n es2015 iife minified (debug) : "%{name}.min_debug.es2015.js"\n es5 iife : "%{name}.js"\n es5 iife minified : "%{name}.min.js"\n es5 iife minified (debug) : "%{name}.min_debug.js"\n es5 umd : "%{name}.es5umd.js"\n es5 umd minified : "%{name}.min.es5umd.js"\n es2015 umd : "%{name}.umd.js"\n es2015 umd minified : "%{name}.min.umd.js"\n\n ".js.map" files are also produced for each bundle.\n\n [legacy rollup_bundle]: https://github.com/bazelbuild/rules_nodejs/blob/0.38.3/internal/rollup/rollup_bundle.bzl\n [rollup_bundle]: https://bazelbuild.github.io/rules_nodejs/Rollup.html\n [terser_minified]: https://bazelbuild.github.io/rules_nodejs/Terser.html\n [babel]: https://babeljs.io/\n '
common_terser_args = {'args': ['--comments'], 'sourcemap': False}
_rollup_bundle(name=(name + '.es2015'), testonly=testonly, format='iife', sourcemap=sourcemap, **kwargs)
terser_minified(name=(name + '.min.es2015'), testonly=testonly, src=(name + '.es2015'), **common_terser_args)
native.filegroup(name=(name + '.min.es2015.js'), testonly=testonly, srcs=[(name + '.min.es2015')])
terser_minified(name=(name + '.min_debug.es2015'), testonly=testonly, src=(name + '.es2015'), **common_terser_args)
native.filegroup(name=(name + '.min_debug.es2015.js'), testonly=testonly, srcs=[(name + '.min_debug.es2015')])
tsc(name=name, testonly=testonly, outs=[(name + '.js')], args=[('$(execpath :%s.es2015.js)' % name), '--types', '--skipLibCheck', '--target', 'es5', '--lib', 'es2015,dom', '--allowJS', '--outFile', ('$(execpath :%s.js)' % name)], data=[(name + '.es2015.js')])
terser_minified(name=(name + '.min'), testonly=testonly, src=(name + ), **common_terser_args)
native.filegroup(name=(name + '.min.js'), testonly=testonly, srcs=[(name + '.min')])
terser_minified(name=(name + '.min_debug'), testonly=testonly, src=(name + ), debug=True, **common_terser_args)
native.filegroup(name=(name + '.min_debug.js'), testonly=testonly, srcs=[(name + '.min_debug')])
_rollup_bundle(name=(name + '.umd'), testonly=testonly, format='umd', sourcemap=sourcemap, **kwargs)
terser_minified(name=(name + '.min.umd'), testonly=testonly, src=(name + '.umd'), **common_terser_args)
native.filegroup(name=(name + '.min.umd.js'), testonly=testonly, srcs=[(name + '.min.umd')])
tsc(name=(name + '.es5umd'), testonly=testonly, outs=[(name + '.es5umd.js')], args=[('$(execpath :%s.umd.js)' % name), '--types', '--skipLibCheck', '--target', 'es5', '--lib', 'es2015,dom', '--allowJS', '--outFile', ('$(execpath :%s.es5umd.js)' % name)], data=[(name + '.umd.js')])
terser_minified(name=(name + '.min.es5umd'), testonly=testonly, src=(name + '.es5umd'), **common_terser_args)
native.filegroup(name=(name + '.min.es5umd.js'), testonly=testonly, srcs=[(name + '.min.es5umd')]) | def rollup_bundle(name, testonly=False, sourcemap='true', **kwargs):
'A drop in replacement for the rules nodejs [legacy rollup_bundle].\n\n Runs [rollup_bundle], [terser_minified] and [babel] for downleveling to es5\n to produce a number of output bundles.\n\n es2015 iife : "%{name}.es2015.js"\n es2015 iife minified : "%{name}.min.es2015.js"\n es2015 iife minified (debug) : "%{name}.min_debug.es2015.js"\n es5 iife : "%{name}.js"\n es5 iife minified : "%{name}.min.js"\n es5 iife minified (debug) : "%{name}.min_debug.js"\n es5 umd : "%{name}.es5umd.js"\n es5 umd minified : "%{name}.min.es5umd.js"\n es2015 umd : "%{name}.umd.js"\n es2015 umd minified : "%{name}.min.umd.js"\n\n ".js.map" files are also produced for each bundle.\n\n [legacy rollup_bundle]: https://github.com/bazelbuild/rules_nodejs/blob/0.38.3/internal/rollup/rollup_bundle.bzl\n [rollup_bundle]: https://bazelbuild.github.io/rules_nodejs/Rollup.html\n [terser_minified]: https://bazelbuild.github.io/rules_nodejs/Terser.html\n [babel]: https://babeljs.io/\n '
common_terser_args = {'args': ['--comments'], 'sourcemap': False}
_rollup_bundle(name=(name + '.es2015'), testonly=testonly, format='iife', sourcemap=sourcemap, **kwargs)
terser_minified(name=(name + '.min.es2015'), testonly=testonly, src=(name + '.es2015'), **common_terser_args)
native.filegroup(name=(name + '.min.es2015.js'), testonly=testonly, srcs=[(name + '.min.es2015')])
terser_minified(name=(name + '.min_debug.es2015'), testonly=testonly, src=(name + '.es2015'), **common_terser_args)
native.filegroup(name=(name + '.min_debug.es2015.js'), testonly=testonly, srcs=[(name + '.min_debug.es2015')])
tsc(name=name, testonly=testonly, outs=[(name + '.js')], args=[('$(execpath :%s.es2015.js)' % name), '--types', '--skipLibCheck', '--target', 'es5', '--lib', 'es2015,dom', '--allowJS', '--outFile', ('$(execpath :%s.js)' % name)], data=[(name + '.es2015.js')])
terser_minified(name=(name + '.min'), testonly=testonly, src=(name + ), **common_terser_args)
native.filegroup(name=(name + '.min.js'), testonly=testonly, srcs=[(name + '.min')])
terser_minified(name=(name + '.min_debug'), testonly=testonly, src=(name + ), debug=True, **common_terser_args)
native.filegroup(name=(name + '.min_debug.js'), testonly=testonly, srcs=[(name + '.min_debug')])
_rollup_bundle(name=(name + '.umd'), testonly=testonly, format='umd', sourcemap=sourcemap, **kwargs)
terser_minified(name=(name + '.min.umd'), testonly=testonly, src=(name + '.umd'), **common_terser_args)
native.filegroup(name=(name + '.min.umd.js'), testonly=testonly, srcs=[(name + '.min.umd')])
tsc(name=(name + '.es5umd'), testonly=testonly, outs=[(name + '.es5umd.js')], args=[('$(execpath :%s.umd.js)' % name), '--types', '--skipLibCheck', '--target', 'es5', '--lib', 'es2015,dom', '--allowJS', '--outFile', ('$(execpath :%s.es5umd.js)' % name)], data=[(name + '.umd.js')])
terser_minified(name=(name + '.min.es5umd'), testonly=testonly, src=(name + '.es5umd'), **common_terser_args)
native.filegroup(name=(name + '.min.es5umd.js'), testonly=testonly, srcs=[(name + '.min.es5umd')])<|docstring|>A drop in replacement for the rules nodejs [legacy rollup_bundle].
Runs [rollup_bundle], [terser_minified] and [babel] for downleveling to es5
to produce a number of output bundles.
es2015 iife : "%{name}.es2015.js"
es2015 iife minified : "%{name}.min.es2015.js"
es2015 iife minified (debug) : "%{name}.min_debug.es2015.js"
es5 iife : "%{name}.js"
es5 iife minified : "%{name}.min.js"
es5 iife minified (debug) : "%{name}.min_debug.js"
es5 umd : "%{name}.es5umd.js"
es5 umd minified : "%{name}.min.es5umd.js"
es2015 umd : "%{name}.umd.js"
es2015 umd minified : "%{name}.min.umd.js"
".js.map" files are also produced for each bundle.
[legacy rollup_bundle]: https://github.com/bazelbuild/rules_nodejs/blob/0.38.3/internal/rollup/rollup_bundle.bzl
[rollup_bundle]: https://bazelbuild.github.io/rules_nodejs/Rollup.html
[terser_minified]: https://bazelbuild.github.io/rules_nodejs/Terser.html
[babel]: https://babeljs.io/<|endoftext|> |
5cf8bb60ecaf7b2bfacb5eb3122896457ec75ed27e147655b53b57f6029b0642 | def keys_to_movement(keys):
'\n Convert keys to a ...multi-hot... array\n\n [A,W,D,S] boolean values.\n '
output = [0, 0, 0, 0, 0]
if ('A' in keys):
output[0] = 1
elif ('D' in keys):
output[2] = 1
elif ('S' in keys):
output[3] = 1
elif ('W' in keys):
output[1] = 1
if (output == [0, 0, 0, 0, 0]):
output[4] = 1
return output | Convert keys to a ...multi-hot... array
[A,W,D,S] boolean values. | utilities/getkeys.py | keys_to_movement | workofart/brawlstars-ai | 12 | python | def keys_to_movement(keys):
'\n Convert keys to a ...multi-hot... array\n\n [A,W,D,S] boolean values.\n '
output = [0, 0, 0, 0, 0]
if ('A' in keys):
output[0] = 1
elif ('D' in keys):
output[2] = 1
elif ('S' in keys):
output[3] = 1
elif ('W' in keys):
output[1] = 1
if (output == [0, 0, 0, 0, 0]):
output[4] = 1
return output | def keys_to_movement(keys):
'\n Convert keys to a ...multi-hot... array\n\n [A,W,D,S] boolean values.\n '
output = [0, 0, 0, 0, 0]
if ('A' in keys):
output[0] = 1
elif ('D' in keys):
output[2] = 1
elif ('S' in keys):
output[3] = 1
elif ('W' in keys):
output[1] = 1
if (output == [0, 0, 0, 0, 0]):
output[4] = 1
return output<|docstring|>Convert keys to a ...multi-hot... array
[A,W,D,S] boolean values.<|endoftext|> |
6853a5b494e80e43ee95030631cc5df3f0e0cc8bd17e8fba67847eaa64597876 | def keys_to_action(keys):
'\n Convert keys to a ...multi-hot... array\n\n [E,Q] boolean values.\n '
output = [0, 0, 0]
if ('E' in keys):
output[0] = 1
elif ('Q' in keys):
output[1] = 1
if (output == [0, 0, 0]):
output[2] = 1
return output | Convert keys to a ...multi-hot... array
[E,Q] boolean values. | utilities/getkeys.py | keys_to_action | workofart/brawlstars-ai | 12 | python | def keys_to_action(keys):
'\n Convert keys to a ...multi-hot... array\n\n [E,Q] boolean values.\n '
output = [0, 0, 0]
if ('E' in keys):
output[0] = 1
elif ('Q' in keys):
output[1] = 1
if (output == [0, 0, 0]):
output[2] = 1
return output | def keys_to_action(keys):
'\n Convert keys to a ...multi-hot... array\n\n [E,Q] boolean values.\n '
output = [0, 0, 0]
if ('E' in keys):
output[0] = 1
elif ('Q' in keys):
output[1] = 1
if (output == [0, 0, 0]):
output[2] = 1
return output<|docstring|>Convert keys to a ...multi-hot... array
[E,Q] boolean values.<|endoftext|> |
7611f1614a02b0eaa7727ef23e00e6f691aa2091861854e4d5b55f20a9664033 | def duplex_consensus(read1, read2):
'(pysam.calignedsegment.AlignedSegment, pysam.calignedsegment.AlignedSegment) ->\n pysam.calignedsegment.AlignedSegment\n Return consensus of complementary reads with N for inconsistent bases.\n '
consensus_seq = ''
consensus_qual = []
for i in range(read1.query_length):
if ((read1.query_sequence[i] == read2.query_sequence[i]) and (read1.query_qualities[i] > 29) and (read2.query_qualities[i] > 29)):
consensus_seq += read1.query_sequence[i]
mol_qual = sum([read1.query_qualities[i], read2.query_qualities[i]])
if (mol_qual > 60):
consensus_qual += [60]
else:
consensus_qual += [mol_qual]
else:
consensus_seq += 'N'
consensus_qual += [0]
return (consensus_seq, consensus_qual) | (pysam.calignedsegment.AlignedSegment, pysam.calignedsegment.AlignedSegment) ->
pysam.calignedsegment.AlignedSegment
Return consensus of complementary reads with N for inconsistent bases. | ConsensusCruncher/singleton_correction.py | duplex_consensus | kridel-lab/ConsensusCruncher | 17 | python | def duplex_consensus(read1, read2):
'(pysam.calignedsegment.AlignedSegment, pysam.calignedsegment.AlignedSegment) ->\n pysam.calignedsegment.AlignedSegment\n Return consensus of complementary reads with N for inconsistent bases.\n '
consensus_seq =
consensus_qual = []
for i in range(read1.query_length):
if ((read1.query_sequence[i] == read2.query_sequence[i]) and (read1.query_qualities[i] > 29) and (read2.query_qualities[i] > 29)):
consensus_seq += read1.query_sequence[i]
mol_qual = sum([read1.query_qualities[i], read2.query_qualities[i]])
if (mol_qual > 60):
consensus_qual += [60]
else:
consensus_qual += [mol_qual]
else:
consensus_seq += 'N'
consensus_qual += [0]
return (consensus_seq, consensus_qual) | def duplex_consensus(read1, read2):
'(pysam.calignedsegment.AlignedSegment, pysam.calignedsegment.AlignedSegment) ->\n pysam.calignedsegment.AlignedSegment\n Return consensus of complementary reads with N for inconsistent bases.\n '
consensus_seq =
consensus_qual = []
for i in range(read1.query_length):
if ((read1.query_sequence[i] == read2.query_sequence[i]) and (read1.query_qualities[i] > 29) and (read2.query_qualities[i] > 29)):
consensus_seq += read1.query_sequence[i]
mol_qual = sum([read1.query_qualities[i], read2.query_qualities[i]])
if (mol_qual > 60):
consensus_qual += [60]
else:
consensus_qual += [mol_qual]
else:
consensus_seq += 'N'
consensus_qual += [0]
return (consensus_seq, consensus_qual)<|docstring|>(pysam.calignedsegment.AlignedSegment, pysam.calignedsegment.AlignedSegment) ->
pysam.calignedsegment.AlignedSegment
Return consensus of complementary reads with N for inconsistent bases.<|endoftext|> |
18e6ffc1db7087a588a393adf3118af89231e0d8105d9fe73fb0cb65f144ab10 | def strand_correction(read_tag, duplex_tag, query_name, singleton_dict, sscs_dict=None):
"(str, str, dict, dict) -> Pysam.AlignedSegment\n Return 'corrected' singleton using complement read from opposite strand (either found in SSCS or singleton).\n\n Quality score calculated from singleton and complementary read. Read template based on singleton.\n "
read = singleton_dict[read_tag][0]
if (sscs_dict is None):
complement_read = singleton_dict[duplex_tag][0]
else:
complement_read = sscs_dict[duplex_tag][0]
dcs = duplex_consensus(read, complement_read)
dcs_read = create_aligned_segment([read], dcs[0], dcs[1], query_name)
return dcs_read | (str, str, dict, dict) -> Pysam.AlignedSegment
Return 'corrected' singleton using complement read from opposite strand (either found in SSCS or singleton).
Quality score calculated from singleton and complementary read. Read template based on singleton. | ConsensusCruncher/singleton_correction.py | strand_correction | kridel-lab/ConsensusCruncher | 17 | python | def strand_correction(read_tag, duplex_tag, query_name, singleton_dict, sscs_dict=None):
"(str, str, dict, dict) -> Pysam.AlignedSegment\n Return 'corrected' singleton using complement read from opposite strand (either found in SSCS or singleton).\n\n Quality score calculated from singleton and complementary read. Read template based on singleton.\n "
read = singleton_dict[read_tag][0]
if (sscs_dict is None):
complement_read = singleton_dict[duplex_tag][0]
else:
complement_read = sscs_dict[duplex_tag][0]
dcs = duplex_consensus(read, complement_read)
dcs_read = create_aligned_segment([read], dcs[0], dcs[1], query_name)
return dcs_read | def strand_correction(read_tag, duplex_tag, query_name, singleton_dict, sscs_dict=None):
"(str, str, dict, dict) -> Pysam.AlignedSegment\n Return 'corrected' singleton using complement read from opposite strand (either found in SSCS or singleton).\n\n Quality score calculated from singleton and complementary read. Read template based on singleton.\n "
read = singleton_dict[read_tag][0]
if (sscs_dict is None):
complement_read = singleton_dict[duplex_tag][0]
else:
complement_read = sscs_dict[duplex_tag][0]
dcs = duplex_consensus(read, complement_read)
dcs_read = create_aligned_segment([read], dcs[0], dcs[1], query_name)
return dcs_read<|docstring|>(str, str, dict, dict) -> Pysam.AlignedSegment
Return 'corrected' singleton using complement read from opposite strand (either found in SSCS or singleton).
Quality score calculated from singleton and complementary read. Read template based on singleton.<|endoftext|> |
b41f9d22b92e965444099acf4472ef7852a2a6ecdda0a4066da64a5095b13458 | def main():
'Singleton correction:\n - First correct with SSCS bam\n - Rescue remaining singletons with singleton bam\n '
parser = ArgumentParser()
parser.add_argument('--singleton', action='store', dest='singleton', help='input singleton BAM file', required=True, type=str)
parser.add_argument('--bedfile', action='store', dest='bedfile', help='Bedfile containing coordinates to subdivide the BAM file (Recommendation: cytoband.txt - See bed_separator.R for making your own bed file based on a target panel/specific coordinates)', required=False)
args = parser.parse_args()
start_time = time.time()
singleton_bam = pysam.AlignmentFile(args.singleton, 'rb')
sscs_bam = pysam.AlignmentFile('{}.sscs{}'.format(args.singleton.split('.singleton')[0], args.singleton.split('.singleton')[1]), 'rb')
sscs_correction_bam = pysam.AlignmentFile('{}.sscs.correction.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
singleton_correction_bam = pysam.AlignmentFile('{}.singleton.correction.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
uncorrected_bam = pysam.AlignmentFile('{}.uncorrected.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
stats = open('{}.stats.txt'.format(args.singleton.split('.singleton')[0]), 'a')
singleton_dict = collections.OrderedDict()
singleton_tag = collections.defaultdict(int)
singleton_pair = collections.defaultdict(list)
singleton_csn_pair = collections.defaultdict(list)
sscs_dict = collections.OrderedDict()
sscs_tag = collections.defaultdict(int)
sscs_pair = collections.defaultdict(list)
sscs_csn_pair = collections.defaultdict(list)
correction_dict = collections.OrderedDict()
singleton_counter = 0
singleton_unmapped = 0
singleton_multiple_mappings = 0
sscs_counter = 0
sscs_unmapped = 0
sscs_multiple_mappings = 0
sscs_dup_correction = 0
singleton_dup_correction = 0
uncorrected_singleton = 0
counter = 0
if (args.bedfile is not None):
division_coor = bed_separator(args.bedfile)
else:
division_coor = [1]
last_chr = 'chrM'
for x in division_coor:
if (division_coor == [1]):
read_chr = None
read_start = None
read_end = None
else:
read_chr = x.split('_', 1)[0]
read_start = division_coor[x][0]
read_end = division_coor[x][1]
if (last_chr != read_chr):
singleton_tag = collections.defaultdict(int)
sscs_dict = collections.OrderedDict()
sscs_tag = collections.defaultdict(int)
sscs_pair = collections.defaultdict(list)
sscs_csn_pair = collections.defaultdict(list)
last_chr = read_chr
singleton = read_bam(singleton_bam, pair_dict=singleton_pair, read_dict=singleton_dict, tag_dict=singleton_tag, csn_pair_dict=singleton_csn_pair, badRead_bam=None, duplex=True, read_chr=read_chr, read_start=read_start, read_end=read_end)
singleton_dict = singleton[0]
singleton_tag = singleton[1]
singleton_pair = singleton[2]
singleton_csn_pair = singleton[3]
singleton_counter += singleton[4]
singleton_unmapped += singleton[5]
singleton_multiple_mappings += singleton[6]
sscs = read_bam(sscs_bam, pair_dict=sscs_pair, read_dict=sscs_dict, tag_dict=sscs_tag, csn_pair_dict=sscs_csn_pair, badRead_bam=None, duplex=True, read_chr=read_chr, read_start=read_start, read_end=read_end)
sscs_dict = sscs[0]
sscs_tag = sscs[1]
sscs_pair = sscs[2]
sscs_csn_pair = sscs[3]
sscs_counter += sscs[4]
sscs_unmapped += sscs[5]
sscs_multiple_mappings += sscs[6]
for readPair in list(singleton_csn_pair.keys()):
for tag in singleton_csn_pair[readPair]:
counter += 1
duplex = duplex_tag(tag)
query_name = (readPair + ':1')
if (duplex in sscs_dict.keys()):
corrected_read = strand_correction(tag, duplex, query_name, singleton_dict, sscs_dict=sscs_dict)
sscs_dup_correction += 1
sscs_correction_bam.write(corrected_read)
del sscs_dict[duplex]
del singleton_dict[tag]
elif (duplex in singleton_dict.keys()):
corrected_read = strand_correction(tag, duplex, query_name, singleton_dict)
singleton_dup_correction += 1
singleton_correction_bam.write(corrected_read)
correction_dict[tag] = duplex
if (duplex in correction_dict.keys()):
del singleton_dict[tag]
del singleton_dict[duplex]
del correction_dict[tag]
del correction_dict[duplex]
else:
uncorrected_bam.write(singleton_dict[tag][0])
uncorrected_singleton += 1
del singleton_dict[tag]
del singleton_csn_pair[readPair]
sscs_correction_frac = ((sscs_dup_correction / singleton_counter) * 100)
singleton_correction_frac = ((singleton_dup_correction / singleton_counter) * 100)
summary_stats = '# === Singleton Correction ===\nTotal singletons: {}\nSingleton Correction by SSCS: {}\n% Singleton Correction by SSCS: {}\nSingleton Correction by Singletons: {}\n% Singleton Correction by Singletons : {}\nUncorrected Singletons: {} \n'.format(counter, sscs_dup_correction, sscs_correction_frac, singleton_dup_correction, singleton_correction_frac, uncorrected_singleton)
stats.write(summary_stats)
print(summary_stats)
singleton_bam.close()
sscs_bam.close()
sscs_correction_bam.close()
singleton_correction_bam.close()
uncorrected_bam.close()
stats.close() | Singleton correction:
- First correct with SSCS bam
- Rescue remaining singletons with singleton bam | ConsensusCruncher/singleton_correction.py | main | kridel-lab/ConsensusCruncher | 17 | python | def main():
'Singleton correction:\n - First correct with SSCS bam\n - Rescue remaining singletons with singleton bam\n '
parser = ArgumentParser()
parser.add_argument('--singleton', action='store', dest='singleton', help='input singleton BAM file', required=True, type=str)
parser.add_argument('--bedfile', action='store', dest='bedfile', help='Bedfile containing coordinates to subdivide the BAM file (Recommendation: cytoband.txt - See bed_separator.R for making your own bed file based on a target panel/specific coordinates)', required=False)
args = parser.parse_args()
start_time = time.time()
singleton_bam = pysam.AlignmentFile(args.singleton, 'rb')
sscs_bam = pysam.AlignmentFile('{}.sscs{}'.format(args.singleton.split('.singleton')[0], args.singleton.split('.singleton')[1]), 'rb')
sscs_correction_bam = pysam.AlignmentFile('{}.sscs.correction.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
singleton_correction_bam = pysam.AlignmentFile('{}.singleton.correction.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
uncorrected_bam = pysam.AlignmentFile('{}.uncorrected.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
stats = open('{}.stats.txt'.format(args.singleton.split('.singleton')[0]), 'a')
singleton_dict = collections.OrderedDict()
singleton_tag = collections.defaultdict(int)
singleton_pair = collections.defaultdict(list)
singleton_csn_pair = collections.defaultdict(list)
sscs_dict = collections.OrderedDict()
sscs_tag = collections.defaultdict(int)
sscs_pair = collections.defaultdict(list)
sscs_csn_pair = collections.defaultdict(list)
correction_dict = collections.OrderedDict()
singleton_counter = 0
singleton_unmapped = 0
singleton_multiple_mappings = 0
sscs_counter = 0
sscs_unmapped = 0
sscs_multiple_mappings = 0
sscs_dup_correction = 0
singleton_dup_correction = 0
uncorrected_singleton = 0
counter = 0
if (args.bedfile is not None):
division_coor = bed_separator(args.bedfile)
else:
division_coor = [1]
last_chr = 'chrM'
for x in division_coor:
if (division_coor == [1]):
read_chr = None
read_start = None
read_end = None
else:
read_chr = x.split('_', 1)[0]
read_start = division_coor[x][0]
read_end = division_coor[x][1]
if (last_chr != read_chr):
singleton_tag = collections.defaultdict(int)
sscs_dict = collections.OrderedDict()
sscs_tag = collections.defaultdict(int)
sscs_pair = collections.defaultdict(list)
sscs_csn_pair = collections.defaultdict(list)
last_chr = read_chr
singleton = read_bam(singleton_bam, pair_dict=singleton_pair, read_dict=singleton_dict, tag_dict=singleton_tag, csn_pair_dict=singleton_csn_pair, badRead_bam=None, duplex=True, read_chr=read_chr, read_start=read_start, read_end=read_end)
singleton_dict = singleton[0]
singleton_tag = singleton[1]
singleton_pair = singleton[2]
singleton_csn_pair = singleton[3]
singleton_counter += singleton[4]
singleton_unmapped += singleton[5]
singleton_multiple_mappings += singleton[6]
sscs = read_bam(sscs_bam, pair_dict=sscs_pair, read_dict=sscs_dict, tag_dict=sscs_tag, csn_pair_dict=sscs_csn_pair, badRead_bam=None, duplex=True, read_chr=read_chr, read_start=read_start, read_end=read_end)
sscs_dict = sscs[0]
sscs_tag = sscs[1]
sscs_pair = sscs[2]
sscs_csn_pair = sscs[3]
sscs_counter += sscs[4]
sscs_unmapped += sscs[5]
sscs_multiple_mappings += sscs[6]
for readPair in list(singleton_csn_pair.keys()):
for tag in singleton_csn_pair[readPair]:
counter += 1
duplex = duplex_tag(tag)
query_name = (readPair + ':1')
if (duplex in sscs_dict.keys()):
corrected_read = strand_correction(tag, duplex, query_name, singleton_dict, sscs_dict=sscs_dict)
sscs_dup_correction += 1
sscs_correction_bam.write(corrected_read)
del sscs_dict[duplex]
del singleton_dict[tag]
elif (duplex in singleton_dict.keys()):
corrected_read = strand_correction(tag, duplex, query_name, singleton_dict)
singleton_dup_correction += 1
singleton_correction_bam.write(corrected_read)
correction_dict[tag] = duplex
if (duplex in correction_dict.keys()):
del singleton_dict[tag]
del singleton_dict[duplex]
del correction_dict[tag]
del correction_dict[duplex]
else:
uncorrected_bam.write(singleton_dict[tag][0])
uncorrected_singleton += 1
del singleton_dict[tag]
del singleton_csn_pair[readPair]
sscs_correction_frac = ((sscs_dup_correction / singleton_counter) * 100)
singleton_correction_frac = ((singleton_dup_correction / singleton_counter) * 100)
summary_stats = '# === Singleton Correction ===\nTotal singletons: {}\nSingleton Correction by SSCS: {}\n% Singleton Correction by SSCS: {}\nSingleton Correction by Singletons: {}\n% Singleton Correction by Singletons : {}\nUncorrected Singletons: {} \n'.format(counter, sscs_dup_correction, sscs_correction_frac, singleton_dup_correction, singleton_correction_frac, uncorrected_singleton)
stats.write(summary_stats)
print(summary_stats)
singleton_bam.close()
sscs_bam.close()
sscs_correction_bam.close()
singleton_correction_bam.close()
uncorrected_bam.close()
stats.close() | def main():
'Singleton correction:\n - First correct with SSCS bam\n - Rescue remaining singletons with singleton bam\n '
parser = ArgumentParser()
parser.add_argument('--singleton', action='store', dest='singleton', help='input singleton BAM file', required=True, type=str)
parser.add_argument('--bedfile', action='store', dest='bedfile', help='Bedfile containing coordinates to subdivide the BAM file (Recommendation: cytoband.txt - See bed_separator.R for making your own bed file based on a target panel/specific coordinates)', required=False)
args = parser.parse_args()
start_time = time.time()
singleton_bam = pysam.AlignmentFile(args.singleton, 'rb')
sscs_bam = pysam.AlignmentFile('{}.sscs{}'.format(args.singleton.split('.singleton')[0], args.singleton.split('.singleton')[1]), 'rb')
sscs_correction_bam = pysam.AlignmentFile('{}.sscs.correction.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
singleton_correction_bam = pysam.AlignmentFile('{}.singleton.correction.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
uncorrected_bam = pysam.AlignmentFile('{}.uncorrected.bam'.format(args.singleton.split('.singleton')[0]), 'wb', template=singleton_bam)
stats = open('{}.stats.txt'.format(args.singleton.split('.singleton')[0]), 'a')
singleton_dict = collections.OrderedDict()
singleton_tag = collections.defaultdict(int)
singleton_pair = collections.defaultdict(list)
singleton_csn_pair = collections.defaultdict(list)
sscs_dict = collections.OrderedDict()
sscs_tag = collections.defaultdict(int)
sscs_pair = collections.defaultdict(list)
sscs_csn_pair = collections.defaultdict(list)
correction_dict = collections.OrderedDict()
singleton_counter = 0
singleton_unmapped = 0
singleton_multiple_mappings = 0
sscs_counter = 0
sscs_unmapped = 0
sscs_multiple_mappings = 0
sscs_dup_correction = 0
singleton_dup_correction = 0
uncorrected_singleton = 0
counter = 0
if (args.bedfile is not None):
division_coor = bed_separator(args.bedfile)
else:
division_coor = [1]
last_chr = 'chrM'
for x in division_coor:
if (division_coor == [1]):
read_chr = None
read_start = None
read_end = None
else:
read_chr = x.split('_', 1)[0]
read_start = division_coor[x][0]
read_end = division_coor[x][1]
if (last_chr != read_chr):
singleton_tag = collections.defaultdict(int)
sscs_dict = collections.OrderedDict()
sscs_tag = collections.defaultdict(int)
sscs_pair = collections.defaultdict(list)
sscs_csn_pair = collections.defaultdict(list)
last_chr = read_chr
singleton = read_bam(singleton_bam, pair_dict=singleton_pair, read_dict=singleton_dict, tag_dict=singleton_tag, csn_pair_dict=singleton_csn_pair, badRead_bam=None, duplex=True, read_chr=read_chr, read_start=read_start, read_end=read_end)
singleton_dict = singleton[0]
singleton_tag = singleton[1]
singleton_pair = singleton[2]
singleton_csn_pair = singleton[3]
singleton_counter += singleton[4]
singleton_unmapped += singleton[5]
singleton_multiple_mappings += singleton[6]
sscs = read_bam(sscs_bam, pair_dict=sscs_pair, read_dict=sscs_dict, tag_dict=sscs_tag, csn_pair_dict=sscs_csn_pair, badRead_bam=None, duplex=True, read_chr=read_chr, read_start=read_start, read_end=read_end)
sscs_dict = sscs[0]
sscs_tag = sscs[1]
sscs_pair = sscs[2]
sscs_csn_pair = sscs[3]
sscs_counter += sscs[4]
sscs_unmapped += sscs[5]
sscs_multiple_mappings += sscs[6]
for readPair in list(singleton_csn_pair.keys()):
for tag in singleton_csn_pair[readPair]:
counter += 1
duplex = duplex_tag(tag)
query_name = (readPair + ':1')
if (duplex in sscs_dict.keys()):
corrected_read = strand_correction(tag, duplex, query_name, singleton_dict, sscs_dict=sscs_dict)
sscs_dup_correction += 1
sscs_correction_bam.write(corrected_read)
del sscs_dict[duplex]
del singleton_dict[tag]
elif (duplex in singleton_dict.keys()):
corrected_read = strand_correction(tag, duplex, query_name, singleton_dict)
singleton_dup_correction += 1
singleton_correction_bam.write(corrected_read)
correction_dict[tag] = duplex
if (duplex in correction_dict.keys()):
del singleton_dict[tag]
del singleton_dict[duplex]
del correction_dict[tag]
del correction_dict[duplex]
else:
uncorrected_bam.write(singleton_dict[tag][0])
uncorrected_singleton += 1
del singleton_dict[tag]
del singleton_csn_pair[readPair]
sscs_correction_frac = ((sscs_dup_correction / singleton_counter) * 100)
singleton_correction_frac = ((singleton_dup_correction / singleton_counter) * 100)
summary_stats = '# === Singleton Correction ===\nTotal singletons: {}\nSingleton Correction by SSCS: {}\n% Singleton Correction by SSCS: {}\nSingleton Correction by Singletons: {}\n% Singleton Correction by Singletons : {}\nUncorrected Singletons: {} \n'.format(counter, sscs_dup_correction, sscs_correction_frac, singleton_dup_correction, singleton_correction_frac, uncorrected_singleton)
stats.write(summary_stats)
print(summary_stats)
singleton_bam.close()
sscs_bam.close()
sscs_correction_bam.close()
singleton_correction_bam.close()
uncorrected_bam.close()
stats.close()<|docstring|>Singleton correction:
- First correct with SSCS bam
- Rescue remaining singletons with singleton bam<|endoftext|> |
3a3cac939226ecdcb94ce007a4604043a3b4bd420acf9308197d4e9e2407bcb5 | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.Count = channel.unary_unary('/bluzelle.curium.crud.Msg/Count', request_serializer=crud_dot_tx__pb2.MsgCount.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgCountResponse.FromString)
self.RenewLeasesAll = channel.unary_unary('/bluzelle.curium.crud.Msg/RenewLeasesAll', request_serializer=crud_dot_tx__pb2.MsgRenewLeasesAll.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenewLeasesAllResponse.FromString)
self.RenewLease = channel.unary_unary('/bluzelle.curium.crud.Msg/RenewLease', request_serializer=crud_dot_tx__pb2.MsgRenewLease.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenewLeaseResponse.FromString)
self.GetNShortestLeases = channel.unary_unary('/bluzelle.curium.crud.Msg/GetNShortestLeases', request_serializer=crud_dot_tx__pb2.MsgGetNShortestLeases.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgGetNShortestLeasesResponse.FromString)
self.Keys = channel.unary_unary('/bluzelle.curium.crud.Msg/Keys', request_serializer=crud_dot_tx__pb2.MsgKeys.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgKeysResponse.FromString)
self.Rename = channel.unary_unary('/bluzelle.curium.crud.Msg/Rename', request_serializer=crud_dot_tx__pb2.MsgRename.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenameResponse.FromString)
self.MultiUpdate = channel.unary_unary('/bluzelle.curium.crud.Msg/MultiUpdate', request_serializer=crud_dot_tx__pb2.MsgMultiUpdate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgMultiUpdateResponse.FromString)
self.DeleteAll = channel.unary_unary('/bluzelle.curium.crud.Msg/DeleteAll', request_serializer=crud_dot_tx__pb2.MsgDeleteAll.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgDeleteAllResponse.FromString)
self.KeyValues = channel.unary_unary('/bluzelle.curium.crud.Msg/KeyValues', request_serializer=crud_dot_tx__pb2.MsgKeyValues.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgKeyValuesResponse.FromString)
self.Has = channel.unary_unary('/bluzelle.curium.crud.Msg/Has', request_serializer=crud_dot_tx__pb2.MsgHas.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgHasResponse.FromString)
self.GetLease = channel.unary_unary('/bluzelle.curium.crud.Msg/GetLease', request_serializer=crud_dot_tx__pb2.MsgGetLease.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgGetLeaseResponse.FromString)
self.Read = channel.unary_unary('/bluzelle.curium.crud.Msg/Read', request_serializer=crud_dot_tx__pb2.MsgRead.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgReadResponse.FromString)
self.Upsert = channel.unary_unary('/bluzelle.curium.crud.Msg/Upsert', request_serializer=crud_dot_tx__pb2.MsgUpsert.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgUpsertResponse.FromString)
self.Create = channel.unary_unary('/bluzelle.curium.crud.Msg/Create', request_serializer=crud_dot_tx__pb2.MsgCreate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgCreateResponse.FromString)
self.Update = channel.unary_unary('/bluzelle.curium.crud.Msg/Update', request_serializer=crud_dot_tx__pb2.MsgUpdate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgUpdateResponse.FromString)
self.Delete = channel.unary_unary('/bluzelle.curium.crud.Msg/Delete', request_serializer=crud_dot_tx__pb2.MsgDelete.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgDeleteResponse.FromString) | Constructor.
Args:
channel: A grpc.Channel. | bluzelle/codec/crud/tx_pb2_grpc.py | __init__ | hhio618/bluezelle-py | 3 | python | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.Count = channel.unary_unary('/bluzelle.curium.crud.Msg/Count', request_serializer=crud_dot_tx__pb2.MsgCount.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgCountResponse.FromString)
self.RenewLeasesAll = channel.unary_unary('/bluzelle.curium.crud.Msg/RenewLeasesAll', request_serializer=crud_dot_tx__pb2.MsgRenewLeasesAll.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenewLeasesAllResponse.FromString)
self.RenewLease = channel.unary_unary('/bluzelle.curium.crud.Msg/RenewLease', request_serializer=crud_dot_tx__pb2.MsgRenewLease.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenewLeaseResponse.FromString)
self.GetNShortestLeases = channel.unary_unary('/bluzelle.curium.crud.Msg/GetNShortestLeases', request_serializer=crud_dot_tx__pb2.MsgGetNShortestLeases.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgGetNShortestLeasesResponse.FromString)
self.Keys = channel.unary_unary('/bluzelle.curium.crud.Msg/Keys', request_serializer=crud_dot_tx__pb2.MsgKeys.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgKeysResponse.FromString)
self.Rename = channel.unary_unary('/bluzelle.curium.crud.Msg/Rename', request_serializer=crud_dot_tx__pb2.MsgRename.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenameResponse.FromString)
self.MultiUpdate = channel.unary_unary('/bluzelle.curium.crud.Msg/MultiUpdate', request_serializer=crud_dot_tx__pb2.MsgMultiUpdate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgMultiUpdateResponse.FromString)
self.DeleteAll = channel.unary_unary('/bluzelle.curium.crud.Msg/DeleteAll', request_serializer=crud_dot_tx__pb2.MsgDeleteAll.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgDeleteAllResponse.FromString)
self.KeyValues = channel.unary_unary('/bluzelle.curium.crud.Msg/KeyValues', request_serializer=crud_dot_tx__pb2.MsgKeyValues.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgKeyValuesResponse.FromString)
self.Has = channel.unary_unary('/bluzelle.curium.crud.Msg/Has', request_serializer=crud_dot_tx__pb2.MsgHas.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgHasResponse.FromString)
self.GetLease = channel.unary_unary('/bluzelle.curium.crud.Msg/GetLease', request_serializer=crud_dot_tx__pb2.MsgGetLease.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgGetLeaseResponse.FromString)
self.Read = channel.unary_unary('/bluzelle.curium.crud.Msg/Read', request_serializer=crud_dot_tx__pb2.MsgRead.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgReadResponse.FromString)
self.Upsert = channel.unary_unary('/bluzelle.curium.crud.Msg/Upsert', request_serializer=crud_dot_tx__pb2.MsgUpsert.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgUpsertResponse.FromString)
self.Create = channel.unary_unary('/bluzelle.curium.crud.Msg/Create', request_serializer=crud_dot_tx__pb2.MsgCreate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgCreateResponse.FromString)
self.Update = channel.unary_unary('/bluzelle.curium.crud.Msg/Update', request_serializer=crud_dot_tx__pb2.MsgUpdate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgUpdateResponse.FromString)
self.Delete = channel.unary_unary('/bluzelle.curium.crud.Msg/Delete', request_serializer=crud_dot_tx__pb2.MsgDelete.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgDeleteResponse.FromString) | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.Count = channel.unary_unary('/bluzelle.curium.crud.Msg/Count', request_serializer=crud_dot_tx__pb2.MsgCount.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgCountResponse.FromString)
self.RenewLeasesAll = channel.unary_unary('/bluzelle.curium.crud.Msg/RenewLeasesAll', request_serializer=crud_dot_tx__pb2.MsgRenewLeasesAll.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenewLeasesAllResponse.FromString)
self.RenewLease = channel.unary_unary('/bluzelle.curium.crud.Msg/RenewLease', request_serializer=crud_dot_tx__pb2.MsgRenewLease.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenewLeaseResponse.FromString)
self.GetNShortestLeases = channel.unary_unary('/bluzelle.curium.crud.Msg/GetNShortestLeases', request_serializer=crud_dot_tx__pb2.MsgGetNShortestLeases.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgGetNShortestLeasesResponse.FromString)
self.Keys = channel.unary_unary('/bluzelle.curium.crud.Msg/Keys', request_serializer=crud_dot_tx__pb2.MsgKeys.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgKeysResponse.FromString)
self.Rename = channel.unary_unary('/bluzelle.curium.crud.Msg/Rename', request_serializer=crud_dot_tx__pb2.MsgRename.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgRenameResponse.FromString)
self.MultiUpdate = channel.unary_unary('/bluzelle.curium.crud.Msg/MultiUpdate', request_serializer=crud_dot_tx__pb2.MsgMultiUpdate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgMultiUpdateResponse.FromString)
self.DeleteAll = channel.unary_unary('/bluzelle.curium.crud.Msg/DeleteAll', request_serializer=crud_dot_tx__pb2.MsgDeleteAll.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgDeleteAllResponse.FromString)
self.KeyValues = channel.unary_unary('/bluzelle.curium.crud.Msg/KeyValues', request_serializer=crud_dot_tx__pb2.MsgKeyValues.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgKeyValuesResponse.FromString)
self.Has = channel.unary_unary('/bluzelle.curium.crud.Msg/Has', request_serializer=crud_dot_tx__pb2.MsgHas.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgHasResponse.FromString)
self.GetLease = channel.unary_unary('/bluzelle.curium.crud.Msg/GetLease', request_serializer=crud_dot_tx__pb2.MsgGetLease.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgGetLeaseResponse.FromString)
self.Read = channel.unary_unary('/bluzelle.curium.crud.Msg/Read', request_serializer=crud_dot_tx__pb2.MsgRead.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgReadResponse.FromString)
self.Upsert = channel.unary_unary('/bluzelle.curium.crud.Msg/Upsert', request_serializer=crud_dot_tx__pb2.MsgUpsert.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgUpsertResponse.FromString)
self.Create = channel.unary_unary('/bluzelle.curium.crud.Msg/Create', request_serializer=crud_dot_tx__pb2.MsgCreate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgCreateResponse.FromString)
self.Update = channel.unary_unary('/bluzelle.curium.crud.Msg/Update', request_serializer=crud_dot_tx__pb2.MsgUpdate.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgUpdateResponse.FromString)
self.Delete = channel.unary_unary('/bluzelle.curium.crud.Msg/Delete', request_serializer=crud_dot_tx__pb2.MsgDelete.SerializeToString, response_deserializer=crud_dot_tx__pb2.MsgDeleteResponse.FromString)<|docstring|>Constructor.
Args:
channel: A grpc.Channel.<|endoftext|> |
c267fc6070c6e65b9755949070fe456fe567c8cac338e9cfe16e42e55dcbc108 | def Count(self, request, context):
'this line is used by starport scaffolding # proto/tx/rpc.'
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | this line is used by starport scaffolding # proto/tx/rpc. | bluzelle/codec/crud/tx_pb2_grpc.py | Count | hhio618/bluezelle-py | 3 | python | def Count(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | def Count(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')<|docstring|>this line is used by starport scaffolding # proto/tx/rpc.<|endoftext|> |
65d670443d9e5dc6a85db6c4844b2b6c1f1f8270f6e82997d39cefc9b360fcce | def RenewLeasesAll(self, request, context):
'Missing associated documentation comment in .proto file.'
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | Missing associated documentation comment in .proto file. | bluzelle/codec/crud/tx_pb2_grpc.py | RenewLeasesAll | hhio618/bluezelle-py | 3 | python | def RenewLeasesAll(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | def RenewLeasesAll(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')<|docstring|>Missing associated documentation comment in .proto file.<|endoftext|> |
5b923c0e13e1a76caf4aacbfa08dabd61be01bbfcc2918335ed9e1e37f3b2aea | def RenewLease(self, request, context):
'Missing associated documentation comment in .proto file.'
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | Missing associated documentation comment in .proto file. | bluzelle/codec/crud/tx_pb2_grpc.py | RenewLease | hhio618/bluezelle-py | 3 | python | def RenewLease(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | def RenewLease(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')<|docstring|>Missing associated documentation comment in .proto file.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.