| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (1 class: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
@pytest.mark.parametrize('n_voters', range(8))
def test_vote_actions(n_voters):
'* Legal transitions are UNDECIDED -> [VALID|INVALID] only\n * Block is never left UNDECIDED after voting\n * Accommodates rogues on previous block / invalid schema\n '
class TestVoting(Voting):
@classmethod
def verify_vote_schema(cls, vote):
return (type(vote['vote']['is_block_valid']) == bool)
@classmethod
def verify_vote_signature(cls, vote):
return True
keyring = 'abcdefghijklmnopqrstuvwxyz'[:n_voters]
block = {'id': 'block', 'block': {'voters': keyring}}
state = UNDECIDED
todo = [(state, [], [])]
def branch(p, r):
todo.append((state, votes, (votes + [{'node_pubkey': keyring[len(votes)], 'vote': {'previous_block': p, 'is_block_valid': r}}])))
while todo:
(prev_state, prev_votes, votes) = todo.pop(0)
results = Counter((v['vote']['is_block_valid'] for v in votes))
prev_blocks = Counter((v['vote']['previous_block'] for v in votes))
majority = ((n_voters // 2) + 1)
honest = ((len(votes) == majority) and (len(prev_blocks) == 1) and (not results['lol']) and (len(results) == 1))
closed = (len(votes) == n_voters)
if votes:
state = TestVoting.block_election(block, votes, keyring)['status']
assert (prev_state in [state, UNDECIDED])
if (honest or closed):
assert ((state != UNDECIDED) or (n_voters == 0))
if closed:
continue
branch('A', True)
branch('B', True)
branch('A', False)
branch('A', 'lol')
| -6,545,330,417,013,052,000
|
* Legal transitions are UNDECIDED -> [VALID|INVALID] only
* Block is never left UNDECIDED after voting
* Accommodates rogues on previous block / invalid schema
|
tests/test_voting.py
|
test_vote_actions
|
RiddleAndCode/bigchaindb
|
python
|
@pytest.mark.parametrize('n_voters', range(8))
def test_vote_actions(n_voters):
'* Legal transitions are UNDECIDED -> [VALID|INVALID] only\n * Block is never left UNDECIDED after voting\n * Accommodates rogues on previous block / invalid schema\n '
class TestVoting(Voting):
@classmethod
def verify_vote_schema(cls, vote):
return (type(vote['vote']['is_block_valid']) == bool)
@classmethod
def verify_vote_signature(cls, vote):
return True
keyring = 'abcdefghijklmnopqrstuvwxyz'[:n_voters]
block = {'id': 'block', 'block': {'voters': keyring}}
state = UNDECIDED
todo = [(state, [], [])]
def branch(p, r):
todo.append((state, votes, (votes + [{'node_pubkey': keyring[len(votes)], 'vote': {'previous_block': p, 'is_block_valid': r}}])))
while todo:
(prev_state, prev_votes, votes) = todo.pop(0)
results = Counter((v['vote']['is_block_valid'] for v in votes))
prev_blocks = Counter((v['vote']['previous_block'] for v in votes))
majority = ((n_voters // 2) + 1)
honest = ((len(votes) == majority) and (len(prev_blocks) == 1) and (not results['lol']) and (len(results) == 1))
closed = (len(votes) == n_voters)
if votes:
state = TestVoting.block_election(block, votes, keyring)['status']
assert (prev_state in [state, UNDECIDED])
if (honest or closed):
assert ((state != UNDECIDED) or (n_voters == 0))
if closed:
continue
branch('A', True)
branch('B', True)
branch('A', False)
branch('A', 'lol')
|
def get_home():
'Get home directory of user, using Windows home directory for WSL.'
if (PF in {'WSL1', 'WSL2'}):
return (wsl.get_wsl_home() or Path.home().expanduser())
return Path.home().expanduser()
| -8,916,673,993,594,660,000
|
Get home directory of user, using Windows home directory for WSL.
|
sc2/paths.py
|
get_home
|
Sc2-AI-Cup/example-bot-marinerush
|
python
|
def get_home():
if (PF in {'WSL1', 'WSL2'}):
return (wsl.get_wsl_home() or Path.home().expanduser())
return Path.home().expanduser()
|
def get_user_sc2_install():
"Attempts to find a user's SC2 install if their OS has ExecuteInfo.txt"
if USERPATH[PF]:
einfo = str((get_home() / Path(USERPATH[PF])))
if os.path.isfile(einfo):
with open(einfo) as f:
content = f.read()
if content:
base = re.search(' = (.*)Versions', content).group(1)
if (PF in {'WSL1', 'WSL2'}):
base = str(wsl.win_path_to_wsl_path(base))
if os.path.exists(base):
return base
return None
| -7,270,016,686,687,919,000
|
Attempts to find a user's SC2 install if their OS has ExecuteInfo.txt
|
sc2/paths.py
|
get_user_sc2_install
|
Sc2-AI-Cup/example-bot-marinerush
|
python
|
def get_user_sc2_install():
if USERPATH[PF]:
einfo = str((get_home() / Path(USERPATH[PF])))
if os.path.isfile(einfo):
with open(einfo) as f:
content = f.read()
if content:
base = re.search(' = (.*)Versions', content).group(1)
if (PF in {'WSL1', 'WSL2'}):
base = str(wsl.win_path_to_wsl_path(base))
if os.path.exists(base):
return base
return None
|
def logits_process(logits):
'\n Get the logits as a tuple of softmax logits, bounding boxes and labels.\n Output matrices:\n logits_mat in size (dataset, 300, 1231) - top 300 logits for each image.\n bboxes_mat in size (dataset, 300, 4) - top 300 bboxes for each image.\n labels_mat in size (dataset, 300, 1) - corresponding labels. 300 for each image.\n '
logits_mat = np.zeros((TEMP_DATASET_SIZE, 300, 1231))
bboxes_mat = np.zeros((TEMP_DATASET_SIZE, 300, 4))
labels_mat = np.zeros((TEMP_DATASET_SIZE, 300))
proposal_num = np.zeros((TEMP_DATASET_SIZE, 300, 1))
for (i, image) in enumerate(logits):
for (j, bbox) in enumerate(image[0]):
index = int(bbox[5].item())
logits_vector = image[2][index]
bboxes_mat[i][j][:] = bbox[:4]
logits_mat[i][j] = np.array(logits_vector)
proposal_num[i][j] = bbox[(- 1)]
labels_mat[i] = image[1]
return (bboxes_mat, labels_mat, logits_mat, proposal_num)
| 7,758,338,616,256,155,000
|
Get the logits as a tuple of softmax logits, bounding boxes and labels.
Output matrices:
logits_mat in size (dataset, 300, 1231) - top 300 logits for each image.
bboxes_mat in size (dataset, 300, 4) - top 300 bboxes for each image.
labels_mat in size (dataset, 300, 1) - corresponding labels. 300 for each image.
|
tools/test_lvis.py
|
logits_process
|
ydiller/BalancedGroupSoftmax
|
python
|
def logits_process(logits):
'\n Get the logits as a tuple of softmax logits, bounding boxes and labels.\n Output matrices:\n logits_mat in size (dataset, 300, 1231) - top 300 logits for each image.\n bboxes_mat in size (dataset, 300, 4) - top 300 bboxes for each image.\n labels_mat in size (dataset, 300, 1) - corresponding labels. 300 for each image.\n '
logits_mat = np.zeros((TEMP_DATASET_SIZE, 300, 1231))
bboxes_mat = np.zeros((TEMP_DATASET_SIZE, 300, 4))
labels_mat = np.zeros((TEMP_DATASET_SIZE, 300))
proposal_num = np.zeros((TEMP_DATASET_SIZE, 300, 1))
for (i, image) in enumerate(logits):
for (j, bbox) in enumerate(image[0]):
index = int(bbox[5].item())
logits_vector = image[2][index]
bboxes_mat[i][j][:] = bbox[:4]
logits_mat[i][j] = np.array(logits_vector)
proposal_num[i][j] = bbox[(- 1)]
labels_mat[i] = image[1]
return (bboxes_mat, labels_mat, logits_mat, proposal_num)
|
def find_file(path, reg):
'\n path: directory to walk\n reg: regex pattern for matching files\n '
FileLst = []
try:
lst = os.walk(path)
for (root, dirs, files) in lst:
for name in files:
try:
m = re.match(reg, name)
except Exception as e:
continue
if m:
FileLst.append(os.path.join(root, name))
except Exception as e:
print(str(e))
return sorted(FileLst)
| -6,400,951,898,583,274,000
|
path: directory to walk
reg: regex pattern for matching files
|
lib/pb_io.py
|
find_file
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def find_file(path, reg):
'\n path: directory to walk\n reg: regex pattern for matching files\n '
FileLst = []
try:
lst = os.walk(path)
for (root, dirs, files) in lst:
for name in files:
try:
m = re.match(reg, name)
except Exception as e:
continue
if m:
FileLst.append(os.path.join(root, name))
except Exception as e:
print(str(e))
return sorted(FileLst)
|
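A minimal usage sketch for `find_file`; the directory and pattern are illustrative assumptions, not taken from the repository:

```python
# Hypothetical call: collect every .hdf5 file under /data, sorted by path.
matched = find_file('/data', r'.*\.hdf5$')
for f in matched:
    print(f)
```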
def path_replace_ymd(path, ymd):
'\n path: replace the date in the path; %YYYY %MM %DD %JJJ keywords in path are filled from the ymd date\n ymd: yyyymmdd (20180101)\n '
ymd = datetime.strptime(ymd, '%Y%m%d')
yy = ymd.strftime('%Y')
mm = ymd.strftime('%m')
dd = ymd.strftime('%d')
jj = ymd.strftime('%j')
path = path.replace('%YYYY', yy)
path = path.replace('%MM', mm)
path = path.replace('%DD', dd)
path = path.replace('%JJJ', jj)
return path
| 4,353,463,753,659,498,500
|
path: replace the date in the path; %YYYY %MM %DD %JJJ keywords in path are filled from the ymd date
ymd: yyyymmdd (20180101)
|
lib/pb_io.py
|
path_replace_ymd
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def path_replace_ymd(path, ymd):
'\n path: replace the date in the path; %YYYY %MM %DD %JJJ keywords in path are filled from the ymd date\n ymd: yyyymmdd (20180101)\n '
ymd = datetime.strptime(ymd, '%Y%m%d')
yy = ymd.strftime('%Y')
mm = ymd.strftime('%m')
dd = ymd.strftime('%d')
jj = ymd.strftime('%j')
path = path.replace('%YYYY', yy)
path = path.replace('%MM', mm)
path = path.replace('%DD', dd)
path = path.replace('%JJJ', jj)
return path
|
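A deterministic example of `path_replace_ymd`; the template path is invented for illustration:

```python
p = path_replace_ymd('/data/%YYYY/%MM/%DD/SNOW_%YYYY%MM%DD_%JJJ.hdf', '20180101')
# %YYYY -> '2018', %MM -> '01', %DD -> '01', %JJJ (day of year) -> '001'
assert p == '/data/2018/01/01/SNOW_20180101_001.hdf'
```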
def is_none(*args):
'\n Check whether any of the passed arguments is None\n :param args:\n :return:\n '
has_none = False
for arg in args:
if (arg is None):
has_none = True
return has_none
| 7,719,742,756,869,125,000
|
Check whether any of the passed arguments is None
:param args:
:return:
|
lib/pb_io.py
|
is_none
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def is_none(*args):
'\n Check whether any of the passed arguments is None\n :param args:\n :return:\n '
has_none = False
for arg in args:
if (arg is None):
has_none = True
return has_none
|
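`is_none` scans all arguments without short-circuiting; a quick sketch of its behavior and an equivalent one-liner:

```python
assert is_none(1, None, 'a') is True
assert is_none(1, 2) is False
# Equivalent, idiomatic form with the same result:
# any(arg is None for arg in args)
```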
def copy_attrs_h5py(pre_object, out_object):
'\n Copy the attributes of an HDF5 file, dataset, or group\n :param pre_object: dataset or group whose attributes are copied\n :param out_object: dataset or group that receives the attributes\n :return:\n '
for akey in list(pre_object.attrs.keys()):
out_object.attrs[akey] = pre_object.attrs[akey]
| 6,695,034,932,447,956,000
|
Copy the attributes of an HDF5 file, dataset, or group
:param pre_object: dataset or group whose attributes are copied
:param out_object: dataset or group that receives the attributes
:return:
|
lib/pb_io.py
|
copy_attrs_h5py
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def copy_attrs_h5py(pre_object, out_object):
'\n Copy the attributes of an HDF5 file, dataset, or group\n :param pre_object: dataset or group whose attributes are copied\n :param out_object: dataset or group that receives the attributes\n :return:\n '
for akey in list(pre_object.attrs.keys()):
out_object.attrs[akey] = pre_object.attrs[akey]
|
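A hedged sketch of copying attributes between two HDF5 files with `copy_attrs_h5py`; the file and dataset names are assumptions:

```python
import h5py

with h5py.File('in.h5', 'r') as src, h5py.File('out.h5', 'w') as dst:
    dst.create_dataset('data', data=src['data'][:])
    copy_attrs_h5py(src['data'], dst['data'])  # mirror the dataset's attrs
```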
def read_dataset_hdf5(file_path, set_name):
'\n Read an HDF5 file and return a numpy multidimensional array\n :param file_path: (unicode) file path\n :param set_name: (str or list) dataset name(s)\n :return: if set_name is a string, returns a numpy.ndarray;\n if set_name is a list, returns a dict whose keys are the dataset names\n and whose values are numpy.ndarray\n '
if isinstance(set_name, str):
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
data = file_h5py.get(set_name)[:]
dataset = np.array(data)
file_h5py.close()
return dataset
else:
raise ValueError('value error: file_path')
elif isinstance(set_name, list):
datasets = {}
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
for name in set_name:
data = file_h5py.get(name)[:]
dataset = np.array(data)
datasets[name] = dataset
file_h5py.close()
return datasets
else:
raise ValueError('value error: file_path')
else:
raise ValueError('value error: set_name')
| 828,793,903,532,586,800
|
Read an HDF5 file and return a numpy multidimensional array
:param file_path: (unicode) file path
:param set_name: (str or list) dataset name(s)
:return: if set_name is a string, returns a numpy.ndarray;
if set_name is a list, returns a dict whose keys are the dataset names
and whose values are numpy.ndarray
|
lib/pb_io.py
|
read_dataset_hdf5
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def read_dataset_hdf5(file_path, set_name):
'\n Read an HDF5 file and return a numpy multidimensional array\n :param file_path: (unicode) file path\n :param set_name: (str or list) dataset name(s)\n :return: if set_name is a string, returns a numpy.ndarray;\n if set_name is a list, returns a dict whose keys are the dataset names\n and whose values are numpy.ndarray\n '
if isinstance(set_name, str):
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
data = file_h5py.get(set_name)[:]
dataset = np.array(data)
file_h5py.close()
return dataset
else:
raise ValueError('value error: file_path')
elif isinstance(set_name, list):
datasets = {}
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
for name in set_name:
data = file_h5py.get(name)[:]
dataset = np.array(data)
datasets[name] = dataset
file_h5py.close()
return datasets
else:
raise ValueError('value error: file_path')
else:
raise ValueError('value error: set_name')
|
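Illustrative calls for `read_dataset_hdf5`; the file and dataset names are hypothetical:

```python
arr = read_dataset_hdf5('sample.hdf5', 'SnowCover')        # -> numpy.ndarray
tables = read_dataset_hdf5('sample.hdf5', ['Lat', 'Lon'])  # -> {'Lat': ndarray, 'Lon': ndarray}
```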
def attrs2dict(attrs):
'\n Convert an HDF5 attrs object to a dict\n :return:\n '
d = {}
for (k, v) in list(attrs.items()):
d[k] = v
return d
| -215,780,085,529,066,880
|
Convert an HDF5 attrs object to a dict
:return:
|
lib/pb_io.py
|
attrs2dict
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def attrs2dict(attrs):
'\n Convert an HDF5 attrs object to a dict\n :return:\n '
d = {}
for (k, v) in list(attrs.items()):
d[k] = v
return d
|
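A one-line usage sketch for `attrs2dict`; the file name is assumed:

```python
import h5py

with h5py.File('sample.hdf5', 'r') as f:
    meta = attrs2dict(f.attrs)  # same result as dict(f.attrs.items())
```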
def write_txt(in_file, head, bodys, keylens=8):
'\n description: wangpeng add 20180615 (write or update a txt file)\n :in_file output file path\n :head file header line\n :bodys file body lines\n :keylens length of the first-column key used when updating the file\n '
allLines = []
DICT_D = {}
FilePath = os.path.dirname(in_file)
if (not os.path.exists(FilePath)):
os.makedirs(FilePath)
if (os.path.isfile(in_file) and (os.path.getsize(in_file) != 0)):
fp = open(in_file, 'r')
fp.readline()
Lines = fp.readlines()
fp.close()
for Line in Lines:
DICT_D[Line[:keylens]] = Line[keylens:]
for Line in bodys:
DICT_D[Line[:keylens]] = Line[keylens:]
newLines = sorted(iter(DICT_D.items()), key=(lambda d: d[0]), reverse=False)
for i in range(len(newLines)):
allLines.append((str(newLines[i][0]) + str(newLines[i][1])))
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(allLines)
fp.close()
else:
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(bodys)
fp.close()
| 2,325,851,969,103,667,700
|
description: wangpeng add 20180615 (write or update a txt file)
:in_file output file path
:head file header line
:bodys file body lines
:keylens length of the first-column key used when updating the file
|
lib/pb_io.py
|
write_txt
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def write_txt(in_file, head, bodys, keylens=8):
'\n description: wangpeng add 20180615 (write or update a txt file)\n :in_file output file path\n :head file header line\n :bodys file body lines\n :keylens length of the first-column key used when updating the file\n '
allLines = []
DICT_D = {}
FilePath = os.path.dirname(in_file)
if (not os.path.exists(FilePath)):
os.makedirs(FilePath)
if (os.path.isfile(in_file) and (os.path.getsize(in_file) != 0)):
fp = open(in_file, 'r')
fp.readline()
Lines = fp.readlines()
fp.close()
for Line in Lines:
DICT_D[Line[:keylens]] = Line[keylens:]
for Line in bodys:
DICT_D[Line[:keylens]] = Line[keylens:]
newLines = sorted(iter(DICT_D.items()), key=(lambda d: d[0]), reverse=False)
for i in range(len(newLines)):
allLines.append((str(newLines[i][0]) + str(newLines[i][1])))
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(allLines)
fp.close()
else:
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(bodys)
fp.close()
|
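A sketch of `write_txt`'s create-then-update behavior, assuming each body line starts with an 8-character date key (keylens=8); the file path and contents are invented:

```python
head = 'YYYYMMDD value\n'
write_txt('out/stats.txt', head, ['20180101 0.45\n', '20180102 0.52\n'])  # creates the file
write_txt('out/stats.txt', head, ['20180102 0.60\n'])  # replaces the 20180102 row, keeps 20180101
```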
def str_format(string, values):
'\n Format a string by %-keyword substitution\n :param string:(str) "DCC: %sat_sensor_Projection_%ymd (resolution %resolution degrees)"\n :param values:(dict) {"sat_sensor": sat_sensor, "resolution": str(resolution), "ymd": ymd}\n :return: DCC: FY3D+MERSI_Projection_201712 (resolution 1 degrees)\n '
if (not isinstance(string, str)):
return
for (k, v) in values.items():
string = string.replace(('%' + str(k)), str(v))
return string
| 3,361,385,951,881,129,500
|
Format a string by %-keyword substitution
:param string:(str) "DCC: %sat_sensor_Projection_%ymd (resolution %resolution degrees)"
:param values:(dict) {"sat_sensor": sat_sensor, "resolution": str(resolution), "ymd": ymd}
:return: DCC: FY3D+MERSI_Projection_201712 (resolution 1 degrees)
|
lib/pb_io.py
|
str_format
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def str_format(string, values):
'\n Format a string by %-keyword substitution\n :param string:(str) "DCC: %sat_sensor_Projection_%ymd (resolution %resolution degrees)"\n :param values:(dict) {"sat_sensor": sat_sensor, "resolution": str(resolution), "ymd": ymd}\n :return: DCC: FY3D+MERSI_Projection_201712 (resolution 1 degrees)\n '
if (not isinstance(string, str)):
return
for (k, v) in values.items():
string = string.replace(('%' + str(k)), str(v))
return string
|
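A deterministic example of `str_format`; note the replacement is a plain substring replace applied in dict order:

```python
values = {'sat_sensor': 'FY3D+MERSI', 'resolution': '1', 'ymd': '201712'}
s = str_format('DCC: %sat_sensor_Projection_%ymd (resolution %resolution degrees)', values)
assert s == 'DCC: FY3D+MERSI_Projection_201712 (resolution 1 degrees)'
```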
def get_files_by_ymd(dir_path, time_start, time_end, ext=None, pattern_ymd=None):
'\n :param dir_path: directory to search\n :param time_start: start time\n :param time_end: end time\n :param ext: file extension, e.g. \'.hdf5\'\n :param pattern_ymd: regex matching the time, e.g. r".*(\\d{8})_(\\d{4})_"\n :return: list\n '
files_found = []
if (pattern_ymd is not None):
pattern = pattern_ymd
else:
pattern = '.*(\\d{8})'
for (root, dirs, files) in os.walk(dir_path):
for file_name in files:
if (ext is not None):
if ('.' not in ext):
ext = ('.' + ext)
if (os.path.splitext(file_name)[1].lower() != ext.lower()):
continue
re_result = re.match(pattern, file_name)
if (re_result is not None):
time_file = ''.join(re_result.groups())
else:
continue
if (int(time_start) <= int(time_file) <= int(time_end)):
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
| 344,630,162,736,734,340
|
:param dir_path: directory to search
:param time_start: start time
:param time_end: end time
:param ext: file extension, e.g. '.hdf5'
:param pattern_ymd: regex matching the time, e.g. r".*(\d{8})_(\d{4})_"
:return: list
|
lib/pb_io.py
|
get_files_by_ymd
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def get_files_by_ymd(dir_path, time_start, time_end, ext=None, pattern_ymd=None):
'\n :param dir_path: directory to search\n :param time_start: start time\n :param time_end: end time\n :param ext: file extension, e.g. \'.hdf5\'\n :param pattern_ymd: regex matching the time, e.g. r".*(\\d{8})_(\\d{4})_"\n :return: list\n '
files_found = []
if (pattern_ymd is not None):
pattern = pattern_ymd
else:
pattern = '.*(\\d{8})'
for (root, dirs, files) in os.walk(dir_path):
for file_name in files:
if (ext is not None):
if ('.' not in ext):
ext = ('.' + ext)
if (os.path.splitext(file_name)[1].lower() != ext.lower()):
continue
re_result = re.match(pattern, file_name)
if (re_result is not None):
time_file = ''.join(re_result.groups())
else:
continue
if (int(time_start) <= int(time_file) <= int(time_end)):
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
|
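A hypothetical call for `get_files_by_ymd`; with the default pattern, an 8-digit group extracted from each file name is compared numerically against the time window:

```python
# Directory and window are illustrative assumptions.
files = get_files_by_ymd('/data/snow', '20180101', '20180131', ext='.hdf5')
```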
def ymdhms2date(ymd, hms):
'\n ymd = 20180101\n hms = 04:04:04\n '
ymdhms = (ymd + hms)
return datetime.strptime(ymdhms, '%Y%m%d%H:%M:%S')
| 8,144,697,994,038,613,000
|
ymd = 20180101
hms = 04:04:04
|
lib/pb_io.py
|
ymdhms2date
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def ymdhms2date(ymd, hms):
'\n ymd = 20180101\n hms = 04:04:04\n '
ymdhms = (ymd + hms)
return datetime.strptime(ymdhms, '%Y%m%d%H:%M:%S')
|
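`ymdhms2date` simply concatenates the two fields before parsing:

```python
from datetime import datetime

assert ymdhms2date('20180101', '04:04:04') == datetime(2018, 1, 1, 4, 4, 4)
```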
def get_files_by_date(dir_path, time_start=None, time_end=None, ext=None, pattern=None):
"\n :param dir_path: 文件夹\n :param time_start: 开始时间\n :param time_end: 结束时间\n :param ext: 后缀名, '.hdf5'\n :param pattern: 匹配时间的模式\n :return: list\n "
files_found = []
for (root, dirs, files) in os.walk(dir_path):
for file_name in files:
if (ext is not None):
if ('.' not in ext):
ext = ('.' + ext)
if (os.path.splitext(file_name)[1].lower() != ext.lower()):
continue
if (pattern is not None):
re_result = re.match(pattern, file_name)
if (re_result is None):
continue
if (time_start is not None):
time_file = ''.join(re_result.groups())
if (not (int(time_start) <= int(time_file) <= int(time_end))):
continue
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
| 574,180,708,283,089,540
|
:param dir_path: directory to search
:param time_start: start time
:param time_end: end time
:param ext: file extension, e.g. '.hdf5'
:param pattern: regex matching the time
:return: list
|
lib/pb_io.py
|
get_files_by_date
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
def get_files_by_date(dir_path, time_start=None, time_end=None, ext=None, pattern=None):
"\n :param dir_path: 文件夹\n :param time_start: 开始时间\n :param time_end: 结束时间\n :param ext: 后缀名, '.hdf5'\n :param pattern: 匹配时间的模式\n :return: list\n "
files_found = []
for (root, dirs, files) in os.walk(dir_path):
for file_name in files:
if (ext is not None):
if ('.' not in ext):
ext = ('.' + ext)
if (os.path.splitext(file_name)[1].lower() != ext.lower()):
continue
if (pattern is not None):
re_result = re.match(pattern, file_name)
if (re_result is None):
continue
if (time_start is not None):
time_file = ''.join(re_result.groups())
if (not (int(time_start) <= int(time_file) <= int(time_end))):
continue
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
|
@staticmethod
def read_cross_file(in_file, file_type):
'\n :param in_file:\n :param file_type:\n :return:\n '
data = {'ymdhms1': None, 'ymdhms2': None, 'lon1': None, 'lat1': None, 'lon2': None, 'lat2': None, 'fix_name': None}
if (not os.path.isfile(in_file)):
print('***WARNING*** File does not exist: {}'.format(in_file))
return data
if (file_type == 'leo_area'):
data_raw = np.loadtxt(in_file, skiprows=10, dtype={'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'), 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if (data_raw.size != 0):
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif (file_type == 'leo_leo'):
data_raw = np.loadtxt(in_file, skiprows=10, dtype={'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9'), 'formats': ('S8', 'S8', 'f4', 'f4', 'S8', 'f4', 'f4', 'f4', 'f4')})
if (data_raw.size != 0):
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d5']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d3']
data['lon1'] = data_raw['d4']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif (file_type == 'leo_fix'):
data_raw = np.loadtxt(in_file, skiprows=10, dtype={'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8'), 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4', 'f4')})
if (data_raw.size != 0):
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d2']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d6']
data['lon1'] = data_raw['d7']
data['lat2'] = data_raw['d4']
data['lon2'] = data_raw['d5']
data['fix_name'] = data_raw['d3']
elif (file_type == 'geo_leo'):
data_raw = np.loadtxt(in_file, skiprows=10, dtype={'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'), 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if (data_raw.size != 0):
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
else:
raise KeyError("Can't handle this file type: {}".format(file_type))
return data
| -4,966,384,043,776,749,000
|
:param in_file:
:param file_type:
:return:
|
lib/pb_io.py
|
read_cross_file
|
NingAnMe/snow_cover_of_remote_sensing
|
python
|
@staticmethod
def read_cross_file(in_file, file_type):
'\n :param in_file:\n :param file_type:\n :return:\n '
data = {'ymdhms1': None, 'ymdhms2': None, 'lon1': None, 'lat1': None, 'lon2': None, 'lat2': None, 'fix_name': None}
if (not os.path.isfile(in_file)):
print('***WARNING*** File does not exist: {}'.format(in_file))
return data
if (file_type == 'leo_area'):
data_raw = np.loadtxt(in_file, skiprows=10, dtype={'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'), 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if (data_raw.size != 0):
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif (file_type == 'leo_leo'):
data_raw = np.loadtxt(in_file, skiprows=10, dtype={'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9'), 'formats': ('S8', 'S8', 'f4', 'f4', 'S8', 'f4', 'f4', 'f4', 'f4')})
if (data_raw.size != 0):
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d5']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d3']
data['lon1'] = data_raw['d4']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif (file_type == 'leo_fix'):
data_raw = np.loadtxt(in_file, skiprows=10, dtype={'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8'), 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4', 'f4')})
if (data_raw.size != 0):
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d2']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d6']
data['lon1'] = data_raw['d7']
data['lat2'] = data_raw['d4']
data['lon2'] = data_raw['d5']
data['fix_name'] = data_raw['d3']
elif (file_type == 'geo_leo'):
data_raw = np.loadtxt(in_file, skiprows=10, dtype={'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'), 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if (data_raw.size != 0):
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
else:
raise KeyError("Can't handle this file type: {}".format(file_type))
return data
|
def move_organ(self, new_location: int, cost: float, shortest_path: shortest_path_structure) -> None:
"\n This function allows an organ's attributes to be altered to represent it's\n transportation across the network. This is intended to be used with\n Dijkstra.shortest_path (this will be the source of the cost parameter)\n\n :param int new_location: node id representing the destination location\n :param cost: weight/cost associated with then most efficient path\n :param list shortest_path: transit path taken when moving organ\n "
if (self.viability < cost):
print('ERROR: organ no longer viable!')
return
(path, weight) = shortest_path
self.path = path
self.current_location = new_location
self.viability -= cost
| -5,861,527,202,163,004,000
|
This function allows an organ's attributes to be altered to represent its
transportation across the network. This is intended to be used with
Dijkstra.shortest_path (this will be the source of the cost parameter)
:param int new_location: node id representing the destination location
:param cost: weight/cost associated with the most efficient path
:param list shortest_path: transit path taken when moving organ
|
network_simulator/Organ.py
|
move_organ
|
zspatter/Network-Simulation
|
python
|
def move_organ(self, new_location: int, cost: float, shortest_path: shortest_path_structure) -> None:
"\n This function allows an organ's attributes to be altered to represent it's\n transportation across the network. This is intended to be used with\n Dijkstra.shortest_path (this will be the source of the cost parameter)\n\n :param int new_location: node id representing the destination location\n :param cost: weight/cost associated with then most efficient path\n :param list shortest_path: transit path taken when moving organ\n "
if (self.viability < cost):
print('ERROR: organ no longer viable!')
return
(path, weight) = shortest_path
self.path = path
self.current_location = new_location
self.viability -= cost
|
@staticmethod
def get_viability(organ_type: OrganType) -> float:
'\n Gets viability rating for each organ individually\n\n Viability is represented by hours an organ can be out of body * 10\n\n :param int organ_type: constant corresponding to an organ type\n :return: int viability rating (used in __init__())\n '
viability = {OrganType.Heart.value: 60, OrganType.Kidney.value: 300, OrganType.Liver.value: 120, OrganType.Lungs.value: 60, OrganType.Pancreas.value: 120, OrganType.Intestines.value: 80}
return viability[organ_type.value]
| 7,959,193,242,337,898,000
|
Gets viability rating for each organ individually
Viability is represented by hours an organ can be out of body * 10
:param int organ_type: constant corresponding to an organ type
:return: int viability rating (used in __init__())
|
network_simulator/Organ.py
|
get_viability
|
zspatter/Network-Simulation
|
python
|
@staticmethod
def get_viability(organ_type: OrganType) -> float:
'\n Gets viability rating for each organ individually\n\n Viability is represented by hours an organ can be out of body * 10\n\n :param int organ_type: constant corresponding to an organ type\n :return: int viability rating (used in __init__())\n '
viability = {OrganType.Heart.value: 60, OrganType.Kidney.value: 300, OrganType.Liver.value: 120, OrganType.Lungs.value: 60, OrganType.Pancreas.value: 120, OrganType.Intestines.value: 80}
return viability[organ_type.value]
|
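A quick check of the viability table (assuming the enclosing class is `Organ`, per the file name; that class name is an assumption):

```python
assert Organ.get_viability(OrganType.Heart) == 60    # 6 hours out of body * 10
assert Organ.get_viability(OrganType.Kidney) == 300  # 30 hours out of body * 10
```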
def __str__(self) -> str:
'\n Builds an easily readable string representing an organ\n\n :return: str\n '
return f'''Organ:
Organ ID: {'{:05d}'.format(self.organ_id)}
Organ type: {OrganType(self.organ_type).name}
Blood type: {self.blood_type}
Viability: {self.viability}
Origin location: {self.origin_location}
Current location: {self.current_location}
Transit path: {self.path}
'''
| 7,500,228,580,813,923,000
|
Builds an easily readable string representing an organ
:return: str
|
network_simulator/Organ.py
|
__str__
|
zspatter/Network-Simulation
|
python
|
def __str__(self) -> str:
'\n Builds an easily readable string representing an organ\n\n :return: str\n '
return f'''Organ:
Organ ID: {'{:05d}'.format(self.organ_id)}
Organ type: {OrganType(self.organ_type).name}
Blood type: {self.blood_type}
Viability: {self.viability}
Origin location: {self.origin_location}
Current location: {self.current_location}
Transit path: {self.path}
'''
|
def _sentry_llvm_version():
'\n Make sure we meet min llvmpy version\n '
import warnings
import llvm
min_version = (0, 12, 6)
regex = re.compile('(\\d+)\\.(\\d+)\\.(\\d+)')
m = regex.match(llvm.__version__)
if m:
ver = tuple(map(int, m.groups()))
if (ver < min_version):
msg = ('Numba requires at least version %d.%d.%d of llvmpy.\nInstalled version is %s.\nPlease update llvmpy.' % (min_version + (llvm.__version__,)))
raise ImportError(msg)
else:
warnings.warn('llvmpy version format not recognized!')
| -4,261,973,195,286,988,000
|
Make sure we meet min llvmpy version
|
numba/__init__.py
|
_sentry_llvm_version
|
meawoppl/numba
|
python
|
def _sentry_llvm_version():
'\n \n '
import warnings
import llvm
min_version = (0, 12, 6)
regex = re.compile('(\\d+)\\.(\\d+)\\.(\\d+)')
m = regex.match(llvm.__version__)
if m:
ver = tuple(map(int, m.groups()))
if (ver < min_version):
msg = ('Numba requires at least version %d.%d.%d of llvmpy.\nInstalled version is %s.\nPlease update llvmpy.' % (min_version + (llvm.__version__,)))
raise ImportError(msg)
else:
warnings.warn('llvmpy version format not recognized!')
|
def train(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data)
MSE = torch.mean(((prediction - target) ** 2), dim=0)
Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum((MSE + Reg))
optimizer.zero_grad()
loss.backward()
optimizer.step()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
if ((iteration % write_iterations) == 0):
_ = model.sparse_estimator(thetas, time_derivs)
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
if (model.estimator_coeffs() is None):
estimator_coeff_vectors = [torch.zeros_like(coeff) for coeff in model.constraint_coeffs(sparse=True, scaled=False)]
else:
estimator_coeff_vectors = model.estimator_coeffs()
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors)
sparsity_scheduler(iteration, torch.sum(l1_norm))
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
| -5,516,086,070,360,717,000
|
[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
|
src/multitaskpinn/training/.ipynb_checkpoints/training-checkpoint.py
|
train
|
GJBoth/MultiTaskPINN
|
python
|
def train(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data)
MSE = torch.mean(((prediction - target) ** 2), dim=0)
Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum((MSE + Reg))
optimizer.zero_grad()
loss.backward()
optimizer.step()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
if ((iteration % write_iterations) == 0):
_ = model.sparse_estimator(thetas, time_derivs)
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
if (model.estimator_coeffs() is None):
estimator_coeff_vectors = [torch.zeros_like(coeff) for coeff in model.constraint_coeffs(sparse=True, scaled=False)]
else:
estimator_coeff_vectors = model.estimator_coeffs()
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors)
sparsity_scheduler(iteration, torch.sum(l1_norm))
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
|
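The core of this training loop (and the variants below) is the combined loss: per-output MSE on the data plus the library regression residual. A minimal sketch with stand-in tensors; all shapes are illustrative assumptions, not taken from the repository:

```python
import torch

prediction = torch.randn(100, 1)   # network output u
target = torch.randn(100, 1)       # observed data
time_deriv = torch.randn(100, 1)   # u_t from automatic differentiation
theta = torch.randn(100, 5)        # library of candidate terms
coeff = torch.randn(5, 1)          # constraint coefficients

MSE = torch.mean((prediction - target) ** 2, dim=0)   # data fit, per output
Reg = torch.mean((time_deriv - theta @ coeff) ** 2)   # PDE residual
loss = torch.sum(MSE + Reg)
```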
def train_auto_split(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
n_train = int((split * data.shape[0]))
n_test = (data.shape[0] - n_train)
(data_train, data_test) = torch.split(data, [n_train, n_test], dim=0)
(target_train, target_test) = torch.split(target, [n_train, n_test], dim=0)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data_train)
MSE = torch.mean(((prediction - target_train) ** 2), dim=0)
Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum((MSE + Reg))
optimizer.zero_grad()
loss.backward()
optimizer.step()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
with torch.no_grad():
prediction_test = model.func_approx(data_test)[0]
MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0)
if ((iteration % write_iterations) == 0):
_ = model.sparse_estimator(thetas, time_derivs)
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test)
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
| -557,119,499,946,742,100
|
[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
|
src/multitaskpinn/training/.ipynb_checkpoints/training-checkpoint.py
|
train_auto_split
|
GJBoth/MultiTaskPINN
|
python
|
def train_auto_split(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
n_train = int((split * data.shape[0]))
n_test = (data.shape[0] - n_train)
(data_train, data_test) = torch.split(data, [n_train, n_test], dim=0)
(target_train, target_test) = torch.split(target, [n_train, n_test], dim=0)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data_train)
MSE = torch.mean(((prediction - target_train) ** 2), dim=0)
Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum((MSE + Reg))
optimizer.zero_grad()
loss.backward()
optimizer.step()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
with torch.no_grad():
prediction_test = model.func_approx(data_test)[0]
MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0)
if ((iteration % write_iterations) == 0):
_ = model.sparse_estimator(thetas, time_derivs)
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test)
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
|
def train_auto_split_scaled(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
n_train = int((split * data.shape[0]))
n_test = (data.shape[0] - n_train)
(data_train, data_test) = torch.split(data, [n_train, n_test], dim=0)
(target_train, target_test) = torch.split(target, [n_train, n_test], dim=0)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data_train)
MSE = torch.mean(((prediction - target_train) ** 2), dim=0)
theta_norms = [torch.norm(theta, dim=0) for theta in thetas]
time_deriv_norms = [torch.norm(dt, dim=0) for dt in time_derivs]
normed_thetas = [(theta / norm) for (theta, norm) in zip(thetas, theta_norms)]
normed_time_derivs = [(dt / norm) for (dt, norm) in zip(time_derivs, time_deriv_norms)]
Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(normed_time_derivs, normed_thetas, model.constraint_coeffs(scaled=True, sparse=True))])
loss = torch.sum((MSE + Reg))
optimizer.zero_grad()
loss.backward()
optimizer.step()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
(prediction_test, coordinates) = model.func_approx(data_test)
(time_derivs_test, thetas_test) = model.library((prediction_test, coordinates))
MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0)
Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum((MSE_test + Reg_test))
if ((iteration % write_iterations) == 0):
_ = model.sparse_estimator(thetas, time_derivs)
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
sparsity_scheduler(loss_test, model, optimizer)
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
checkpoint = torch.load(sparsity_scheduler.path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
| -7,852,028,519,855,341,000
|
[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
|
src/multitaskpinn/training/.ipynb_checkpoints/training-checkpoint.py
|
train_auto_split_scaled
|
GJBoth/MultiTaskPINN
|
python
|
def train_auto_split_scaled(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
n_train = int((split * data.shape[0]))
n_test = (data.shape[0] - n_train)
(data_train, data_test) = torch.split(data, [n_train, n_test], dim=0)
(target_train, target_test) = torch.split(target, [n_train, n_test], dim=0)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data_train)
MSE = torch.mean(((prediction - target_train) ** 2), dim=0)
theta_norms = [torch.norm(theta, dim=0) for theta in thetas]
time_deriv_norms = [torch.norm(dt, dim=0) for dt in time_derivs]
normed_thetas = [(theta / norm) for (theta, norm) in zip(thetas, theta_norms)]
normed_time_derivs = [(dt / norm) for (dt, norm) in zip(time_derivs, time_deriv_norms)]
Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(normed_time_derivs, normed_thetas, model.constraint_coeffs(scaled=True, sparse=True))])
loss = torch.sum((MSE + Reg))
optimizer.zero_grad()
loss.backward()
optimizer.step()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
(prediction_test, coordinates) = model.func_approx(data_test)
(time_derivs_test, thetas_test) = model.library((prediction_test, coordinates))
MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0)
Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum((MSE_test + Reg_test))
if ((iteration % write_iterations) == 0):
_ = model.sparse_estimator(thetas, time_derivs)
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
sparsity_scheduler(loss_test, model, optimizer)
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
checkpoint = torch.load(sparsity_scheduler.path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
|
def train_auto_split_MSE(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data)
MSE = torch.mean(((prediction - target) ** 2), dim=0)
loss = torch.sum(MSE)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint.coeff_vectors, dim=1)), dim=0)
prediction_test = model.func_approx(data)[0]
MSE_test = torch.mean(((prediction_test - target) ** 2), dim=0)
if ((iteration % write_iterations) == 0):
_ = model.sparse_estimator(thetas, time_derivs)
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(MSE).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, MSE, l1_norm, model.constraint.coeff_vectors, model.constraint.coeff_vectors, estimator_coeff_vectors, MSE_test=MSE_test)
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
| -2,845,149,371,688,520,700
|
[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
|
src/multitaskpinn/training/.ipynb_checkpoints/training-checkpoint.py
|
train_auto_split_MSE
|
GJBoth/MultiTaskPINN
|
python
|
def train_auto_split_MSE(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data)
MSE = torch.mean(((prediction - target) ** 2), dim=0)
loss = torch.sum(MSE)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint.coeff_vectors, dim=1)), dim=0)
prediction_test = model.func_approx(data)[0]
MSE_test = torch.mean(((prediction_test - target) ** 2), dim=0)
if ((iteration % write_iterations) == 0):
_ = model.sparse_estimator(thetas, time_derivs)
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(MSE).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, MSE, l1_norm, model.constraint.coeff_vectors, model.constraint.coeff_vectors, estimator_coeff_vectors, MSE_test=MSE_test)
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
|
def train_split_full(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
'[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n '
start_time = time.time()
board = Tensorboard(log_dir)
n_train = int((split * data.shape[0]))
n_test = (data.shape[0] - n_train)
(data_train, data_test) = torch.split(data, [n_train, n_test], dim=0)
(target_train, target_test) = torch.split(target, [n_train, n_test], dim=0)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data_train)
MSE = torch.mean(((prediction - target_train) ** 2), dim=0)
Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum((MSE + Reg))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if ((iteration % write_iterations) == 0):
(prediction_test, coordinates) = model.func_approx(data_test)
(time_derivs_test, thetas_test) = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0)
Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum((MSE_test + Reg_test))
_ = model.sparse_estimator(thetas, time_derivs)
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
if ((iteration % write_iterations) == 0):
if (test == 'mse'):
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
else:
sparsity_scheduler(iteration, loss_test, model, optimizer)
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
| -5,188,684,247,511,856,000
|
[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
|
src/multitaskpinn/training/.ipynb_checkpoints/training-checkpoint.py
|
train_split_full
|
GJBoth/MultiTaskPINN
|
python
|
def train_split_full(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None:
    'Trains a DeepMoD model on a train/test split of the data.\n\n    Args:\n        model (DeepMoD): model to train.\n        data (torch.Tensor): input coordinates, split along dim 0.\n        target (torch.Tensor): target values the network is fitted to.\n        optimizer ([type]): optimizer updating the model parameters.\n        sparsity_scheduler ([type]): decides when the sparsity mask is applied.\n        test (str, optional): metric fed to the sparsity scheduler; "mse" uses the test-set MSE, anything else uses the full test loss. Defaults to "mse".\n        split (float, optional): fraction of the samples used for training. Defaults to 0.8.\n        log_dir (Optional[str], optional): Tensorboard log directory. Defaults to None.\n        max_iterations (int, optional): maximum number of training iterations. Defaults to 10000.\n        write_iterations (int, optional): interval (in iterations) for evaluation, logging and the scheduler check. Defaults to 25.\n    '
start_time = time.time()
board = Tensorboard(log_dir)
n_train = int((split * data.shape[0]))
n_test = (data.shape[0] - n_train)
(data_train, data_test) = torch.split(data, [n_train, n_test], dim=0)
(target_train, target_test) = torch.split(target, [n_train, n_test], dim=0)
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, (max_iterations + 1)):
(prediction, time_derivs, thetas) = model(data_train)
MSE = torch.mean(((prediction - target_train) ** 2), dim=0)
Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum((MSE + Reg))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if ((iteration % write_iterations) == 0):
(prediction_test, coordinates) = model.func_approx(data_test)
(time_derivs_test, thetas_test) = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0)
Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum((MSE_test + Reg_test))
_ = model.sparse_estimator(thetas, time_derivs)
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
if ((iteration % write_iterations) == 0):
if (test == 'mse'):
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
else:
sparsity_scheduler(iteration, loss_test, model, optimizer)
if (sparsity_scheduler.apply_sparsity is True):
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
convergence(iteration, torch.sum(l1_norm))
if (convergence.converged is True):
print('Algorithm converged. Stopping training.')
break
board.close()
|
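A minimal usage sketch for the training routine above; `model` and `sparsity_scheduler` are assumed to be constructed elsewhere with the library's own classes (their import paths are not shown in this record), and the tensor shapes, optimizer settings, and log directory are illustrative assumptions rather than values from the source.

# Sketch only: assumes `model` is a constructed DeepMoD instance and
# `sparsity_scheduler` implements the interface used above
# (callable, exposes .apply_sparsity and .reset()).
import torch

data = torch.randn(2000, 2, requires_grad=True)  # e.g. (x, t) sample coordinates
target = torch.sin(data[:, :1]).detach()         # toy target field
optimizer = torch.optim.Adam(model.parameters())

train_split_full(model, data, target, optimizer, sparsity_scheduler,
                 test='mse',           # gate the sparsity scheduler on test-set MSE
                 split=0.8,            # 80/20 train/test split along dim 0
                 log_dir='runs/demo',  # Tensorboard directory (illustrative)
                 write_iterations=25)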
def _verify_error(res: int):
'\n Validate k4a_module result\n '
res = Result(res)
if (res == Result.Failed):
raise K4AException()
elif (res == Result.Timeout):
raise K4ATimeoutException()
| -5,525,237,422,260,999,000
|
Validate k4a_module result
|
deepclaw/driver/sensors/camera/pyk4a_cfg/errors.py
|
_verify_error
|
1079931505/ME336-Yellow-Team-SUSTech
|
python
|
def _verify_error(res: int):
'\n \n '
res = Result(res)
if (res == Result.Failed):
raise K4AException()
elif (res == Result.Timeout):
raise K4ATimeoutException()
|
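A small illustration of the control flow above, assuming the conventional k4a result mapping (0 = Success, 1 = Failed, 2 = Timeout) for the `Result` enum; the actual enum values live in the surrounding errors module and may differ.

# Assumed mapping: Result(0) == Success, Result(1) == Failed, Result(2) == Timeout.
_verify_error(0)            # success: returns None, nothing raised
try:
    _verify_error(1)        # failure: raises K4AException
except K4AException:
    print('k4a_module call failed')
try:
    _verify_error(2)        # timeout: raises K4ATimeoutException
except K4ATimeoutException:
    print('k4a_module call timed out')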
def __init__(self, session, object_factory, request_validator):
'Initialize a new SxpConnections\n object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the Identity Services Engine service.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n\n '
check_type(session, RestSession)
super(SxpConnections, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
| -4,665,961,750,954,259,000
|
Initialize a new SxpConnections
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the Identity Services Engine service.
Raises:
TypeError: If the parameter types are incorrect.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
__init__
|
CiscoISE/ciscoisesdk
|
python
|
def __init__(self, session, object_factory, request_validator):
'Initialize a new SxpConnections\n object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the Identity Services Engine service.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n\n '
check_type(session, RestSession)
super(SxpConnections, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
|
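Callers do not normally construct SxpConnections directly; it is wired up by the SDK's top-level connection object. A minimal sketch, assuming the package's IdentityServicesEngineAPI entry point with illustrative lab credentials:

from ciscoisesdk import IdentityServicesEngineAPI

# Credentials and address are placeholders; verify=False is a lab-only convenience.
api = IdentityServicesEngineAPI(username='admin',
                                password='password',
                                base_url='https://198.18.133.27',
                                verify=False)
sxp = api.sxp_connections  # an SxpConnections instance sharing the RestSession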
def get_sxp_connections_by_id(self, id, headers=None, **query_parameters):
"This API allows the client to get a SXP connection by ID.\n\n Args:\n id(basestring): id path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(id, basestring, may_be_none=False)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {'id': id}
e_url = '/ers/config/sxpconnections/{id}'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a5b160a5675039b7ddf3dc960c7968_v3_0_0', _api_response)
| 3,230,320,443,717,295,600
|
This API allows the client to get a SXP connection by ID.
Args:
id(basestring): id path parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
get_sxp_connections_by_id
|
CiscoISE/ciscoisesdk
|
python
|
def get_sxp_connections_by_id(self, id, headers=None, **query_parameters):
"This API allows the client to get a SXP connection by ID.\n\n Args:\n id(basestring): id path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(id, basestring, may_be_none=False)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {'id': id}
e_url = '/ers/config/sxpconnections/{id}'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a5b160a5675039b7ddf3dc960c7968_v3_0_0', _api_response)
|
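A hedged usage sketch, reusing the `api` handle from the earlier sketch; the id is a placeholder UUID, and the response key is inferred from the ERSSxpConnection payload schema used by the update method below.

resp = api.sxp_connections.get_sxp_connections_by_id(
    '00000000-0000-0000-0000-000000000000')  # placeholder resource id
conn = resp.response['ERSSxpConnection']     # MyDict: bracket or dot access both work
print(conn['ipAddress'], conn['sxpMode'])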
def get_by_id(self, id, headers=None, **query_parameters):
'Alias for `get_sxp_connections_by_id <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.get_sxp_connections_by_id>`_\n '
return self.get_sxp_connections_by_id(id=id, headers=headers, **query_parameters)
| 7,004,977,335,190,405,000
|
Alias for `get_sxp_connections_by_id <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.get_sxp_connections_by_id>`_
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
get_by_id
|
CiscoISE/ciscoisesdk
|
python
|
def get_by_id(self, id, headers=None, **query_parameters):
'Alias for `get_sxp_connections_by_id <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.get_sxp_connections_by_id>`_\n '
return self.get_sxp_connections_by_id(id=id, headers=headers, **query_parameters)
|
def update_sxp_connections_by_id(self, id, description=None, enabled=None, ip_address=None, sxp_mode=None, sxp_node=None, sxp_peer=None, sxp_version=None, sxp_vpn=None, headers=None, payload=None, active_validation=True, **query_parameters):
"This API allows the client to update a SXP connection.\n\n Args:\n description(string): description, property of the\n request body.\n enabled(boolean): enabled, property of the request body.\n id(string): id, property of the request body.\n ip_address(string): ipAddress, property of the request\n body.\n sxp_mode(string): sxpMode, property of the request body.\n sxp_node(string): sxpNode, property of the request body.\n sxp_peer(string): sxpPeer, property of the request body.\n sxp_version(string): sxpVersion, property of the request\n body.\n sxp_vpn(string): sxpVpn, property of the request body.\n id(basestring): id path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = ('application/xml' in _headers.get('Content-Type', []))
if (active_validation and is_xml_payload):
check_type(payload, basestring)
if (active_validation and (not is_xml_payload)):
check_type(payload, dict)
check_type(id, basestring, may_be_none=False)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {'id': id}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {'id': id, 'description': description, 'sxpPeer': sxp_peer, 'sxpVpn': sxp_vpn, 'sxpNode': sxp_node, 'ipAddress': ip_address, 'sxpMode': sxp_mode, 'sxpVersion': sxp_version, 'enabled': enabled}
_payload = {'ERSSxpConnection': dict_from_items_with_values(_tmp_payload)}
_payload.update((payload or {}))
_payload = dict_from_items_with_values(_payload)
if (active_validation and (not is_xml_payload)):
self._request_validator('jsd_cab8440e21553c3a807d23d05e5e1aa_v3_0_0').validate(_payload)
e_url = '/ers/config/sxpconnections/{id}'
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = ({'data': _payload} if is_xml_payload else {'json': _payload})
if with_custom_headers:
_api_response = self._session.put(endpoint_full_url, params=_params, headers=_headers, **request_params)
else:
_api_response = self._session.put(endpoint_full_url, params=_params, **request_params)
return self._object_factory('bpm_cab8440e21553c3a807d23d05e5e1aa_v3_0_0', _api_response)
| 5,528,890,898,895,734,000
|
This API allows the client to update a SXP connection.
Args:
description(string): description, property of the
request body.
enabled(boolean): enabled, property of the request body.
id(string): id, property of the request body.
ip_address(string): ipAddress, property of the request
body.
sxp_mode(string): sxpMode, property of the request body.
sxp_node(string): sxpNode, property of the request body.
sxp_peer(string): sxpPeer, property of the request body.
sxp_version(string): sxpVersion, property of the request
body.
sxp_vpn(string): sxpVpn, property of the request body.
id(basestring): id path parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
update_sxp_connections_by_id
|
CiscoISE/ciscoisesdk
|
python
|
def update_sxp_connections_by_id(self, id, description=None, enabled=None, ip_address=None, sxp_mode=None, sxp_node=None, sxp_peer=None, sxp_version=None, sxp_vpn=None, headers=None, payload=None, active_validation=True, **query_parameters):
"This API allows the client to update a SXP connection.\n\n Args:\n description(string): description, property of the\n request body.\n enabled(boolean): enabled, property of the request body.\n id(string): id, property of the request body.\n ip_address(string): ipAddress, property of the request\n body.\n sxp_mode(string): sxpMode, property of the request body.\n sxp_node(string): sxpNode, property of the request body.\n sxp_peer(string): sxpPeer, property of the request body.\n sxp_version(string): sxpVersion, property of the request\n body.\n sxp_vpn(string): sxpVpn, property of the request body.\n id(basestring): id path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = ('application/xml' in _headers.get('Content-Type', []))
if (active_validation and is_xml_payload):
check_type(payload, basestring)
if (active_validation and (not is_xml_payload)):
check_type(payload, dict)
check_type(id, basestring, may_be_none=False)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {'id': id}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {'id': id, 'description': description, 'sxpPeer': sxp_peer, 'sxpVpn': sxp_vpn, 'sxpNode': sxp_node, 'ipAddress': ip_address, 'sxpMode': sxp_mode, 'sxpVersion': sxp_version, 'enabled': enabled}
_payload = {'ERSSxpConnection': dict_from_items_with_values(_tmp_payload)}
_payload.update((payload or {}))
_payload = dict_from_items_with_values(_payload)
if (active_validation and (not is_xml_payload)):
self._request_validator('jsd_cab8440e21553c3a807d23d05e5e1aa_v3_0_0').validate(_payload)
e_url = '/ers/config/sxpconnections/{id}'
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = ({'data': _payload} if is_xml_payload else {'json': _payload})
if with_custom_headers:
_api_response = self._session.put(endpoint_full_url, params=_params, headers=_headers, **request_params)
else:
_api_response = self._session.put(endpoint_full_url, params=_params, **request_params)
return self._object_factory('bpm_cab8440e21553c3a807d23d05e5e1aa_v3_0_0', _api_response)
|
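A sketch of an update call, again reusing the `api` handle from the earlier sketch; the id is a placeholder UUID and only a couple of the optional fields are set.

resp = api.sxp_connections.update_sxp_connections_by_id(
    id='00000000-0000-0000-0000-000000000000',  # placeholder resource id
    description='updated via the SDK',
    enabled=False,                              # e.g. disable the connection
)
print(resp.response)  # ERS updates typically report which fields changed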
def update_by_id(self, id, description=None, enabled=None, ip_address=None, sxp_mode=None, sxp_node=None, sxp_peer=None, sxp_version=None, sxp_vpn=None, headers=None, payload=None, active_validation=True, **query_parameters):
'Alias for `update_sxp_connections_by_id <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.update_sxp_connections_by_id>`_\n '
return self.update_sxp_connections_by_id(id=id, description=description, enabled=enabled, ip_address=ip_address, sxp_mode=sxp_mode, sxp_node=sxp_node, sxp_peer=sxp_peer, sxp_version=sxp_version, sxp_vpn=sxp_vpn, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)
| -3,714,477,195,803,711,000
|
Alias for `update_sxp_connections_by_id <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.update_sxp_connections_by_id>`_
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
update_by_id
|
CiscoISE/ciscoisesdk
|
python
|
def update_by_id(self, id, description=None, enabled=None, ip_address=None, sxp_mode=None, sxp_node=None, sxp_peer=None, sxp_version=None, sxp_vpn=None, headers=None, payload=None, active_validation=True, **query_parameters):
'Alias for `update_sxp_connections_by_id <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.update_sxp_connections_by_id>`_\n '
return self.update_sxp_connections_by_id(id=id, description=description, enabled=enabled, ip_address=ip_address, sxp_mode=sxp_mode, sxp_node=sxp_node, sxp_peer=sxp_peer, sxp_version=sxp_version, sxp_vpn=sxp_vpn, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)
|
def delete_sxp_connections_by_id(self, id, headers=None, **query_parameters):
"This API deletes a SXP connection.\n\n Args:\n id(basestring): id path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(id, basestring, may_be_none=False)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {'id': id}
e_url = '/ers/config/sxpconnections/{id}'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.delete(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_fb665776b98ba815b52515a6_v3_0_0', _api_response)
| 8,403,939,434,304,790,000
|
This API deletes a SXP connection.
Args:
id(basestring): id path parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
delete_sxp_connections_by_id
|
CiscoISE/ciscoisesdk
|
python
|
def delete_sxp_connections_by_id(self, id, headers=None, **query_parameters):
"This API deletes a SXP connection.\n\n Args:\n id(basestring): id path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(id, basestring, may_be_none=False)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {'id': id}
e_url = '/ers/config/sxpconnections/{id}'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.delete(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_fb665776b98ba815b52515a6_v3_0_0', _api_response)
|
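Deletion takes only the resource id; a sketch with the same placeholder UUID and `api` handle as above.

api.sxp_connections.delete_sxp_connections_by_id(
    '00000000-0000-0000-0000-000000000000')  # placeholder resource id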
def delete_by_id(self, id, headers=None, **query_parameters):
'Alias for `delete_sxp_connections_by_id <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.delete_sxp_connections_by_id>`_\n '
return self.delete_sxp_connections_by_id(id=id, headers=headers, **query_parameters)
| 1,913,868,147,716,121,000
|
Alias for `delete_sxp_connections_by_id <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.delete_sxp_connections_by_id>`_
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
delete_by_id
|
CiscoISE/ciscoisesdk
|
python
|
def delete_by_id(self, id, headers=None, **query_parameters):
'Alias for `delete_sxp_connections_by_id <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.delete_sxp_connections_by_id>`_\n '
return self.delete_sxp_connections_by_id(id=id, headers=headers, **query_parameters)
|
def get_sxp_connections(self, filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None, headers=None, **query_parameters):
'This API allows the client to get all the SXP connections.\n Filter: [name, description] To search resources by\n using toDate column,follow the format: DD-MON-YY\n (Example:13-SEP-18) Day or Year:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.13\n Month:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.SEP\n Date:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18\n Sorting: [name, description].\n\n Args:\n page(int): page query parameter. Page number.\n size(int): size query parameter. Number of objects\n returned per page.\n sortasc(basestring): sortasc query parameter. sort asc.\n sortdsc(basestring): sortdsc query parameter. sort desc.\n filter(basestring, list, set, tuple): filter query\n parameter. **Simple\n filtering** should be available through\n the filter query string parameter. The\n structure of a filter is a triplet of\n field operator and value separated with\n dots. More than one filter can be sent.\n The logical operator common to ALL\n filter criteria will be by default AND,\n and can be changed by using the\n "filterType=or" query string parameter.\n Each resource Data model description\n should specify if an attribute is a\n filtered field. (Operator:\n Description),\n (EQ: Equals), (NEQ: Not\n Equals), (GT: Greater\n Than), (LT: Less Then),\n (STARTSW: Starts With),\n (NSTARTSW: Not Starts With),\n (ENDSW: Ends With),\n (NENDSW: Not Ends With),\n (CONTAINS: Contains),\n (NCONTAINS: Not Contains),\n .\n filter_type(basestring): filterType query parameter. The\n logical operator common to ALL filter\n criteria will be by default AND, and can\n be changed by using the parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object\'s properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request\'s response\n - text(str): representation of the request\'s response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n '
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(page, (int, basestring, list))
check_type(size, (int, basestring, list))
check_type(sortasc, basestring)
check_type(sortdsc, basestring)
check_type(filter, (basestring, list, set, tuple))
check_type(filter_type, basestring)
_params = {'page': page, 'size': size, 'sortasc': sortasc, 'sortdsc': sortdsc, 'filter': filter, 'filterType': filter_type}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {}
e_url = '/ers/config/sxpconnections'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c56dfcff6285f9b882c884873d5d6c1_v3_0_0', _api_response)
| 5,598,542,891,324,423,000
|
This API allows the client to get all the SXP connections.
Filter: [name, description] To search resources by
            using toDate column, follow the format: DD-MON-YY
(Example:13-SEP-18) Day or Year:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.13
Month:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.SEP
Date:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18
Sorting: [name, description].
Args:
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
sortasc(basestring): sortasc query parameter. sort asc.
sortdsc(basestring): sortdsc query parameter. sort desc.
filter(basestring, list, set, tuple): filter query
parameter. **Simple
filtering** should be available through
the filter query string parameter. The
structure of a filter is a triplet of
field operator and value separated with
dots. More than one filter can be sent.
The logical operator common to ALL
filter criteria will be by default AND,
and can be changed by using the
"filterType=or" query string parameter.
Each resource Data model description
should specify if an attribute is a
filtered field. (Operator:
Description),
(EQ: Equals), (NEQ: Not
Equals), (GT: Greater
            Than), (LT: Less Than),
(STARTSW: Starts With),
(NSTARTSW: Not Starts With),
(ENDSW: Ends With),
(NENDSW: Not Ends With),
(CONTAINS: Contains),
            (NCONTAINS: Not Contains).
filter_type(basestring): filterType query parameter. The
logical operator common to ALL filter
criteria will be by default AND, and can
be changed by using the parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
get_sxp_connections
|
CiscoISE/ciscoisesdk
|
python
|
def get_sxp_connections(self, filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None, headers=None, **query_parameters):
'This API allows the client to get all the SXP connections.\n Filter: [name, description] To search resources by\n using toDate column,follow the format: DD-MON-YY\n (Example:13-SEP-18) Day or Year:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.13\n Month:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.SEP\n Date:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18\n Sorting: [name, description].\n\n Args:\n page(int): page query parameter. Page number.\n size(int): size query parameter. Number of objects\n returned per page.\n sortasc(basestring): sortasc query parameter. sort asc.\n sortdsc(basestring): sortdsc query parameter. sort desc.\n filter(basestring, list, set, tuple): filter query\n parameter. **Simple\n filtering** should be available through\n the filter query string parameter. The\n structure of a filter is a triplet of\n field operator and value separated with\n dots. More than one filter can be sent.\n The logical operator common to ALL\n filter criteria will be by default AND,\n and can be changed by using the\n "filterType=or" query string parameter.\n Each resource Data model description\n should specify if an attribute is a\n filtered field. (Operator:\n Description),\n (EQ: Equals), (NEQ: Not\n Equals), (GT: Greater\n Than), (LT: Less Then),\n (STARTSW: Starts With),\n (NSTARTSW: Not Starts With),\n (ENDSW: Ends With),\n (NENDSW: Not Ends With),\n (CONTAINS: Contains),\n (NCONTAINS: Not Contains),\n .\n filter_type(basestring): filterType query parameter. The\n logical operator common to ALL filter\n criteria will be by default AND, and can\n be changed by using the parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object\'s properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request\'s response\n - text(str): representation of the request\'s response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n '
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(page, (int, basestring, list))
check_type(size, (int, basestring, list))
check_type(sortasc, basestring)
check_type(sortdsc, basestring)
check_type(filter, (basestring, list, set, tuple))
check_type(filter_type, basestring)
_params = {'page': page, 'size': size, 'sortasc': sortasc, 'sortdsc': sortdsc, 'filter': filter, 'filterType': filter_type}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {}
e_url = '/ers/config/sxpconnections'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c56dfcff6285f9b882c884873d5d6c1_v3_0_0', _api_response)
|
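The filter grammar described above (field.OPERATOR.value triplets, AND by default, OR via filterType) might be exercised as below; the field names come from the docstring's [name, description] list, the values are illustrative, and the SearchResult/resources keys match the access lists used by the generator method that follows.

sxp = api.sxp_connections  # handle from the earlier sketch
resp = sxp.get_sxp_connections(
    filter=['name.STARTSW.Branch', 'description.CONTAINS.lab'],  # two triplets
    filter_type='or',  # combine criteria with OR instead of the default AND
    page=1,
    size=20,
    sortasc='name',
)
for item in resp.response['SearchResult']['resources']:
    print(item['id'], item['name'])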
def get_all(self, filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None, headers=None, **query_parameters):
'Alias for `get_sxp_connections <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.get_sxp_connections>`_\n '
return self.get_sxp_connections(filter=filter, filter_type=filter_type, page=page, size=size, sortasc=sortasc, sortdsc=sortdsc, headers=headers, **query_parameters)
| -5,183,008,879,214,946,000
|
Alias for `get_sxp_connections <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.get_sxp_connections>`_
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
get_all
|
CiscoISE/ciscoisesdk
|
python
|
def get_all(self, filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None, headers=None, **query_parameters):
'Alias for `get_sxp_connections <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.get_sxp_connections>`_\n '
return self.get_sxp_connections(filter=filter, filter_type=filter_type, page=page, size=size, sortasc=sortasc, sortdsc=sortdsc, headers=headers, **query_parameters)
|
def get_sxp_connections_generator(self, filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None, headers=None, **query_parameters):
'This API allows the client to get all the SXP connections.\n Filter: [name, description] To search resources by\n using toDate column,follow the format: DD-MON-YY\n (Example:13-SEP-18) Day or Year:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.13\n Month:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.SEP\n Date:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18\n Sorting: [name, description].\n\n Args:\n page(int): page query parameter. Page number.\n size(int): size query parameter. Number of objects\n returned per page.\n sortasc(basestring): sortasc query parameter. sort asc.\n sortdsc(basestring): sortdsc query parameter. sort desc.\n filter(basestring, list, set, tuple): filter query\n parameter. **Simple\n filtering** should be available through\n the filter query string parameter. The\n structure of a filter is a triplet of\n field operator and value separated with\n dots. More than one filter can be sent.\n The logical operator common to ALL\n filter criteria will be by default AND,\n and can be changed by using the\n "filterType=or" query string parameter.\n Each resource Data model description\n should specify if an attribute is a\n filtered field. (Operator:\n Description),\n (EQ: Equals), (NEQ: Not\n Equals), (GT: Greater\n Than), (LT: Less Then),\n (STARTSW: Starts With),\n (NSTARTSW: Not Starts With),\n (ENDSW: Ends With),\n (NENDSW: Not Ends With),\n (CONTAINS: Contains),\n (NCONTAINS: Not Contains),\n .\n filter_type(basestring): filterType query parameter. The\n logical operator common to ALL filter\n criteria will be by default AND, and can\n be changed by using the parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n Generator: A generator object containing the following object.\n\n + RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object\'s properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request\'s response\n - text(str): representation of the request\'s response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n '
(yield from get_next_page(self.get_sxp_connections, dict(filter=filter, filter_type=filter_type, page=page, size=size, sortasc=sortasc, sortdsc=sortdsc, headers=headers, **query_parameters), access_next_list=['SearchResult', 'nextPage', 'href'], access_resource_list=['SearchResult', 'resources']))
| 1,249,936,647,909,373,700
|
This API allows the client to get all the SXP connections.
Filter: [name, description] To search resources by
            using toDate column, follow the format: DD-MON-YY
(Example:13-SEP-18) Day or Year:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.13
Month:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.SEP
Date:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18
Sorting: [name, description].
Args:
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
sortasc(basestring): sortasc query parameter. sort asc.
sortdsc(basestring): sortdsc query parameter. sort desc.
filter(basestring, list, set, tuple): filter query
parameter. **Simple
filtering** should be available through
the filter query string parameter. The
structure of a filter is a triplet of
field operator and value separated with
dots. More than one filter can be sent.
The logical operator common to ALL
filter criteria will be by default AND,
and can be changed by using the
"filterType=or" query string parameter.
Each resource Data model description
should specify if an attribute is a
filtered field. (Operator:
Description),
(EQ: Equals), (NEQ: Not
Equals), (GT: Greater
            Than), (LT: Less Than),
(STARTSW: Starts With),
(NSTARTSW: Not Starts With),
(ENDSW: Ends With),
(NENDSW: Not Ends With),
(CONTAINS: Contains),
            (NCONTAINS: Not Contains).
filter_type(basestring): filterType query parameter. The
logical operator common to ALL filter
criteria will be by default AND, and can
be changed by using the parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
Generator: A generator object containing the following object.
+ RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
get_sxp_connections_generator
|
CiscoISE/ciscoisesdk
|
python
|
def get_sxp_connections_generator(self, filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None, headers=None, **query_parameters):
'This API allows the client to get all the SXP connections.\n Filter: [name, description] To search resources by\n using toDate column,follow the format: DD-MON-YY\n (Example:13-SEP-18) Day or Year:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.13\n Month:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.SEP\n Date:GET\n /ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18\n Sorting: [name, description].\n\n Args:\n page(int): page query parameter. Page number.\n size(int): size query parameter. Number of objects\n returned per page.\n sortasc(basestring): sortasc query parameter. sort asc.\n sortdsc(basestring): sortdsc query parameter. sort desc.\n filter(basestring, list, set, tuple): filter query\n parameter. **Simple\n filtering** should be available through\n the filter query string parameter. The\n structure of a filter is a triplet of\n field operator and value separated with\n dots. More than one filter can be sent.\n The logical operator common to ALL\n filter criteria will be by default AND,\n and can be changed by using the\n "filterType=or" query string parameter.\n Each resource Data model description\n should specify if an attribute is a\n filtered field. (Operator:\n Description),\n (EQ: Equals), (NEQ: Not\n Equals), (GT: Greater\n Than), (LT: Less Then),\n (STARTSW: Starts With),\n (NSTARTSW: Not Starts With),\n (ENDSW: Ends With),\n (NENDSW: Not Ends With),\n (CONTAINS: Contains),\n (NCONTAINS: Not Contains),\n .\n filter_type(basestring): filterType query parameter. The\n logical operator common to ALL filter\n criteria will be by default AND, and can\n be changed by using the parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n Generator: A generator object containing the following object.\n\n + RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object\'s properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request\'s response\n - text(str): representation of the request\'s response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n '
(yield from get_next_page(self.get_sxp_connections, dict(filter=filter, filter_type=filter_type, page=page, size=size, sortasc=sortasc, sortdsc=sortdsc, headers=headers, **query_parameters), access_next_list=['SearchResult', 'nextPage', 'href'], access_resource_list=['SearchResult', 'resources']))
|
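The generator variant hides pagination by following SearchResult.nextPage.href between requests, as the access lists above show; a sketch reusing the `api` handle:

for page in api.sxp_connections.get_sxp_connections_generator(size=100):
    for item in page.response['SearchResult']['resources']:
        print(item['id'])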
def get_all_generator(self, filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None, headers=None, **query_parameters):
'Alias for `get_sxp_connections_generator <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.get_sxp_connections_generator>`_\n '
(yield from get_next_page(self.get_sxp_connections, dict(filter=filter, filter_type=filter_type, page=page, size=size, sortasc=sortasc, sortdsc=sortdsc, headers=headers, **query_parameters), access_next_list=['SearchResult', 'nextPage', 'href'], access_resource_list=['SearchResult', 'resources']))
| -806,000,813,169,550,700
|
Alias for `get_sxp_connections_generator <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.get_sxp_connections_generator>`_
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
get_all_generator
|
CiscoISE/ciscoisesdk
|
python
|
def get_all_generator(self, filter=None, filter_type=None, page=None, size=None, sortasc=None, sortdsc=None, headers=None, **query_parameters):
'Alias for `get_sxp_connections_generator <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.get_sxp_connections_generator>`_\n '
(yield from get_next_page(self.get_sxp_connections, dict(filter=filter, filter_type=filter_type, page=page, size=size, sortasc=sortasc, sortdsc=sortdsc, headers=headers, **query_parameters), access_next_list=['SearchResult', 'nextPage', 'href'], access_resource_list=['SearchResult', 'resources']))
|
def create_sxp_connections(self, description=None, enabled=None, ip_address=None, sxp_mode=None, sxp_node=None, sxp_peer=None, sxp_version=None, sxp_vpn=None, headers=None, payload=None, active_validation=True, **query_parameters):
"This API creates a SXP connection.\n\n Args:\n description(string): description, property of the\n request body.\n enabled(boolean): enabled, property of the request body.\n ip_address(string): ipAddress, property of the request\n body.\n sxp_mode(string): sxpMode, property of the request body.\n sxp_node(string): sxpNode, property of the request body.\n sxp_peer(string): sxpPeer, property of the request body.\n sxp_version(string): sxpVersion, property of the request\n body.\n sxp_vpn(string): sxpVpn, property of the request body.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = ('application/xml' in _headers.get('Content-Type', []))
if (active_validation and is_xml_payload):
check_type(payload, basestring)
if (active_validation and (not is_xml_payload)):
check_type(payload, dict)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {'description': description, 'sxpPeer': sxp_peer, 'sxpVpn': sxp_vpn, 'sxpNode': sxp_node, 'ipAddress': ip_address, 'sxpMode': sxp_mode, 'sxpVersion': sxp_version, 'enabled': enabled}
_payload = {'ERSSxpConnection': dict_from_items_with_values(_tmp_payload)}
_payload.update((payload or {}))
_payload = dict_from_items_with_values(_payload)
if (active_validation and (not is_xml_payload)):
self._request_validator('jsd_c371214c759f791c0a522b9eaf5b5_v3_0_0').validate(_payload)
e_url = '/ers/config/sxpconnections'
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = ({'data': _payload} if is_xml_payload else {'json': _payload})
if with_custom_headers:
_api_response = self._session.post(endpoint_full_url, params=_params, headers=_headers, **request_params)
else:
_api_response = self._session.post(endpoint_full_url, params=_params, **request_params)
return self._object_factory('bpm_c371214c759f791c0a522b9eaf5b5_v3_0_0', _api_response)
| 8,456,330,866,147,297,000
|
This API creates a SXP connection.
Args:
description(string): description, property of the
request body.
enabled(boolean): enabled, property of the request body.
ip_address(string): ipAddress, property of the request
body.
sxp_mode(string): sxpMode, property of the request body.
sxp_node(string): sxpNode, property of the request body.
sxp_peer(string): sxpPeer, property of the request body.
sxp_version(string): sxpVersion, property of the request
body.
sxp_vpn(string): sxpVpn, property of the request body.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
create_sxp_connections
|
CiscoISE/ciscoisesdk
|
python
|
def create_sxp_connections(self, description=None, enabled=None, ip_address=None, sxp_mode=None, sxp_node=None, sxp_peer=None, sxp_version=None, sxp_vpn=None, headers=None, payload=None, active_validation=True, **query_parameters):
"This API creates a SXP connection.\n\n Args:\n description(string): description, property of the\n request body.\n enabled(boolean): enabled, property of the request body.\n ip_address(string): ipAddress, property of the request\n body.\n sxp_mode(string): sxpMode, property of the request body.\n sxp_node(string): sxpNode, property of the request body.\n sxp_peer(string): sxpPeer, property of the request body.\n sxp_version(string): sxpVersion, property of the request\n body.\n sxp_vpn(string): sxpVpn, property of the request body.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
if ('ERS-Media-Type' in headers):
check_type(headers.get('ERS-Media-Type'), basestring)
if ('X-CSRF-TOKEN' in headers):
check_type(headers.get('X-CSRF-TOKEN'), basestring)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = ('application/xml' in _headers.get('Content-Type', []))
if (active_validation and is_xml_payload):
check_type(payload, basestring)
if (active_validation and (not is_xml_payload)):
check_type(payload, dict)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {'description': description, 'sxpPeer': sxp_peer, 'sxpVpn': sxp_vpn, 'sxpNode': sxp_node, 'ipAddress': ip_address, 'sxpMode': sxp_mode, 'sxpVersion': sxp_version, 'enabled': enabled}
_payload = {'ERSSxpConnection': dict_from_items_with_values(_tmp_payload)}
_payload.update((payload or {}))
_payload = dict_from_items_with_values(_payload)
if (active_validation and (not is_xml_payload)):
self._request_validator('jsd_c371214c759f791c0a522b9eaf5b5_v3_0_0').validate(_payload)
e_url = '/ers/config/sxpconnections'
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = ({'data': _payload} if is_xml_payload else {'json': _payload})
if with_custom_headers:
_api_response = self._session.post(endpoint_full_url, params=_params, headers=_headers, **request_params)
else:
_api_response = self._session.post(endpoint_full_url, params=_params, **request_params)
return self._object_factory('bpm_c371214c759f791c0a522b9eaf5b5_v3_0_0', _api_response)
|
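A minimal usage sketch for the wrapper above, following the ciscoisesdk README's IdentityServicesEngineAPI entry point; the host, credentials, and SXP field values are placeholders, not taken from this document:

from ciscoisesdk import IdentityServicesEngineAPI

# Placeholder deployment; point at a real ISE node with real credentials.
api = IdentityServicesEngineAPI(username='admin', password='secret',
                                base_url='https://ise.example.com',
                                version='3.0.0', verify=False)
# Fields left as None are simply dropped from the ERSSxpConnection body.
response = api.sxp_connections.create_sxp_connections(
    ip_address='10.0.0.5', sxp_peer='peer-node', sxp_node='ise-node',
    sxp_vpn='default', sxp_mode='LISTENER', sxp_version='VERSION_4',
    enabled=True)
print(response.response)  # MyDict body, dot- or bracket-accessible per the docstring above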
def create(self, description=None, enabled=None, ip_address=None, sxp_mode=None, sxp_node=None, sxp_peer=None, sxp_version=None, sxp_vpn=None, headers=None, payload=None, active_validation=True, **query_parameters):
'Alias for `create_sxp_connections <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.create_sxp_connections>`_\n '
return self.create_sxp_connections(description=description, enabled=enabled, ip_address=ip_address, sxp_mode=sxp_mode, sxp_node=sxp_node, sxp_peer=sxp_peer, sxp_version=sxp_version, sxp_vpn=sxp_vpn, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)
| 8,035,656,819,289,710,000
|
Alias for `create_sxp_connections <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.create_sxp_connections>`_
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
create
|
CiscoISE/ciscoisesdk
|
python
|
def create(self, description=None, enabled=None, ip_address=None, sxp_mode=None, sxp_node=None, sxp_peer=None, sxp_version=None, sxp_vpn=None, headers=None, payload=None, active_validation=True, **query_parameters):
'Alias for `create_sxp_connections <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.create_sxp_connections>`_\n '
return self.create_sxp_connections(description=description, enabled=enabled, ip_address=ip_address, sxp_mode=sxp_mode, sxp_node=sxp_node, sxp_peer=sxp_peer, sxp_version=sxp_version, sxp_vpn=sxp_vpn, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)
|
def get_version(self, headers=None, **query_parameters):
"This API helps to retrieve the version information related to\n the SXP connections.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {}
e_url = '/ers/config/sxpconnections/versioninfo'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c1ceea62877152f6a4cf7ce709f4d0f8_v3_0_0', _api_response)
| 3,095,936,805,751,979,500
|
This API helps to retrieve the version information related to
the SXP connections.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with the following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
get_version
|
CiscoISE/ciscoisesdk
|
python
|
def get_version(self, headers=None, **query_parameters):
"This API helps to retrieve the version information related to\n the SXP connections.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {}
e_url = '/ers/config/sxpconnections/versioninfo'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c1ceea62877152f6a4cf7ce709f4d0f8_v3_0_0', _api_response)
|
def bulk_request_for_sxp_connections(self, operation_type=None, resource_media_type=None, headers=None, payload=None, active_validation=True, **query_parameters):
"This API allows the client to submit the bulk request.\n\n Args:\n operation_type(string): operationType, property of the\n request body.\n resource_media_type(string): resourceMediaType, property\n of the request body.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = ('application/xml' in _headers.get('Content-Type', []))
if (active_validation and is_xml_payload):
check_type(payload, basestring)
if (active_validation and (not is_xml_payload)):
check_type(payload, dict)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {'operationType': operation_type, 'resourceMediaType': resource_media_type}
_payload = {'ConnectionBulkRequest': dict_from_items_with_values(_tmp_payload)}
_payload.update((payload or {}))
_payload = dict_from_items_with_values(_payload)
if (active_validation and (not is_xml_payload)):
self._request_validator('jsd_e390313557e95aa9b8c2453d6f1de1e8_v3_0_0').validate(_payload)
e_url = '/ers/config/sxpconnections/bulk/submit'
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = ({'data': _payload} if is_xml_payload else {'json': _payload})
if with_custom_headers:
_api_response = self._session.put(endpoint_full_url, params=_params, headers=_headers, **request_params)
else:
_api_response = self._session.put(endpoint_full_url, params=_params, **request_params)
return self._object_factory('bpm_e390313557e95aa9b8c2453d6f1de1e8_v3_0_0', _api_response)
| -40,799,253,086,491,080
|
This API allows the client to submit the bulk request.
Args:
operation_type(string): operationType, property of the
request body.
resource_media_type(string): resourceMediaType, property
of the request body.
headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with the following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
bulk_request_for_sxp_connections
|
CiscoISE/ciscoisesdk
|
python
|
def bulk_request_for_sxp_connections(self, operation_type=None, resource_media_type=None, headers=None, payload=None, active_validation=True, **query_parameters):
"This API allows the client to submit the bulk request.\n\n Args:\n operation_type(string): operationType, property of the\n request body.\n resource_media_type(string): resourceMediaType, property\n of the request body.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = ('application/xml' in _headers.get('Content-Type', []))
if (active_validation and is_xml_payload):
check_type(payload, basestring)
if (active_validation and (not is_xml_payload)):
check_type(payload, dict)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {'operationType': operation_type, 'resourceMediaType': resource_media_type}
_payload = {'ConnectionBulkRequest': dict_from_items_with_values(_tmp_payload)}
_payload.update((payload or {}))
_payload = dict_from_items_with_values(_payload)
if (active_validation and (not is_xml_payload)):
self._request_validator('jsd_e390313557e95aa9b8c2453d6f1de1e8_v3_0_0').validate(_payload)
e_url = '/ers/config/sxpconnections/bulk/submit'
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = ({'data': _payload} if is_xml_payload else {'json': _payload})
if with_custom_headers:
_api_response = self._session.put(endpoint_full_url, params=_params, headers=_headers, **request_params)
else:
_api_response = self._session.put(endpoint_full_url, params=_params, **request_params)
return self._object_factory('bpm_e390313557e95aa9b8c2453d6f1de1e8_v3_0_0', _api_response)
|
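A hedged sketch of submitting a bulk job through the wrapper above, reusing the placeholder api client from the earlier sketch; the operationType and resourceMediaType strings are illustrative, and any resource ids would be supplied through the extra payload dict:

# Submit the bulk job; ERS bulk submits conventionally report the job id
# in the Location response header rather than in the body.
resp = api.sxp_connections.bulk_request_for_sxp_connections(
    operation_type='delete',
    resource_media_type='vnd.com.cisco.ise.config.sxpconnection.1.0+xml')
print(resp.headers)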
def bulk_request(self, operation_type=None, resource_media_type=None, headers=None, payload=None, active_validation=True, **query_parameters):
'Alias for `bulk_request_for_sxp_connections <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.bulk_request_for_sxp_connections>`_\n '
return self.bulk_request_for_sxp_connections(operation_type=operation_type, resource_media_type=resource_media_type, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)
| -2,038,944,062,834,316,800
|
Alias for `bulk_request_for_sxp_connections <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.bulk_request_for_sxp_connections>`_
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
bulk_request
|
CiscoISE/ciscoisesdk
|
python
|
def bulk_request(self, operation_type=None, resource_media_type=None, headers=None, payload=None, active_validation=True, **query_parameters):
'Alias for `bulk_request_for_sxp_connections <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.bulk_request_for_sxp_connections>`_\n '
return self.bulk_request_for_sxp_connections(operation_type=operation_type, resource_media_type=resource_media_type, payload=payload, active_validation=active_validation, headers=headers, **query_parameters)
|
def monitor_bulk_status_sxp_connections(self, bulkid, headers=None, **query_parameters):
"This API allows the client to monitor the bulk request.\n\n Args:\n bulkid(basestring): bulkid path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(bulkid, basestring, may_be_none=False)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {'bulkid': bulkid}
e_url = '/ers/config/sxpconnections/bulk/{bulkid}'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c2fb20ca5eb79facdda896457507_v3_0_0', _api_response)
| 6,279,486,229,551,649,000
|
This API allows the client to monitor the bulk request.
Args:
bulkid(basestring): bulkid path parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with the following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
monitor_bulk_status_sxp_connections
|
CiscoISE/ciscoisesdk
|
python
|
def monitor_bulk_status_sxp_connections(self, bulkid, headers=None, **query_parameters):
"This API allows the client to monitor the bulk request.\n\n Args:\n bulkid(basestring): bulkid path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n "
check_type(headers, dict)
if (headers is not None):
if ('Content-Type' in headers):
check_type(headers.get('Content-Type'), basestring, may_be_none=False)
if ('Accept' in headers):
check_type(headers.get('Accept'), basestring, may_be_none=False)
with_custom_headers = False
_headers = (self._session.headers or {})
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(bulkid, basestring, may_be_none=False)
_params = {}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {'bulkid': bulkid}
e_url = '/ers/config/sxpconnections/bulk/{bulkid}'
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params, headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c2fb20ca5eb79facdda896457507_v3_0_0', _api_response)
|
def monitor_bulk_status(self, bulkid, headers=None, **query_parameters):
'Alias for `monitor_bulk_status_sxp_connections <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.monitor_bulk_status_sxp_connections>`_\n '
return self.monitor_bulk_status_sxp_connections(bulkid=bulkid, headers=headers, **query_parameters)
| 779,299,447,832,129,700
|
Alias for `monitor_bulk_status_sxp_connections <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.monitor_bulk_status_sxp_connections>`_
|
ciscoisesdk/api/v3_0_0/sxp_connections.py
|
monitor_bulk_status
|
CiscoISE/ciscoisesdk
|
python
|
def monitor_bulk_status(self, bulkid, headers=None, **query_parameters):
'Alias for `monitor_bulk_status_sxp_connections <#ciscoisesdk.\n api.v3_0_0.sxp_connections.\n SxpConnections.monitor_bulk_status_sxp_connections>`_\n '
return self.monitor_bulk_status_sxp_connections(bulkid=bulkid, headers=headers, **query_parameters)
|
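The matching status poll, again assuming the placeholder api client; the bulk id below is hypothetical and would in practice be parsed from the submit response's Location header:

import time

bulkid = '1234567890'  # hypothetical job id
for _ in range(10):  # bounded poll instead of an open-ended loop
    status = api.sxp_connections.monitor_bulk_status(bulkid)
    print(status.response)  # MyDict carrying the job's execution status
    time.sleep(5)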
def __getitem__(self, key):
'\n This allows an object which is an instance of this class to behave\n like a dictionary when queried with [] syntax\n '
return self.config[key]
| 6,932,194,484,295,522,000
|
This allows an object which is an instance of this class to behave
like a dictionary when queried with [] syntax
|
blipp/conf.py
|
__getitem__
|
periscope-ps/blipp
|
python
|
def __getitem__(self, key):
'\n This allows an object which is an instance of this class to behave\n like a dictionary when queried with [] syntax\n '
return self.config[key]
|
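A toy illustration of the delegation pattern above; the class name and config contents are illustrative, not from blipp:

class ConfigView(object):
    def __init__(self, config):
        self.config = config

    def __getitem__(self, key):
        # Bracket access is forwarded to the wrapped dict.
        return self.config[key]

view = ConfigView({'interval': 30})
assert view['interval'] == 30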
def entropy_score(kmer):
'\n Schmieder and Edwards. Quality control and preprocessing of metagenomic datasets. (2011) Bioinformatics\n https://academic.oup.com/bioinformatics/article/27/6/863/236283/Quality-control-and-preprocessing-of-metagenomic\n '
l = (len(kmer) - 2)
k = (l if (l < 64) else 64)
counts = defaultdict(int)
for i in range(l):
trinuc = kmer[i:(i + 3)]
counts[trinuc] += 1
logk = math.log(k)
res = 0
    for v in counts.values():  # only the counts are needed; the old (k, v) unpacking shadowed the k computed above
f = ((v * 1.0) / l)
res += ((f * math.log(f)) / logk)
return (res * (- 100))
| -6,284,588,666,856,321,000
|
Schmieder and Edwards. Quality control and preprocessing of metagenomic datasets. (2011) Bioinformatics
https://academic.oup.com/bioinformatics/article/27/6/863/236283/Quality-control-and-preprocessing-of-metagenomic
|
jcvi/assembly/kmer.py
|
entropy_score
|
lufuhao/jcvi
|
python
|
def entropy_score(kmer):
'\n Schmieder and Edwards. Quality control and preprocessing of metagenomic datasets. (2011) Bioinformatics\n https://academic.oup.com/bioinformatics/article/27/6/863/236283/Quality-control-and-preprocessing-of-metagenomic\n '
l = (len(kmer) - 2)
k = (l if (l < 64) else 64)
counts = defaultdict(int)
for i in range(l):
trinuc = kmer[i:(i + 3)]
counts[trinuc] += 1
logk = math.log(k)
res = 0
    for v in counts.values():  # only the counts are needed; the old (k, v) unpacking shadowed the k computed above
f = ((v * 1.0) / l)
res += ((f * math.log(f)) / logk)
return (res * (- 100))
|
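Two hand-checked calls against the function above (math and defaultdict are module-level imports in jcvi); the score is -100 * sum(f * log(f)) / log(k) over overlapping trinucleotides:

print(entropy_score('A' * 22))                  # -0.0: the only trinucleotide is 'AAA', zero entropy
print(entropy_score('ACGTACGTACGTACGTACGTAC'))  # ~46.3: 4 distinct trinucleotides, each f = 0.25, over 20 windows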
def entropy(args):
'\n %prog entropy kmc_dump.out\n\n kmc_dump.out contains two columns:\n AAAAAAAAAAAGAAGAAAGAAA 34\n '
p = OptionParser(entropy.__doc__)
p.add_option('--threshold', default=0, type='int', help='Complexity needs to be above')
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(kmc_out,) = args
fp = open(kmc_out)
for row in fp:
(kmer, count) = row.split()
score = entropy_score(kmer)
if (score >= opts.threshold):
print(' '.join((kmer, count, '{:.2f}'.format(score))))
| -6,110,690,022,701,924,000
|
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
|
jcvi/assembly/kmer.py
|
entropy
|
lufuhao/jcvi
|
python
|
def entropy(args):
'\n %prog entropy kmc_dump.out\n\n kmc_dump.out contains two columns:\n AAAAAAAAAAAGAAGAAAGAAA 34\n '
p = OptionParser(entropy.__doc__)
p.add_option('--threshold', default=0, type='int', help='Complexity needs to be above')
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(kmc_out,) = args
fp = open(kmc_out)
for row in fp:
(kmer, count) = row.split()
score = entropy_score(kmer)
if (score >= opts.threshold):
print(' '.join((kmer, count, '{:.2f}'.format(score))))
|
def bed(args):
'\n %prog bed fastafile kmer.dump.txt\n\n Map kmers on FASTA.\n '
from jcvi.formats.fasta import rc, parse_fasta
p = OptionParser(bed.__doc__)
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(fastafile, dumpfile) = args
fp = open(dumpfile)
KMERS = set()
for row in fp:
kmer = row.split()[0]
kmer_rc = rc(kmer)
KMERS.add(kmer)
KMERS.add(kmer_rc)
K = len(kmer)
logging.debug('Imported {} {}-mers'.format(len(KMERS), K))
for (name, seq) in parse_fasta(fastafile):
name = name.split()[0]
        for i in range(((len(seq) - K) + 1)):  # +1 so the final K-mer, ending at the last base, is scanned too
if ((i % 5000000) == 0):
print('{}:{}'.format(name, i), file=sys.stderr)
kmer = seq[i:(i + K)]
if (kmer in KMERS):
print('\t'.join((str(x) for x in (name, i, (i + K), kmer))))
| 510,515,766,485,658,300
|
%prog bed fastafile kmer.dump.txt
Map kmers on FASTA.
|
jcvi/assembly/kmer.py
|
bed
|
lufuhao/jcvi
|
python
|
def bed(args):
'\n %prog bed fastafile kmer.dump.txt\n\n Map kmers on FASTA.\n '
from jcvi.formats.fasta import rc, parse_fasta
p = OptionParser(bed.__doc__)
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(fastafile, dumpfile) = args
fp = open(dumpfile)
KMERS = set()
for row in fp:
kmer = row.split()[0]
kmer_rc = rc(kmer)
KMERS.add(kmer)
KMERS.add(kmer_rc)
K = len(kmer)
logging.debug('Imported {} {}-mers'.format(len(KMERS), K))
for (name, seq) in parse_fasta(fastafile):
name = name.split()[0]
        for i in range(((len(seq) - K) + 1)):  # +1 so the final K-mer, ending at the last base, is scanned too
if ((i % 5000000) == 0):
print('{}:{}'.format(name, i), file=sys.stderr)
kmer = seq[i:(i + K)]
if (kmer in KMERS):
print('\t'.join((str(x) for x in (name, i, (i + K), kmer))))
|
def kmcop(args):
'\n %prog kmcop *.kmc_suf\n\n Intersect or union kmc indices.\n '
p = OptionParser(kmcop.__doc__)
p.add_option('--action', choices=('union', 'intersect'), default='union', help='Action')
p.add_option('-o', default='results', help='Output name')
(opts, args) = p.parse_args(args)
if (len(args) < 2):
sys.exit((not p.print_help()))
indices = args
ku = KMCComplex(indices)
ku.write(opts.o, action=opts.action)
| 5,931,948,310,414,970,000
|
%prog kmcop *.kmc_suf
Intersect or union kmc indices.
|
jcvi/assembly/kmer.py
|
kmcop
|
lufuhao/jcvi
|
python
|
def kmcop(args):
'\n %prog kmcop *.kmc_suf\n\n Intersect or union kmc indices.\n '
p = OptionParser(kmcop.__doc__)
p.add_option('--action', choices=('union', 'intersect'), default='union', help='Action')
p.add_option('-o', default='results', help='Output name')
(opts, args) = p.parse_args(args)
if (len(args) < 2):
sys.exit((not p.print_help()))
indices = args
ku = KMCComplex(indices)
ku.write(opts.o, action=opts.action)
|
def kmc(args):
'\n %prog kmc folder\n\n Run kmc3 on Illumina reads.\n '
p = OptionParser(kmc.__doc__)
p.add_option('-k', default=21, type='int', help='Kmer size')
p.add_option('--ci', default=2, type='int', help='Exclude kmers with less than ci counts')
p.add_option('--cs', default=2, type='int', help='Maximal value of a counter')
p.add_option('--cx', default=None, type='int', help='Exclude kmers with more than cx counts')
p.add_option('--single', default=False, action='store_true', help='Input is single-end data, only one FASTQ/FASTA')
p.add_option('--fasta', default=False, action='store_true', help='Input is FASTA instead of FASTQ')
p.set_cpus()
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(folder,) = args
K = opts.k
n = (1 if opts.single else 2)
pattern = ('*.fa,*.fa.gz,*.fasta,*.fasta.gz' if opts.fasta else '*.fq,*.fq.gz,*.fastq,*.fastq.gz')
mm = MakeManager()
for (p, pf) in iter_project(folder, pattern=pattern, n=n, commonprefix=False):
pf = (pf.split('_')[0] + '.ms{}'.format(K))
infiles = (pf + '.infiles')
fw = open(infiles, 'w')
print('\n'.join(p), file=fw)
fw.close()
cmd = 'kmc -k{} -m64 -t{}'.format(K, opts.cpus)
cmd += ' -ci{} -cs{}'.format(opts.ci, opts.cs)
if opts.cx:
cmd += ' -cx{}'.format(opts.cx)
if opts.fasta:
cmd += ' -fm'
cmd += ' @{} {} .'.format(infiles, pf)
outfile = (pf + '.kmc_suf')
mm.add(p, outfile, cmd)
mm.write()
| 6,032,916,289,647,153,000
|
%prog kmc folder
Run kmc3 on Illumina reads.
|
jcvi/assembly/kmer.py
|
kmc
|
lufuhao/jcvi
|
python
|
def kmc(args):
'\n %prog kmc folder\n\n Run kmc3 on Illumina reads.\n '
p = OptionParser(kmc.__doc__)
p.add_option('-k', default=21, type='int', help='Kmer size')
p.add_option('--ci', default=2, type='int', help='Exclude kmers with less than ci counts')
p.add_option('--cs', default=2, type='int', help='Maximal value of a counter')
p.add_option('--cx', default=None, type='int', help='Exclude kmers with more than cx counts')
p.add_option('--single', default=False, action='store_true', help='Input is single-end data, only one FASTQ/FASTA')
p.add_option('--fasta', default=False, action='store_true', help='Input is FASTA instead of FASTQ')
p.set_cpus()
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(folder,) = args
K = opts.k
n = (1 if opts.single else 2)
pattern = ('*.fa,*.fa.gz,*.fasta,*.fasta.gz' if opts.fasta else '*.fq,*.fq.gz,*.fastq,*.fastq.gz')
mm = MakeManager()
for (p, pf) in iter_project(folder, pattern=pattern, n=n, commonprefix=False):
pf = (pf.split('_')[0] + '.ms{}'.format(K))
infiles = (pf + '.infiles')
fw = open(infiles, 'w')
print('\n'.join(p), file=fw)
fw.close()
cmd = 'kmc -k{} -m64 -t{}'.format(K, opts.cpus)
cmd += ' -ci{} -cs{}'.format(opts.ci, opts.cs)
if opts.cx:
cmd += ' -cx{}'.format(opts.cx)
if opts.fasta:
cmd += ' -fm'
cmd += ' @{} {} .'.format(infiles, pf)
outfile = (pf + '.kmc_suf')
mm.add(p, outfile, cmd)
mm.write()
|
def meryl(args):
'\n %prog meryl folder\n\n Run meryl on Illumina reads.\n '
p = OptionParser(meryl.__doc__)
p.add_option('-k', default=19, type='int', help='Kmer size')
p.set_cpus()
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(folder,) = args
K = opts.k
cpus = opts.cpus
mm = MakeManager()
for (p, pf) in iter_project(folder):
cmds = []
mss = []
for (i, ip) in enumerate(p):
ms = '{}{}.ms{}'.format(pf, (i + 1), K)
mss.append(ms)
cmd = 'meryl -B -C -m {} -threads {}'.format(K, cpus)
cmd += ' -s {} -o {}'.format(ip, ms)
cmds.append(cmd)
(ams, bms) = mss
pms = '{}.ms{}'.format(pf, K)
cmd = 'meryl -M add -s {} -s {} -o {}'.format(ams, bms, pms)
cmds.append(cmd)
cmd = 'rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx'.format(ams, ams, bms, bms)
cmds.append(cmd)
mm.add(p, (pms + '.mcdat'), cmds)
mm.write()
| 7,407,222,545,709,982,000
|
%prog meryl folder
Run meryl on Illumina reads.
|
jcvi/assembly/kmer.py
|
meryl
|
lufuhao/jcvi
|
python
|
def meryl(args):
'\n %prog meryl folder\n\n Run meryl on Illumina reads.\n '
p = OptionParser(meryl.__doc__)
p.add_option('-k', default=19, type='int', help='Kmer size')
p.set_cpus()
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(folder,) = args
K = opts.k
cpus = opts.cpus
mm = MakeManager()
for (p, pf) in iter_project(folder):
cmds = []
mss = []
for (i, ip) in enumerate(p):
ms = '{}{}.ms{}'.format(pf, (i + 1), K)
mss.append(ms)
cmd = 'meryl -B -C -m {} -threads {}'.format(K, cpus)
cmd += ' -s {} -o {}'.format(ip, ms)
cmds.append(cmd)
(ams, bms) = mss
pms = '{}.ms{}'.format(pf, K)
cmd = 'meryl -M add -s {} -s {} -o {}'.format(ams, bms, pms)
cmds.append(cmd)
cmd = 'rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx'.format(ams, ams, bms, bms)
cmds.append(cmd)
mm.add(p, (pms + '.mcdat'), cmds)
mm.write()
|
def model(args):
'\n %prog model erate\n\n Model kmer distribution given error rate. See derivation in FIONA paper:\n <http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>\n '
from scipy.stats import binom, poisson
p = OptionParser(model.__doc__)
p.add_option('-k', default=23, type='int', help='Kmer size')
p.add_option('--cov', default=50, type='int', help='Expected coverage')
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(erate,) = args
erate = float(erate)
cov = opts.cov
k = opts.k
xy = []
for c in range(0, ((cov * 2) + 1)):
Prob_Yk = 0
for i in range((k + 1)):
pi_i = binom.pmf(i, k, erate)
mu_i = ((cov * ((erate / 3) ** i)) * ((1 - erate) ** (k - i)))
Prob_Yk_i = poisson.pmf(c, mu_i)
Prob_Yk += (pi_i * Prob_Yk_i)
xy.append((c, Prob_Yk))
(x, y) = zip(*xy)
asciiplot(x, y, title='Model')
| 6,194,723,951,826,256,000
|
%prog model erate
Model kmer distribution given error rate. See derivation in FIONA paper:
<http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>
|
jcvi/assembly/kmer.py
|
model
|
lufuhao/jcvi
|
python
|
def model(args):
'\n %prog model erate\n\n Model kmer distribution given error rate. See derivation in FIONA paper:\n <http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>\n '
from scipy.stats import binom, poisson
p = OptionParser(model.__doc__)
p.add_option('-k', default=23, type='int', help='Kmer size')
p.add_option('--cov', default=50, type='int', help='Expected coverage')
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(erate,) = args
erate = float(erate)
cov = opts.cov
k = opts.k
xy = []
for c in range(0, ((cov * 2) + 1)):
Prob_Yk = 0
for i in range((k + 1)):
pi_i = binom.pmf(i, k, erate)
mu_i = ((cov * ((erate / 3) ** i)) * ((1 - erate) ** (k - i)))
Prob_Yk_i = poisson.pmf(c, mu_i)
Prob_Yk += (pi_i * Prob_Yk_i)
xy.append((c, Prob_Yk))
(x, y) = zip(*xy)
asciiplot(x, y, title='Model')
|
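The loop above is a mixture model: a K-mer carrying i sequencing errors contributes a Poisson count distribution with mean cov * (erate/3)**i * (1 - erate)**(k - i), weighted by the binomial probability of i errors among k bases. A standalone check of the dominant error-free term:

from scipy.stats import binom, poisson

k, cov, erate = 23, 50, 0.01
mu0 = cov * (1 - erate) ** k   # mean coverage of error-free 23-mers, ~39.7x
print(binom.pmf(0, k, erate))  # ~0.794: chance a 23-mer contains no error
print(poisson.pmf(40, mu0))    # mass the error-free peak puts on an observed count of 40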
def logodds(args):
    '\n %prog logodds cnt1 cnt2\n\n Compute log-odds scores between two K-mer count databases.\n '
from math import log
from jcvi.formats.base import DictFile
p = OptionParser(logodds.__doc__)
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(cnt1, cnt2) = args
d = DictFile(cnt2)
fp = open(cnt1)
for row in fp:
(scf, c1) = row.split()
c2 = d[scf]
(c1, c2) = (float(c1), float(c2))
c1 += 1
c2 += 1
score = int((100 * (log(c1) - log(c2))))
print('{0}\t{1}'.format(scf, score))
| 6,346,075,636,277,798,000
|
%prog logodds cnt1 cnt2
Compute log-odds scores between two K-mer count databases.
|
jcvi/assembly/kmer.py
|
logodds
|
lufuhao/jcvi
|
python
|
def logodds(args):
    '\n %prog logodds cnt1 cnt2\n\n Compute log-odds scores between two K-mer count databases.\n '
from math import log
from jcvi.formats.base import DictFile
p = OptionParser(logodds.__doc__)
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(cnt1, cnt2) = args
d = DictFile(cnt2)
fp = open(cnt1)
for row in fp:
(scf, c1) = row.split()
c2 = d[scf]
(c1, c2) = (float(c1), float(c2))
c1 += 1
c2 += 1
score = int((100 * (log(c1) - log(c2))))
print('{0}\t{1}'.format(scf, score))
|
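The printed score above is a pseudocounted log-ratio scaled by 100 (both counts are incremented by one before the logs, so zero counts are safe). A hand check with illustrative counts:

from math import log

c1, c2 = 99, 9
print(int(100 * (log(c1 + 1) - log(c2 + 1))))  # 230, i.e. int(100 * ln 10)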
def get_K(jfdb):
'\n Infer K from jellyfish db.\n '
j = jfdb.rsplit('_', 1)[0].rsplit('-', 1)[(- 1)]
assert (j[0] == 'K')
return int(j[1:])
| 2,435,800,455,511,185,000
|
Infer K from jellyfish db.
|
jcvi/assembly/kmer.py
|
get_K
|
lufuhao/jcvi
|
python
|
def get_K(jfdb):
'\n \n '
j = jfdb.rsplit('_', 1)[0].rsplit('-', 1)[(- 1)]
assert (j[0] == 'K')
return int(j[1:])
|
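A filename walk-through for the parser above, using the '{prefix}-K{K}' naming scheme that jellyfish() later in this file produces; the '_0' shard suffix is illustrative and is what the rsplit('_', 1) tolerates:

jfdb = 'jf-K23_0'
j = jfdb.rsplit('_', 1)[0].rsplit('-', 1)[-1]  # 'K23'
print(int(j[1:]))                              # 23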
def count(args):
'\n %prog count fastafile jf.db\n\n Run dump - jellyfish - bin - bincount in serial.\n '
from bitarray import bitarray
p = OptionParser(count.__doc__)
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(fastafile, jfdb) = args
K = get_K(jfdb)
cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
t = must_open('tmp', 'w')
    proc = Popen(cmd, stdin=PIPE, stdout=t, shell=True, universal_newlines=True)  # the command string contains a shell pipe, so it must run through a shell; text mode lets print() write to proc.stdin
t.flush()
f = Fasta(fastafile, lazy=True)
for (name, rec) in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print('\n'.join(kmers), file=proc.stdin)
proc.stdin.close()
logging.debug(cmd)
proc.wait()
a = bitarray()
binfile = '.'.join((fastafile, jfdb, 'bin'))
    fw = open(binfile, 'wb')  # bitarray.tofile() requires a binary-mode file
t.seek(0)
for row in t:
c = row.strip()
a.append(int(c))
a.tofile(fw)
logging.debug('Serialize {0} bits to `{1}`.'.format(len(a), binfile))
fw.close()
sh('rm {0}'.format(t.name))
logging.debug('Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.'.format(K, fastafile, jfdb, binfile))
cntfile = '.'.join((fastafile, jfdb, 'cnt'))
bincount([fastafile, binfile, '-o', cntfile, '-K {0}'.format(K)])
logging.debug('Shared K-mer counts written to `{0}`.'.format(cntfile))
| 4,242,329,288,736,255,000
|
%prog count fastafile jf.db
Run dump - jellyfish - bin - bincount in serial.
|
jcvi/assembly/kmer.py
|
count
|
lufuhao/jcvi
|
python
|
def count(args):
'\n %prog count fastafile jf.db\n\n Run dump - jellyfish - bin - bincount in serial.\n '
from bitarray import bitarray
p = OptionParser(count.__doc__)
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(fastafile, jfdb) = args
K = get_K(jfdb)
cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
t = must_open('tmp', 'w')
    proc = Popen(cmd, stdin=PIPE, stdout=t, shell=True, universal_newlines=True)  # the command string contains a shell pipe, so it must run through a shell; text mode lets print() write to proc.stdin
t.flush()
f = Fasta(fastafile, lazy=True)
for (name, rec) in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print('\n'.join(kmers), file=proc.stdin)
proc.stdin.close()
logging.debug(cmd)
proc.wait()
a = bitarray()
binfile = '.'.join((fastafile, jfdb, 'bin'))
    fw = open(binfile, 'wb')  # bitarray.tofile() requires a binary-mode file
t.seek(0)
for row in t:
c = row.strip()
a.append(int(c))
a.tofile(fw)
logging.debug('Serialize {0} bits to `{1}`.'.format(len(a), binfile))
fw.close()
sh('rm {0}'.format(t.name))
logging.debug('Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.'.format(K, fastafile, jfdb, binfile))
cntfile = '.'.join((fastafile, jfdb, 'cnt'))
bincount([fastafile, binfile, '-o', cntfile, '-K {0}'.format(K)])
logging.debug('Shared K-mer counts written to `{0}`.'.format(cntfile))
|
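count() above and dump() below both rely on make_kmers, which is defined elsewhere in jcvi; a minimal sketch consistent with its use here and with bincount()'s len(seq) - K + 1 window arithmetic:

def make_kmers(seq, K):
    # Yield every overlapping K-mer, including the one ending at the last base.
    seq = str(seq).upper()
    for i in range(len(seq) - K + 1):
        yield seq[i:i + K]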
def bincount(args):
'\n %prog bincount fastafile binfile\n\n Count K-mers in the bin.\n '
from bitarray import bitarray
from jcvi.formats.sizes import Sizes
p = OptionParser(bincount.__doc__)
p.add_option('-K', default=23, type='int', help='K-mer size')
p.set_outfile()
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(fastafile, binfile) = args
K = opts.K
    fp = open(binfile, 'rb')  # bitarray.fromfile() requires a binary-mode file
a = bitarray()
a.fromfile(fp)
f = Sizes(fastafile)
tsize = 0
fw = must_open(opts.outfile, 'w')
for (name, seqlen) in f.iter_sizes():
ksize = ((seqlen - K) + 1)
b = a[tsize:(tsize + ksize)]
bcount = b.count()
print('\t'.join((str(x) for x in (name, bcount))), file=fw)
tsize += ksize
| 1,612,570,740,213,328,000
|
%prog bincount fastafile binfile
Count K-mers in the bin.
|
jcvi/assembly/kmer.py
|
bincount
|
lufuhao/jcvi
|
python
|
def bincount(args):
'\n %prog bincount fastafile binfile\n\n Count K-mers in the bin.\n '
from bitarray import bitarray
from jcvi.formats.sizes import Sizes
p = OptionParser(bincount.__doc__)
p.add_option('-K', default=23, type='int', help='K-mer size')
p.set_outfile()
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(fastafile, binfile) = args
K = opts.K
    fp = open(binfile, 'rb')  # bitarray.fromfile() requires a binary-mode file
a = bitarray()
a.fromfile(fp)
f = Sizes(fastafile)
tsize = 0
fw = must_open(opts.outfile, 'w')
for (name, seqlen) in f.iter_sizes():
ksize = ((seqlen - K) + 1)
b = a[tsize:(tsize + ksize)]
bcount = b.count()
print('\t'.join((str(x) for x in (name, bcount))), file=fw)
tsize += ksize
|
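The running tsize offset above assumes the bit stream was serialized in the same sequence order, len(seq) - K + 1 bits per sequence. A toy reconstruction with two short sequences:

from bitarray import bitarray

K = 3
sizes = [('seq1', 5), ('seq2', 4)]  # (name, sequence length) pairs
a = bitarray('101' + '01')          # 3 bits for seq1 (5-3+1), then 2 for seq2 (4-3+1)
tsize = 0
for name, seqlen in sizes:
    ksize = seqlen - K + 1
    print(name, a[tsize:tsize + ksize].count())  # seq1 2, seq2 1 shared K-mers
    tsize += ksize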
def bin(args):
'\n %prog bin filename filename.bin\n\n Serialize counts to bitarrays.\n '
from bitarray import bitarray
p = OptionParser(bin.__doc__)
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(inp, outp) = args
fp = must_open(inp)
fw = must_open(outp, 'w')
a = bitarray()
for row in fp:
c = row.split()[(- 1)]
a.append(int(c))
a.tofile(fw)
fw.close()
| -320,122,754,336,861,250
|
%prog bin filename filename.bin
Serialize counts to bitarrays.
|
jcvi/assembly/kmer.py
|
bin
|
lufuhao/jcvi
|
python
|
def bin(args):
'\n %prog bin filename filename.bin\n\n Serialize counts to bitarrays.\n '
from bitarray import bitarray
p = OptionParser(bin.__doc__)
(opts, args) = p.parse_args(args)
if (len(args) != 2):
sys.exit((not p.print_help()))
(inp, outp) = args
fp = must_open(inp)
fw = must_open(outp, 'w')
a = bitarray()
for row in fp:
c = row.split()[(- 1)]
a.append(int(c))
a.tofile(fw)
fw.close()
|
def dump(args):
'\n %prog dump fastafile\n\n Convert FASTA sequences to list of K-mers.\n '
p = OptionParser(dump.__doc__)
p.add_option('-K', default=23, type='int', help='K-mer size')
p.set_outfile()
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(fastafile,) = args
K = opts.K
fw = must_open(opts.outfile, 'w')
f = Fasta(fastafile, lazy=True)
for (name, rec) in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print('\n'.join(kmers), file=fw)
fw.close()
| -9,024,217,095,251,740,000
|
%prog dump fastafile
Convert FASTA sequences to list of K-mers.
|
jcvi/assembly/kmer.py
|
dump
|
lufuhao/jcvi
|
python
|
def dump(args):
'\n %prog dump fastafile\n\n Convert FASTA sequences to list of K-mers.\n '
p = OptionParser(dump.__doc__)
p.add_option('-K', default=23, type='int', help='K-mer size')
p.set_outfile()
(opts, args) = p.parse_args(args)
if (len(args) != 1):
sys.exit((not p.print_help()))
(fastafile,) = args
K = opts.K
fw = must_open(opts.outfile, 'w')
f = Fasta(fastafile, lazy=True)
for (name, rec) in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print('\n'.join(kmers), file=fw)
fw.close()
|
def jellyfish(args):
'\n %prog jellyfish [*.fastq|*.fasta]\n\n Run jellyfish to dump histogram to be used in kmer.histogram().\n '
from jcvi.apps.base import getfilesize
from jcvi.utils.cbook import human_size
p = OptionParser(jellyfish.__doc__)
p.add_option('-K', default=23, type='int', help='K-mer size')
p.add_option('--coverage', default=40, type='int', help='Expected sequence coverage')
p.add_option('--prefix', default='jf', help='Database prefix')
p.add_option('--nohist', default=False, action='store_true', help='Do not print histogram')
p.set_home('jellyfish')
p.set_cpus()
(opts, args) = p.parse_args(args)
if (len(args) < 1):
sys.exit((not p.print_help()))
fastqfiles = args
K = opts.K
coverage = opts.coverage
totalfilesize = sum((getfilesize(x) for x in fastqfiles))
fq = fastqfiles[0]
pf = opts.prefix
gzip = fq.endswith('.gz')
    hashsize = (totalfilesize // coverage)  # floor division keeps the -s argument integral for jellyfish
logging.debug('Total file size: {0}, hashsize (-s): {1}'.format(human_size(totalfilesize, a_kilobyte_is_1024_bytes=True), hashsize))
jfpf = '{0}-K{1}'.format(pf, K)
jfdb = jfpf
fastqfiles = ' '.join(fastqfiles)
jfcmd = op.join(opts.jellyfish_home, 'jellyfish')
cmd = jfcmd
cmd += ' count -t {0} -C -o {1}'.format(opts.cpus, jfpf)
cmd += ' -s {0} -m {1}'.format(hashsize, K)
if gzip:
cmd = (('gzip -dc {0} | '.format(fastqfiles) + cmd) + ' /dev/fd/0')
else:
cmd += (' ' + fastqfiles)
if need_update(fastqfiles, jfdb):
sh(cmd)
if opts.nohist:
return
jfhisto = (jfpf + '.histogram')
cmd = (jfcmd + ' histo -t 64 {0} -o {1}'.format(jfdb, jfhisto))
if need_update(jfdb, jfhisto):
sh(cmd)
| -7,835,718,848,644,380,000
|
%prog jellyfish [*.fastq|*.fasta]
Run jellyfish to dump histogram to be used in kmer.histogram().
|
jcvi/assembly/kmer.py
|
jellyfish
|
lufuhao/jcvi
|
python
|
def jellyfish(args):
'\n %prog jellyfish [*.fastq|*.fasta]\n\n Run jellyfish to dump histogram to be used in kmer.histogram().\n '
from jcvi.apps.base import getfilesize
from jcvi.utils.cbook import human_size
p = OptionParser(jellyfish.__doc__)
p.add_option('-K', default=23, type='int', help='K-mer size')
p.add_option('--coverage', default=40, type='int', help='Expected sequence coverage')
p.add_option('--prefix', default='jf', help='Database prefix')
p.add_option('--nohist', default=False, action='store_true', help='Do not print histogram')
p.set_home('jellyfish')
p.set_cpus()
(opts, args) = p.parse_args(args)
if (len(args) < 1):
sys.exit((not p.print_help()))
fastqfiles = args
K = opts.K
coverage = opts.coverage
totalfilesize = sum((getfilesize(x) for x in fastqfiles))
fq = fastqfiles[0]
pf = opts.prefix
gzip = fq.endswith('.gz')
    hashsize = (totalfilesize // coverage)  # floor division keeps the -s argument integral for jellyfish
logging.debug('Total file size: {0}, hashsize (-s): {1}'.format(human_size(totalfilesize, a_kilobyte_is_1024_bytes=True), hashsize))
jfpf = '{0}-K{1}'.format(pf, K)
jfdb = jfpf
fastqfiles = ' '.join(fastqfiles)
jfcmd = op.join(opts.jellyfish_home, 'jellyfish')
cmd = jfcmd
cmd += ' count -t {0} -C -o {1}'.format(opts.cpus, jfpf)
cmd += ' -s {0} -m {1}'.format(hashsize, K)
if gzip:
cmd = (('gzip -dc {0} | '.format(fastqfiles) + cmd) + ' /dev/fd/0')
else:
cmd += (' ' + fastqfiles)
if need_update(fastqfiles, jfdb):
sh(cmd)
if opts.nohist:
return
jfhisto = (jfpf + '.histogram')
cmd = (jfcmd + ' histo -t 64 {0} -o {1}'.format(jfdb, jfhisto))
if need_update(jfdb, jfhisto):
sh(cmd)
|
def merylhistogram(merylfile):
'\n Run meryl to dump histogram to be used in kmer.histogram(). The merylfile\n are the files ending in .mcidx or .mcdat.\n '
(pf, sf) = op.splitext(merylfile)
outfile = (pf + '.histogram')
if need_update(merylfile, outfile):
cmd = 'meryl -Dh -s {0}'.format(pf)
sh(cmd, outfile=outfile)
return outfile
| 4,546,519,817,128,407,000
|
Run meryl to dump histogram to be used in kmer.histogram(). The merylfile
are the files ending in .mcidx or .mcdat.
|
jcvi/assembly/kmer.py
|
merylhistogram
|
lufuhao/jcvi
|
python
|
def merylhistogram(merylfile):
'\n Run meryl to dump histogram to be used in kmer.histogram(). The merylfile\n are the files ending in .mcidx or .mcdat.\n '
(pf, sf) = op.splitext(merylfile)
outfile = (pf + '.histogram')
if need_update(merylfile, outfile):
cmd = 'meryl -Dh -s {0}'.format(pf)
sh(cmd, outfile=outfile)
return outfile
|
def multihistogram(args):
    "\n %prog multihistogram *.histogram species\n\n Plot the histogram based on a set of K-mer histograms. The method is based\n on Star et al.'s method (Atlantic Cod genome paper).\n "
p = OptionParser(multihistogram.__doc__)
p.add_option('--kmin', default=15, type='int', help='Minimum K-mer size, inclusive')
p.add_option('--kmax', default=30, type='int', help='Maximum K-mer size, inclusive')
p.add_option('--vmin', default=2, type='int', help='Minimum value, inclusive')
p.add_option('--vmax', default=100, type='int', help='Maximum value, inclusive')
(opts, args, iopts) = p.set_image_options(args, figsize='10x5', dpi=300)
    if (len(args) < 2):  # need at least one histogram file plus the species label
sys.exit((not p.print_help()))
histfiles = args[:(- 1)]
species = args[(- 1)]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
A = fig.add_axes([0.08, 0.12, 0.38, 0.76])
B = fig.add_axes([0.58, 0.12, 0.38, 0.76])
lines = []
legends = []
genomesizes = []
for histfile in histfiles:
ks = KmerSpectrum(histfile)
(x, y) = ks.get_xy(opts.vmin, opts.vmax)
K = get_number(op.basename(histfile).split('.')[0].split('-')[(- 1)])
if (not (opts.kmin <= K <= opts.kmax)):
continue
(line,) = A.plot(x, y, '-', lw=1)
lines.append(line)
legends.append('K = {0}'.format(K))
ks.analyze(K=K)
genomesizes.append((K, (ks.genomesize / 1000000.0)))
leg = A.legend(lines, legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
title = '{0} genome K-mer histogram'.format(species)
A.set_title(markup(title))
(xlabel, ylabel) = ('Coverage (X)', 'Counts')
A.set_xlabel(xlabel)
A.set_ylabel(ylabel)
set_human_axis(A)
title = '{0} genome size estimate'.format(species)
B.set_title(markup(title))
(x, y) = zip(*genomesizes)
B.plot(x, y, 'ko', mfc='w')
t = np.linspace((opts.kmin - 0.5), (opts.kmax + 0.5), 100)
p = np.poly1d(np.polyfit(x, y, 2))
B.plot(t, p(t), 'r:')
(xlabel, ylabel) = ('K-mer size', 'Estimated genome size (Mb)')
B.set_xlabel(xlabel)
B.set_ylabel(ylabel)
set_ticklabels_arial(B)
labels = ((0.04, 0.96, 'A'), (0.54, 0.96, 'B'))
panel_labels(root, labels)
normalize_axes(root)
imagename = (species + '.multiK.pdf')
savefig(imagename, dpi=iopts.dpi, iopts=iopts)
| -2,772,674,749,330,491,400
|
%prog multihistogram *.histogram species
Plot the histogram based on a set of K-mer histograms. The method is based
on Star et al.'s method (Atlantic Cod genome paper).
|
jcvi/assembly/kmer.py
|
multihistogram
|
lufuhao/jcvi
|
python
|
def multihistogram(args):
    "\n %prog multihistogram *.histogram species\n\n Plot the histogram based on a set of K-mer histograms. The method is based\n on Star et al.'s method (Atlantic Cod genome paper).\n "
p = OptionParser(multihistogram.__doc__)
p.add_option('--kmin', default=15, type='int', help='Minimum K-mer size, inclusive')
p.add_option('--kmax', default=30, type='int', help='Maximum K-mer size, inclusive')
p.add_option('--vmin', default=2, type='int', help='Minimum value, inclusive')
p.add_option('--vmax', default=100, type='int', help='Maximum value, inclusive')
(opts, args, iopts) = p.set_image_options(args, figsize='10x5', dpi=300)
    if (len(args) < 2):  # need at least one histogram file plus the species label
sys.exit((not p.print_help()))
histfiles = args[:(- 1)]
species = args[(- 1)]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
A = fig.add_axes([0.08, 0.12, 0.38, 0.76])
B = fig.add_axes([0.58, 0.12, 0.38, 0.76])
lines = []
legends = []
genomesizes = []
for histfile in histfiles:
ks = KmerSpectrum(histfile)
(x, y) = ks.get_xy(opts.vmin, opts.vmax)
K = get_number(op.basename(histfile).split('.')[0].split('-')[(- 1)])
if (not (opts.kmin <= K <= opts.kmax)):
continue
(line,) = A.plot(x, y, '-', lw=1)
lines.append(line)
legends.append('K = {0}'.format(K))
ks.analyze(K=K)
genomesizes.append((K, (ks.genomesize / 1000000.0)))
leg = A.legend(lines, legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
title = '{0} genome K-mer histogram'.format(species)
A.set_title(markup(title))
(xlabel, ylabel) = ('Coverage (X)', 'Counts')
A.set_xlabel(xlabel)
A.set_ylabel(ylabel)
set_human_axis(A)
title = '{0} genome size estimate'.format(species)
B.set_title(markup(title))
(x, y) = zip(*genomesizes)
B.plot(x, y, 'ko', mfc='w')
t = np.linspace((opts.kmin - 0.5), (opts.kmax + 0.5), 100)
p = np.poly1d(np.polyfit(x, y, 2))
B.plot(t, p(t), 'r:')
(xlabel, ylabel) = ('K-mer size', 'Estimated genome size (Mb)')
B.set_xlabel(xlabel)
B.set_ylabel(ylabel)
set_ticklabels_arial(B)
labels = ((0.04, 0.96, 'A'), (0.54, 0.96, 'B'))
panel_labels(root, labels)
normalize_axes(root)
imagename = (species + '.multiK.pdf')
savefig(imagename, dpi=iopts.dpi, iopts=iopts)
|
def histogram(args):
    '\n %prog histogram meryl.histogram species K\n\n Plot the histogram based on the meryl K-mer distribution; species and K are\n only used to annotate the graphic.\n '
p = OptionParser(histogram.__doc__)
p.add_option('--vmin', dest='vmin', default=1, type='int', help='minimum value, inclusive')
p.add_option('--vmax', dest='vmax', default=100, type='int', help='maximum value, inclusive')
p.add_option('--pdf', default=False, action='store_true', help='Print PDF instead of ASCII plot')
p.add_option('--coverage', default=0, type='int', help='Kmer coverage [default: auto]')
p.add_option('--nopeaks', default=False, action='store_true', help='Do not annotate K-mer peaks')
(opts, args) = p.parse_args(args)
if (len(args) != 3):
sys.exit((not p.print_help()))
(histfile, species, N) = args
ascii = (not opts.pdf)
peaks = (not opts.nopeaks)
N = int(N)
if (histfile.rsplit('.', 1)[(- 1)] in ('mcdat', 'mcidx')):
logging.debug('CA kmer index found')
histfile = merylhistogram(histfile)
ks = KmerSpectrum(histfile)
ks.analyze(K=N)
Total_Kmers = int(ks.totalKmers)
coverage = opts.coverage
Kmer_coverage = (ks.max2 if (not coverage) else coverage)
Genome_size = int(round(((Total_Kmers * 1.0) / Kmer_coverage)))
Total_Kmers_msg = 'Total {0}-mers: {1}'.format(N, thousands(Total_Kmers))
Kmer_coverage_msg = '{0}-mer coverage: {1}'.format(N, Kmer_coverage)
Genome_size_msg = 'Estimated genome size: {0:.1f}Mb'.format((Genome_size / 1000000.0))
Repetitive_msg = ks.repetitive
SNPrate_msg = ks.snprate
for msg in (Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg):
print(msg, file=sys.stderr)
(x, y) = ks.get_xy(opts.vmin, opts.vmax)
title = '{0} {1}-mer histogram'.format(species, N)
if ascii:
asciiplot(x, y, title=title)
return Genome_size
plt.figure(1, (6, 6))
plt.plot(x, y, 'g-', lw=2, alpha=0.5)
ax = plt.gca()
if peaks:
t = (ks.min1, ks.max1, ks.min2, ks.max2, ks.min3)
tcounts = [(x, y) for (x, y) in ks.counts if (x in t)]
if tcounts:
(x, y) = zip(*tcounts)
tcounts = dict(tcounts)
plt.plot(x, y, 'ko', lw=2, mec='k', mfc='w')
ax.text(ks.max1, tcounts[ks.max1], 'SNP peak', va='top')
ax.text(ks.max2, tcounts[ks.max2], 'Main peak')
messages = [Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg, Repetitive_msg, SNPrate_msg]
write_messages(ax, messages)
(ymin, ymax) = ax.get_ylim()
ymax = ((ymax * 7) / 6)
ax.set_title(markup(title))
ax.set_ylim((ymin, ymax))
(xlabel, ylabel) = ('Coverage (X)', 'Counts')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
set_human_axis(ax)
imagename = (histfile.split('.')[0] + '.pdf')
savefig(imagename, dpi=100)
return Genome_size
| 4,191,076,299,505,616,400
|
%prog histogram meryl.histogram species K
Plot the histogram based on the meryl K-mer distribution; species and K are
only used to annotate the graphic.
|
jcvi/assembly/kmer.py
|
histogram
|
lufuhao/jcvi
|
python
|
def histogram(args):
    '\n %prog histogram meryl.histogram species K\n\n Plot the histogram based on the meryl K-mer distribution; species and K are\n only used to annotate the graphic.\n '
p = OptionParser(histogram.__doc__)
p.add_option('--vmin', dest='vmin', default=1, type='int', help='minimum value, inclusive')
p.add_option('--vmax', dest='vmax', default=100, type='int', help='maximum value, inclusive')
p.add_option('--pdf', default=False, action='store_true', help='Print PDF instead of ASCII plot')
p.add_option('--coverage', default=0, type='int', help='Kmer coverage [default: auto]')
p.add_option('--nopeaks', default=False, action='store_true', help='Do not annotate K-mer peaks')
(opts, args) = p.parse_args(args)
if (len(args) != 3):
sys.exit((not p.print_help()))
(histfile, species, N) = args
ascii = (not opts.pdf)
peaks = (not opts.nopeaks)
N = int(N)
if (histfile.rsplit('.', 1)[(- 1)] in ('mcdat', 'mcidx')):
logging.debug('CA kmer index found')
histfile = merylhistogram(histfile)
ks = KmerSpectrum(histfile)
ks.analyze(K=N)
Total_Kmers = int(ks.totalKmers)
coverage = opts.coverage
Kmer_coverage = (ks.max2 if (not coverage) else coverage)
Genome_size = int(round(((Total_Kmers * 1.0) / Kmer_coverage)))
Total_Kmers_msg = 'Total {0}-mers: {1}'.format(N, thousands(Total_Kmers))
Kmer_coverage_msg = '{0}-mer coverage: {1}'.format(N, Kmer_coverage)
Genome_size_msg = 'Estimated genome size: {0:.1f}Mb'.format((Genome_size / 1000000.0))
Repetitive_msg = ks.repetitive
SNPrate_msg = ks.snprate
for msg in (Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg):
print(msg, file=sys.stderr)
(x, y) = ks.get_xy(opts.vmin, opts.vmax)
title = '{0} {1}-mer histogram'.format(species, N)
if ascii:
asciiplot(x, y, title=title)
return Genome_size
plt.figure(1, (6, 6))
plt.plot(x, y, 'g-', lw=2, alpha=0.5)
ax = plt.gca()
if peaks:
t = (ks.min1, ks.max1, ks.min2, ks.max2, ks.min3)
tcounts = [(x, y) for (x, y) in ks.counts if (x in t)]
if tcounts:
(x, y) = zip(*tcounts)
tcounts = dict(tcounts)
plt.plot(x, y, 'ko', lw=2, mec='k', mfc='w')
ax.text(ks.max1, tcounts[ks.max1], 'SNP peak', va='top')
ax.text(ks.max2, tcounts[ks.max2], 'Main peak')
messages = [Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg, Repetitive_msg, SNPrate_msg]
write_messages(ax, messages)
(ymin, ymax) = ax.get_ylim()
ymax = ((ymax * 7) / 6)
ax.set_title(markup(title))
ax.set_ylim((ymin, ymax))
(xlabel, ylabel) = ('Coverage (X)', 'Counts')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
set_human_axis(ax)
imagename = (histfile.split('.')[0] + '.pdf')
savefig(imagename, dpi=100)
return Genome_size
|
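The genome-size arithmetic above in isolation, with illustrative round numbers:

total_kmers = 2000000000  # ks.totalKmers
kmer_coverage = 40        # position of the main peak, ks.max2
genome_size = int(round(total_kmers * 1.0 / kmer_coverage))
print('{:.1f}Mb'.format(genome_size / 1000000.0))  # 50.0Mb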
def analyze(self, ploidy=2, K=23, covmax=1000000):
'\n Analyze Kmer spectrum, calculations derived from\n allpathslg/src/kmers/KmerSpectra.cc\n '
from math import sqrt
data = self.data
kf_ceil = max((K for (K, c) in data))
if (kf_ceil > covmax):
exceeds = sum((1 for (K, c) in data if (K > covmax)))
logging.debug('A total of {0} distinct K-mers appear > {1} times. Ignored ...'.format(exceeds, covmax))
kf_ceil = covmax
nkf = (kf_ceil + 1)
a = ([0] * nkf)
for (kf, c) in data:
if (kf > kf_ceil):
continue
a[kf] = c
ndk = a
nk = [(k * c) for (k, c) in enumerate(a)]
cndk = ([0] * nkf)
cnk = ([0] * nkf)
for kf in range(1, nkf):
cndk[kf] = (cndk[(kf - 1)] + (0.5 * (ndk[(kf - 1)] + ndk[kf])))
cnk[kf] = (cnk[(kf - 1)] + (0.5 * (nk[(kf - 1)] + nk[kf])))
_kf_min1 = 10
while (((_kf_min1 - 1) >= 2) and (nk[(_kf_min1 - 1)] < nk[_kf_min1])):
_kf_min1 -= 1
    while ((_kf_min1 < kf_ceil) and (nk[(_kf_min1 + 1)] < nk[_kf_min1])):  # strict bound keeps _kf_min1 + 1 a valid index into nk
_kf_min1 += 1
_kf_max2 = _kf_min1
for kf in range((_kf_min1 + 1), int((0.8 * kf_ceil))):
if (nk[kf] > nk[_kf_max2]):
_kf_max2 = kf
    # Floor divisions below keep these spectrum landmarks valid list indices on Python 3.
    if (ploidy == 2):
        ndk_half = ndk[(_kf_max2 // 2)]
        ndk_double = ndk[(_kf_max2 * 2)]
        if (ndk_double > ndk_half):
            _kf_max2 *= 2
    _kf_max1 = (_kf_max2 // 2)
    _kf_min2 = ((_kf_max1 * ((2 * ndk[_kf_max1]) + ndk[_kf_max2])) // (ndk[_kf_max1] + ndk[_kf_max2]))
    for kf in range((_kf_min1 + 1), _kf_max1):
        if (nk[kf] < nk[_kf_min1]):
            _kf_min1 = kf
    _kf_min3 = ((_kf_max2 * 3) // 2)
print('kfs:', _kf_min1, _kf_max1, _kf_min2, _kf_max2, _kf_min3, file=sys.stderr)
self.min1 = _kf_min1
self.max1 = _kf_max1
self.min2 = _kf_min2
self.max2 = _kf_max2
self.min3 = _kf_min3
_kf_hi = ((_kf_max2 * sqrt(((4 * ndk[(2 * _kf_max2)]) * _kf_max2))) if ((2 * _kf_max2) < len(ndk)) else (_kf_max2 * sqrt(((4 * ndk[(len(ndk) - 1)]) * _kf_max2))))
_kf_hi = int(_kf_hi)
if (_kf_hi > kf_ceil):
_kf_hi = kf_ceil
_nk_total = cnk[(len(cnk) - 1)]
_nk_bad_low_kf = cnk[_kf_min1]
_nk_good_uniq = (cnk[_kf_min3] - cnk[_kf_min2])
_nk_bad_high_kf = (_nk_total - cnk[_kf_hi])
_ndk_good_snp = (cndk[_kf_min2] - cndk[_kf_min1])
_ndk_good_uniq = (cndk[_kf_min3] - cndk[_kf_min2])
_kf_ave_uniq = ((_nk_good_uniq * 1.0) / _ndk_good_uniq)
_genome_size = (((_nk_total - _nk_bad_low_kf) - _nk_bad_high_kf) / _kf_ave_uniq)
_genome_size_unique = (_ndk_good_uniq + (_ndk_good_snp / 2))
_genome_size_repetitive = (_genome_size - _genome_size_unique)
_coverage = ((_nk_total / _genome_size) if _genome_size else 0)
if (ploidy == 2):
_d_SNP = ((1.0 / (1.0 - ((1.0 - ((0.5 * _ndk_good_snp) / _genome_size)) ** (1.0 / K)))) if (_ndk_good_snp > 0) else 1000000)
G = int(_genome_size)
G1 = int(_genome_size_unique)
GR = int(_genome_size_repetitive)
coverage = int(_coverage)
m = 'Kmer (K={0}) Spectrum Analysis\n'.format(K)
m += 'Genome size estimate = {0}\n'.format(thousands(G))
m += 'Genome size estimate CN = 1 = {0} ({1})\n'.format(thousands(G1), percentage(G1, G))
m += 'Genome size estimate CN > 1 = {0} ({1})\n'.format(thousands(GR), percentage(GR, G))
m += 'Coverage estimate: {0} x\n'.format(coverage)
self.repetitive = 'Repeats: {0} percent'.format(((GR * 100) / G))
if (ploidy == 2):
d_SNP = int(_d_SNP)
self.snprate = 'SNP rate ~= 1/{0}'.format(d_SNP)
else:
self.snprate = 'SNP rate not computed (Ploidy = {0})'.format(ploidy)
m += (self.snprate + '\n')
self.genomesize = int(round(((self.totalKmers * 1.0) / self.max2)))
print(m, file=sys.stderr)
| 7,798,149,655,913,767,000
|
Analyze Kmer spectrum, calculations derived from
allpathslg/src/kmers/KmerSpectra.cc
|
jcvi/assembly/kmer.py
|
analyze
|
lufuhao/jcvi
|
python
|
def analyze(self, ploidy=2, K=23, covmax=1000000):
'\n Analyze Kmer spectrum, calculations derived from\n allpathslg/src/kmers/KmerSpectra.cc\n '
from math import sqrt
data = self.data
kf_ceil = max((K for (K, c) in data))
if (kf_ceil > covmax):
exceeds = sum((1 for (K, c) in data if (K > covmax)))
logging.debug('A total of {0} distinct K-mers appear > {1} times. Ignored ...'.format(exceeds, covmax))
kf_ceil = covmax
nkf = (kf_ceil + 1)
a = ([0] * nkf)
for (kf, c) in data:
if (kf > kf_ceil):
continue
a[kf] = c
ndk = a
nk = [(k * c) for (k, c) in enumerate(a)]
cndk = ([0] * nkf)
cnk = ([0] * nkf)
for kf in range(1, nkf):
cndk[kf] = (cndk[(kf - 1)] + (0.5 * (ndk[(kf - 1)] + ndk[kf])))
cnk[kf] = (cnk[(kf - 1)] + (0.5 * (nk[(kf - 1)] + nk[kf])))
_kf_min1 = 10
while (((_kf_min1 - 1) >= 2) and (nk[(_kf_min1 - 1)] < nk[_kf_min1])):
_kf_min1 -= 1
while ((_kf_min1 <= kf_ceil) and (nk[(_kf_min1 + 1)] < nk[_kf_min1])):
_kf_min1 += 1
_kf_max2 = _kf_min1
for kf in range((_kf_min1 + 1), int((0.8 * kf_ceil))):
if (nk[kf] > nk[_kf_max2]):
_kf_max2 = kf
if (ploidy == 2):
ndk_half = ndk[(_kf_max2 / 2)]
ndk_double = ndk[(_kf_max2 * 2)]
if (ndk_double > ndk_half):
_kf_max2 *= 2
_kf_max1 = (_kf_max2 / 2)
_kf_min2 = ((_kf_max1 * ((2 * ndk[_kf_max1]) + ndk[_kf_max2])) / (ndk[_kf_max1] + ndk[_kf_max2]))
for kf in range((_kf_min1 + 1), _kf_max1):
if (nk[kf] < nk[_kf_min1]):
_kf_min1 = kf
_kf_min3 = ((_kf_max2 * 3) / 2)
print('kfs:', _kf_min1, _kf_max1, _kf_min2, _kf_max2, _kf_min3, file=sys.stderr)
self.min1 = _kf_min1
self.max1 = _kf_max1
self.min2 = _kf_min2
self.max2 = _kf_max2
self.min3 = _kf_min3
_kf_hi = ((_kf_max2 * sqrt(((4 * ndk[(2 * _kf_max2)]) * _kf_max2))) if ((2 * _kf_max2) < len(ndk)) else (_kf_max2 * sqrt(((4 * ndk[(len(ndk) - 1)]) * _kf_max2))))
_kf_hi = int(_kf_hi)
if (_kf_hi > kf_ceil):
_kf_hi = kf_ceil
_nk_total = cnk[(len(cnk) - 1)]
_nk_bad_low_kf = cnk[_kf_min1]
_nk_good_uniq = (cnk[_kf_min3] - cnk[_kf_min2])
_nk_bad_high_kf = (_nk_total - cnk[_kf_hi])
_ndk_good_snp = (cndk[_kf_min2] - cndk[_kf_min1])
_ndk_good_uniq = (cndk[_kf_min3] - cndk[_kf_min2])
_kf_ave_uniq = ((_nk_good_uniq * 1.0) / _ndk_good_uniq)
_genome_size = (((_nk_total - _nk_bad_low_kf) - _nk_bad_high_kf) / _kf_ave_uniq)
_genome_size_unique = (_ndk_good_uniq + (_ndk_good_snp / 2))
_genome_size_repetitive = (_genome_size - _genome_size_unique)
_coverage = ((_nk_total / _genome_size) if _genome_size else 0)
if (ploidy == 2):
_d_SNP = ((1.0 / (1.0 - ((1.0 - ((0.5 * _ndk_good_snp) / _genome_size)) ** (1.0 / K)))) if (_ndk_good_snp > 0) else 1000000)
G = int(_genome_size)
G1 = int(_genome_size_unique)
GR = int(_genome_size_repetitive)
coverage = int(_coverage)
m = 'Kmer (K={0}) Spectrum Analysis\n'.format(K)
m += 'Genome size estimate = {0}\n'.format(thousands(G))
m += 'Genome size estimate CN = 1 = {0} ({1})\n'.format(thousands(G1), percentage(G1, G))
m += 'Genome size estimate CN > 1 = {0} ({1})\n'.format(thousands(GR), percentage(GR, G))
m += 'Coverage estimate: {0} x\n'.format(coverage)
self.repetitive = 'Repeats: {0} percent'.format(((GR * 100) / G))
if (ploidy == 2):
d_SNP = int(_d_SNP)
self.snprate = 'SNP rate ~= 1/{0}'.format(d_SNP)
else:
self.snprate = 'SNP rate not computed (Ploidy = {0})'.format(ploidy)
m += (self.snprate + '\n')
self.genomesize = int(round(((self.totalKmers * 1.0) / self.max2)))
print(m, file=sys.stderr)
|
def __init__(self, classifier, confidence=0.0, targeted=True, learning_rate=0.01, binary_search_steps=9, max_iter=10000, beta=0.001, initial_const=0.001, batch_size=128, decision_rule='EN'):
"\n Create an ElasticNet attack instance.\n\n :param classifier: A trained model.\n :type classifier: :class:`.Classifier`\n :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther\n away, from the original input, but classified with higher confidence as the target class.\n :type confidence: `float`\n :param targeted: Should the attack target one specific class.\n :type targeted: `bool`\n :param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better\n results but are slower to converge.\n :type learning_rate: `float`\n :param binary_search_steps: Number of times to adjust constant with binary search (positive value).\n :type binary_search_steps: `int`\n :param max_iter: The maximum number of iterations.\n :type max_iter: `int`\n :param beta: Hyperparameter trading off L2 minimization for L1 minimization.\n :type beta: `float`\n :param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance\n and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in\n Carlini and Wagner (2016).\n :type initial_const: `float`\n :param batch_size: Internal size of batches on which adversarial samples are generated.\n :type batch_size: `int`\n :param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.\n :type decision_rule: `string`\n "
super(ElasticNet, self).__init__(classifier)
kwargs = {'confidence': confidence, 'targeted': targeted, 'learning_rate': learning_rate, 'binary_search_steps': binary_search_steps, 'max_iter': max_iter, 'beta': beta, 'initial_const': initial_const, 'batch_size': batch_size, 'decision_rule': decision_rule}
assert self.set_params(**kwargs)
| -4,750,755,069,409,097,000
|
Create an ElasticNet attack instance.
:param classifier: A trained model.
:type classifier: :class:`.Classifier`
:param confidence: Confidence of adversarial examples: a higher value produces examples that are farther
away, from the original input, but classified with higher confidence as the target class.
:type confidence: `float`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better
results but are slower to converge.
:type learning_rate: `float`
:param binary_search_steps: Number of times to adjust constant with binary search (positive value).
:type binary_search_steps: `int`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param beta: Hyperparameter trading off L2 minimization for L1 minimization.
:type beta: `float`
:param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance
and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in
Carlini and Wagner (2016).
:type initial_const: `float`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
:param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.
:type decision_rule: `string`
|
art/attacks/elastic_net.py
|
__init__
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def __init__(self, classifier, confidence=0.0, targeted=True, learning_rate=0.01, binary_search_steps=9, max_iter=10000, beta=0.001, initial_const=0.001, batch_size=128, decision_rule='EN'):
"\n Create an ElasticNet attack instance.\n\n :param classifier: A trained model.\n :type classifier: :class:`.Classifier`\n :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther\n away, from the original input, but classified with higher confidence as the target class.\n :type confidence: `float`\n :param targeted: Should the attack target one specific class.\n :type targeted: `bool`\n :param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better\n results but are slower to converge.\n :type learning_rate: `float`\n :param binary_search_steps: Number of times to adjust constant with binary search (positive value).\n :type binary_search_steps: `int`\n :param max_iter: The maximum number of iterations.\n :type max_iter: `int`\n :param beta: Hyperparameter trading off L2 minimization for L1 minimization.\n :type beta: `float`\n :param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance\n and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in\n Carlini and Wagner (2016).\n :type initial_const: `float`\n :param batch_size: Internal size of batches on which adversarial samples are generated.\n :type batch_size: `int`\n :param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.\n :type decision_rule: `string`\n "
super(ElasticNet, self).__init__(classifier)
kwargs = {'confidence': confidence, 'targeted': targeted, 'learning_rate': learning_rate, 'binary_search_steps': binary_search_steps, 'max_iter': max_iter, 'beta': beta, 'initial_const': initial_const, 'batch_size': batch_size, 'decision_rule': decision_rule}
assert self.set_params(**kwargs)
|
def _loss(self, x, x_adv):
'\n Compute the loss function values.\n\n :param x: An array with the original input.\n :type x: `np.ndarray`\n :param x_adv: An array with the adversarial input.\n :type x_adv: `np.ndarray`\n :return: A tuple holding the current logits, l1 distance, l2 distance and elastic net loss.\n :rtype: `(np.ndarray, float, float, float)`\n '
l1dist = np.sum(np.abs((x - x_adv)).reshape(x.shape[0], (- 1)), axis=1)
l2dist = np.sum(np.square((x - x_adv)).reshape(x.shape[0], (- 1)), axis=1)
endist = ((self.beta * l1dist) + l2dist)
z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
return (np.argmax(z, axis=1), l1dist, l2dist, endist)
| -4,277,042,601,095,220,700
|
Compute the loss function values.
:param x: An array with the original input.
:type x: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:return: A tuple holding the current logits, l1 distance, l2 distance and elastic net loss.
:rtype: `(np.ndarray, float, float, float)`
|
art/attacks/elastic_net.py
|
_loss
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def _loss(self, x, x_adv):
'\n Compute the loss function values.\n\n :param x: An array with the original input.\n :type x: `np.ndarray`\n :param x_adv: An array with the adversarial input.\n :type x_adv: `np.ndarray`\n :return: A tuple holding the current logits, l1 distance, l2 distance and elastic net loss.\n :rtype: `(np.ndarray, float, float, float)`\n '
l1dist = np.sum(np.abs((x - x_adv)).reshape(x.shape[0], (- 1)), axis=1)
l2dist = np.sum(np.square((x - x_adv)).reshape(x.shape[0], (- 1)), axis=1)
endist = ((self.beta * l1dist) + l2dist)
z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
return (np.argmax(z, axis=1), l1dist, l2dist, endist)
|
def _gradient_of_loss(self, target, x, x_adv, c):
'\n Compute the gradient of the loss function.\n\n :param target: An array with the target class (one-hot encoded).\n :type target: `np.ndarray`\n :param x: An array with the original input.\n :type x: `np.ndarray`\n :param x_adv: An array with the adversarial input.\n :type x_adv: `np.ndarray`\n :param c: Weight of the loss term aiming for classification as target.\n :type c: `float`\n :return: An array with the gradient of the loss function.\n :type target: `np.ndarray`\n '
z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
if self.targeted:
i_sub = np.argmax(target, axis=1)
i_add = np.argmax(((z * (1 - target)) + ((np.min(z, axis=1) - 1)[:, np.newaxis] * target)), axis=1)
else:
i_add = np.argmax(target, axis=1)
i_sub = np.argmax(((z * (1 - target)) + ((np.min(z, axis=1) - 1)[:, np.newaxis] * target)), axis=1)
loss_gradient = self.classifier.class_gradient(x_adv, label=i_add, logits=True)
loss_gradient -= self.classifier.class_gradient(x_adv, label=i_sub, logits=True)
loss_gradient = loss_gradient.reshape(x.shape)
c_mult = c
for _ in range((len(x.shape) - 1)):
c_mult = c_mult[:, np.newaxis]
loss_gradient *= c_mult
loss_gradient += (2 * (x_adv - x))
return loss_gradient
| 4,549,419,303,303,440,400
|
Compute the gradient of the loss function.
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:param x: An array with the original input.
:type x: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param c: Weight of the loss term aiming for classification as target.
:type c: `float`
:return: An array with the gradient of the loss function.
:type target: `np.ndarray`
|
art/attacks/elastic_net.py
|
_gradient_of_loss
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def _gradient_of_loss(self, target, x, x_adv, c):
'\n Compute the gradient of the loss function.\n\n :param target: An array with the target class (one-hot encoded).\n :type target: `np.ndarray`\n :param x: An array with the original input.\n :type x: `np.ndarray`\n :param x_adv: An array with the adversarial input.\n :type x_adv: `np.ndarray`\n :param c: Weight of the loss term aiming for classification as target.\n :type c: `float`\n :return: An array with the gradient of the loss function.\n :type target: `np.ndarray`\n '
z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
if self.targeted:
i_sub = np.argmax(target, axis=1)
i_add = np.argmax(((z * (1 - target)) + ((np.min(z, axis=1) - 1)[:, np.newaxis] * target)), axis=1)
else:
i_add = np.argmax(target, axis=1)
i_sub = np.argmax(((z * (1 - target)) + ((np.min(z, axis=1) - 1)[:, np.newaxis] * target)), axis=1)
loss_gradient = self.classifier.class_gradient(x_adv, label=i_add, logits=True)
loss_gradient -= self.classifier.class_gradient(x_adv, label=i_sub, logits=True)
loss_gradient = loss_gradient.reshape(x.shape)
c_mult = c
for _ in range((len(x.shape) - 1)):
c_mult = c_mult[:, np.newaxis]
loss_gradient *= c_mult
loss_gradient += (2 * (x_adv - x))
return loss_gradient
|
def _decay_learning_rate(self, global_step, end_learning_rate, decay_steps):
'\n Applies a square-root decay to the learning rate.\n\n :param global_step: Global step to use for the decay computation.\n :type global_step: `int`\n :param end_learning_rate: The minimal end learning rate.\n :type end_learning_rate: `float`\n :param decay_steps: Number of decayed steps.\n :type decay_steps: `int`\n :return: The decayed learning rate\n :rtype: `float`\n '
decayed_learning_rate = (((self.learning_rate - end_learning_rate) * ((1 - (global_step / decay_steps)) ** 2)) + end_learning_rate)
return decayed_learning_rate
| -6,411,596,237,137,459,000
|
Applies a square-root decay to the learning rate.
:param global_step: Global step to use for the decay computation.
:type global_step: `int`
:param end_learning_rate: The minimal end learning rate.
:type end_learning_rate: `float`
:param decay_steps: Number of decayed steps.
:type decay_steps: `int`
:return: The decayed learning rate
:rtype: `float`
|
art/attacks/elastic_net.py
|
_decay_learning_rate
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def _decay_learning_rate(self, global_step, end_learning_rate, decay_steps):
'\n Applies a square-root decay to the learning rate.\n\n :param global_step: Global step to use for the decay computation.\n :type global_step: `int`\n :param end_learning_rate: The minimal end learning rate.\n :type end_learning_rate: `float`\n :param decay_steps: Number of decayed steps.\n :type decay_steps: `int`\n :return: The decayed learning rate\n :rtype: `float`\n '
decayed_learning_rate = (((self.learning_rate - end_learning_rate) * ((1 - (global_step / decay_steps)) ** 2)) + end_learning_rate)
return decayed_learning_rate
|
def generate(self, x, **kwargs):
'\n Generate adversarial samples and return them in an array.\n\n :param x: An array with the original inputs to be attacked.\n :type x: `np.ndarray`\n :param y: If `self.targeted` is true, then `y` represents the target labels. Otherwise, the targets are the\n original class labels.\n :type y: `np.ndarray`\n :return: An array holding the adversarial examples.\n :rtype: `np.ndarray`\n '
x_adv = x.astype(NUMPY_DTYPE)
(clip_min, clip_max) = self.classifier.clip_values
params_cpy = dict(kwargs)
y = params_cpy.pop(str('y'), None)
self.set_params(**params_cpy)
if (self.targeted and (y is None)):
raise ValueError('Target labels `y` need to be provided for a targeted attack.')
if (y is None):
y = get_labels_np_array(self.classifier.predict(x, logits=False))
nb_batches = int(np.ceil((x_adv.shape[0] / float(self.batch_size))))
for batch_id in range(nb_batches):
logger.debug('Processing batch %i out of %i', batch_id, nb_batches)
(batch_index_1, batch_index_2) = ((batch_id * self.batch_size), ((batch_id + 1) * self.batch_size))
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
x_adv[batch_index_1:batch_index_2] = self._generate_batch(x_batch, y_batch)
x_adv = np.clip(x_adv, clip_min, clip_max)
logger.info('Success rate of EAD attack: %.2f%%', (100 * compute_success(self.classifier, x, y, x_adv, self.targeted)))
return x_adv
| 2,876,953,417,174,144,500
|
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param y: If `self.targeted` is true, then `y` represents the target labels. Otherwise, the targets are the
original class labels.
:type y: `np.ndarray`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
|
art/attacks/elastic_net.py
|
generate
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def generate(self, x, **kwargs):
'\n Generate adversarial samples and return them in an array.\n\n :param x: An array with the original inputs to be attacked.\n :type x: `np.ndarray`\n :param y: If `self.targeted` is true, then `y` represents the target labels. Otherwise, the targets are the\n original class labels.\n :type y: `np.ndarray`\n :return: An array holding the adversarial examples.\n :rtype: `np.ndarray`\n '
x_adv = x.astype(NUMPY_DTYPE)
(clip_min, clip_max) = self.classifier.clip_values
params_cpy = dict(kwargs)
y = params_cpy.pop(str('y'), None)
self.set_params(**params_cpy)
if (self.targeted and (y is None)):
raise ValueError('Target labels `y` need to be provided for a targeted attack.')
if (y is None):
y = get_labels_np_array(self.classifier.predict(x, logits=False))
nb_batches = int(np.ceil((x_adv.shape[0] / float(self.batch_size))))
for batch_id in range(nb_batches):
logger.debug('Processing batch %i out of %i', batch_id, nb_batches)
(batch_index_1, batch_index_2) = ((batch_id * self.batch_size), ((batch_id + 1) * self.batch_size))
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
x_adv[batch_index_1:batch_index_2] = self._generate_batch(x_batch, y_batch)
x_adv = np.clip(x_adv, clip_min, clip_max)
logger.info('Success rate of EAD attack: %.2f%%', (100 * compute_success(self.classifier, x, y, x_adv, self.targeted)))
return x_adv
|
def _generate_batch(self, x_batch, y_batch):
'\n Run the attack on a batch of images and labels.\n\n :param x_batch: A batch of original examples.\n :type x_batch: `np.ndarray`\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :return: A batch of adversarial examples.\n :rtype: `np.ndarray`\n '
c = (self.initial_const * np.ones(x_batch.shape[0]))
c_lower_bound = np.zeros(x_batch.shape[0])
c_upper_bound = (100000000000.0 * np.ones(x_batch.shape[0]))
o_best_dist = (np.inf * np.ones(x_batch.shape[0]))
o_best_attack = x_batch.copy()
for bss in range(self.binary_search_steps):
logger.debug('Binary search step %i out of %i (c_mean==%f)', bss, self.binary_search_steps, np.mean(c))
(best_dist, best_label, best_attack) = self._generate_bss(x_batch, y_batch, c)
o_best_attack[(best_dist < o_best_dist)] = best_attack[(best_dist < o_best_dist)]
o_best_dist[(best_dist < o_best_dist)] = best_dist[(best_dist < o_best_dist)]
(c, c_lower_bound, c_upper_bound) = self._update_const(y_batch, best_label, c, c_lower_bound, c_upper_bound)
return o_best_attack
| 3,505,459,309,053,157,000
|
Run the attack on a batch of images and labels.
:param x_batch: A batch of original examples.
:type x_batch: `np.ndarray`
:param y_batch: A batch of targets (0-1 hot).
:type y_batch: `np.ndarray`
:return: A batch of adversarial examples.
:rtype: `np.ndarray`
|
art/attacks/elastic_net.py
|
_generate_batch
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def _generate_batch(self, x_batch, y_batch):
'\n Run the attack on a batch of images and labels.\n\n :param x_batch: A batch of original examples.\n :type x_batch: `np.ndarray`\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :return: A batch of adversarial examples.\n :rtype: `np.ndarray`\n '
c = (self.initial_const * np.ones(x_batch.shape[0]))
c_lower_bound = np.zeros(x_batch.shape[0])
c_upper_bound = (100000000000.0 * np.ones(x_batch.shape[0]))
o_best_dist = (np.inf * np.ones(x_batch.shape[0]))
o_best_attack = x_batch.copy()
for bss in range(self.binary_search_steps):
logger.debug('Binary search step %i out of %i (c_mean==%f)', bss, self.binary_search_steps, np.mean(c))
(best_dist, best_label, best_attack) = self._generate_bss(x_batch, y_batch, c)
o_best_attack[(best_dist < o_best_dist)] = best_attack[(best_dist < o_best_dist)]
o_best_dist[(best_dist < o_best_dist)] = best_dist[(best_dist < o_best_dist)]
(c, c_lower_bound, c_upper_bound) = self._update_const(y_batch, best_label, c, c_lower_bound, c_upper_bound)
return o_best_attack
|
def _update_const(self, y_batch, best_label, c, c_lower_bound, c_upper_bound):
'\n Update constants.\n\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :param best_label: A batch of best labels.\n :type best_label: `np.ndarray`\n :param c: A batch of constants.\n :type c: `np.ndarray`\n :param c_lower_bound: A batch of lower bound constants.\n :type c_lower_bound: `np.ndarray`\n :param c_upper_bound: A batch of upper bound constants.\n :type c_upper_bound: `np.ndarray`\n :return: A tuple of three batches of updated constants and lower/upper bounds.\n :rtype: `tuple`\n '
def compare(o1, o2):
if self.targeted:
return (o1 == o2)
else:
return (o1 != o2)
for i in range(len(c)):
if (compare(best_label[i], np.argmax(y_batch[i])) and (best_label[i] != (- np.inf))):
c_upper_bound[i] = min(c_upper_bound[i], c[i])
if (c_upper_bound[i] < 1000000000.0):
c[i] = ((c_lower_bound[i] + c_upper_bound[i]) / 2.0)
else:
c_lower_bound[i] = max(c_lower_bound[i], c[i])
if (c_upper_bound[i] < 1000000000.0):
c[i] = ((c_lower_bound[i] + c_upper_bound[i]) / 2.0)
else:
c[i] *= 10
return (c, c_lower_bound, c_upper_bound)
| 4,948,646,841,644,730,000
|
Update constants.
:param y_batch: A batch of targets (0-1 hot).
:type y_batch: `np.ndarray`
:param best_label: A batch of best labels.
:type best_label: `np.ndarray`
:param c: A batch of constants.
:type c: `np.ndarray`
:param c_lower_bound: A batch of lower bound constants.
:type c_lower_bound: `np.ndarray`
:param c_upper_bound: A batch of upper bound constants.
:type c_upper_bound: `np.ndarray`
:return: A tuple of three batches of updated constants and lower/upper bounds.
:rtype: `tuple`
|
art/attacks/elastic_net.py
|
_update_const
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def _update_const(self, y_batch, best_label, c, c_lower_bound, c_upper_bound):
'\n Update constants.\n\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :param best_label: A batch of best labels.\n :type best_label: `np.ndarray`\n :param c: A batch of constants.\n :type c: `np.ndarray`\n :param c_lower_bound: A batch of lower bound constants.\n :type c_lower_bound: `np.ndarray`\n :param c_upper_bound: A batch of upper bound constants.\n :type c_upper_bound: `np.ndarray`\n :return: A tuple of three batches of updated constants and lower/upper bounds.\n :rtype: `tuple`\n '
def compare(o1, o2):
if self.targeted:
return (o1 == o2)
else:
return (o1 != o2)
for i in range(len(c)):
if (compare(best_label[i], np.argmax(y_batch[i])) and (best_label[i] != (- np.inf))):
c_upper_bound[i] = min(c_upper_bound[i], c[i])
if (c_upper_bound[i] < 1000000000.0):
c[i] = ((c_lower_bound[i] + c_upper_bound[i]) / 2.0)
else:
c_lower_bound[i] = max(c_lower_bound[i], c[i])
if (c_upper_bound[i] < 1000000000.0):
c[i] = ((c_lower_bound[i] + c_upper_bound[i]) / 2.0)
else:
c[i] *= 10
return (c, c_lower_bound, c_upper_bound)
|
def _generate_bss(self, x_batch, y_batch, c):
'\n Generate adversarial examples for a batch of inputs with a specific batch of constants.\n\n :param x_batch: A batch of original examples.\n :type x_batch: `np.ndarray`\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :param c: A batch of constants.\n :type c: `np.ndarray`\n :return: A tuple of best elastic distances, best labels, best attacks\n :rtype: `tuple`\n '
def compare(o1, o2):
if self.targeted:
return (o1 == o2)
else:
return (o1 != o2)
best_dist = (np.inf * np.ones(x_batch.shape[0]))
best_label = ([(- np.inf)] * x_batch.shape[0])
best_attack = x_batch.copy()
x_adv = x_batch.copy()
y_adv = x_batch.copy()
for it in range(self.max_iter):
logger.debug('Iteration step %i out of %i', it, self.max_iter)
lr = self._decay_learning_rate(global_step=it, end_learning_rate=0, decay_steps=self.max_iter)
grad = self._gradient_of_loss(target=y_batch, x=x_batch, x_adv=y_adv, c=c)
x_adv_next = self._shrinkage_threshold((y_adv - (lr * grad)), x_batch, self.beta)
y_adv = (x_adv_next + (((1.0 * it) / (it + 3)) * (x_adv_next - x_adv)))
x_adv = x_adv_next
(z, l1dist, l2dist, endist) = self._loss(x=x_batch, x_adv=x_adv)
if (self.decision_rule == 'EN'):
zip_set = zip(endist, z)
elif (self.decision_rule == 'L1'):
zip_set = zip(l1dist, z)
elif (self.decision_rule == 'L2'):
zip_set = zip(l2dist, z)
else:
raise ValueError('The decision rule only supports `EN`, `L1`, `L2`.')
for (j, (d, s)) in enumerate(zip_set):
if ((d < best_dist[j]) and compare(s, np.argmax(y_batch[j]))):
best_dist[j] = d
best_attack[j] = x_adv[j]
best_label[j] = s
return (best_dist, best_label, best_attack)
| 6,796,829,875,461,924,000
|
Generate adversarial examples for a batch of inputs with a specific batch of constants.
:param x_batch: A batch of original examples.
:type x_batch: `np.ndarray`
:param y_batch: A batch of targets (0-1 hot).
:type y_batch: `np.ndarray`
:param c: A batch of constants.
:type c: `np.ndarray`
:return: A tuple of best elastic distances, best labels, best attacks
:rtype: `tuple`
|
art/attacks/elastic_net.py
|
_generate_bss
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def _generate_bss(self, x_batch, y_batch, c):
'\n Generate adversarial examples for a batch of inputs with a specific batch of constants.\n\n :param x_batch: A batch of original examples.\n :type x_batch: `np.ndarray`\n :param y_batch: A batch of targets (0-1 hot).\n :type y_batch: `np.ndarray`\n :param c: A batch of constants.\n :type c: `np.ndarray`\n :return: A tuple of best elastic distances, best labels, best attacks\n :rtype: `tuple`\n '
def compare(o1, o2):
if self.targeted:
return (o1 == o2)
else:
return (o1 != o2)
best_dist = (np.inf * np.ones(x_batch.shape[0]))
best_label = ([(- np.inf)] * x_batch.shape[0])
best_attack = x_batch.copy()
x_adv = x_batch.copy()
y_adv = x_batch.copy()
for it in range(self.max_iter):
logger.debug('Iteration step %i out of %i', it, self.max_iter)
lr = self._decay_learning_rate(global_step=it, end_learning_rate=0, decay_steps=self.max_iter)
grad = self._gradient_of_loss(target=y_batch, x=x_batch, x_adv=y_adv, c=c)
x_adv_next = self._shrinkage_threshold((y_adv - (lr * grad)), x_batch, self.beta)
y_adv = (x_adv_next + (((1.0 * it) / (it + 3)) * (x_adv_next - x_adv)))
x_adv = x_adv_next
(z, l1dist, l2dist, endist) = self._loss(x=x_batch, x_adv=x_adv)
if (self.decision_rule == 'EN'):
zip_set = zip(endist, z)
elif (self.decision_rule == 'L1'):
zip_set = zip(l1dist, z)
elif (self.decision_rule == 'L2'):
zip_set = zip(l2dist, z)
else:
raise ValueError('The decision rule only supports `EN`, `L1`, `L2`.')
for (j, (d, s)) in enumerate(zip_set):
if ((d < best_dist[j]) and compare(s, np.argmax(y_batch[j]))):
best_dist[j] = d
best_attack[j] = x_adv[j]
best_label[j] = s
return (best_dist, best_label, best_attack)
|
@staticmethod
def _shrinkage_threshold(z, x, beta):
'\n Implement the element-wise projected shrinkage-threshold function.\n\n :param z: a batch of examples.\n :type z: `np.ndarray`\n :param x: a batch of original examples.\n :type x: `np.ndarray`\n :param beta: the shrink parameter.\n :type beta: `float`\n :return: a shrinked version of z.\n :rtype: `np.ndarray`\n '
cond1 = ((z - x) > beta)
cond2 = (np.abs((z - x)) <= beta)
cond3 = ((z - x) < (- beta))
upper = np.minimum((z - beta), 1.0)
lower = np.maximum((z + beta), 0.0)
result = (((cond1 * upper) + (cond2 * x)) + (cond3 * lower))
return result
| -1,600,375,961,511,805,000
|
Implement the element-wise projected shrinkage-threshold function.
:param z: a batch of examples.
:type z: `np.ndarray`
:param x: a batch of original examples.
:type x: `np.ndarray`
:param beta: the shrink parameter.
:type beta: `float`
:return: a shrinked version of z.
:rtype: `np.ndarray`
|
art/attacks/elastic_net.py
|
_shrinkage_threshold
|
Viktour19/adversarial-robustness-toolbox
|
python
|
@staticmethod
def _shrinkage_threshold(z, x, beta):
'\n Implement the element-wise projected shrinkage-threshold function.\n\n :param z: a batch of examples.\n :type z: `np.ndarray`\n :param x: a batch of original examples.\n :type x: `np.ndarray`\n :param beta: the shrink parameter.\n :type beta: `float`\n :return: a shrinked version of z.\n :rtype: `np.ndarray`\n '
cond1 = ((z - x) > beta)
cond2 = (np.abs((z - x)) <= beta)
cond3 = ((z - x) < (- beta))
upper = np.minimum((z - beta), 1.0)
lower = np.maximum((z + beta), 0.0)
result = (((cond1 * upper) + (cond2 * x)) + (cond3 * lower))
return result
|
def set_params(self, **kwargs):
"\n Take in a dictionary of parameters and applies attack-specific checks before saving them as attributes.\n\n :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther\n away, from the original input, but classified with higher confidence as the target class.\n :type confidence: `float`\n :param targeted: Should the attack target one specific class.\n :type targeted: `bool`\n :param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better\n results but are slower to converge.\n :type learning_rate: `float`\n :param binary_search_steps: Number of times to adjust constant with binary search (positive value).\n :type binary_search_steps: `int`\n :param max_iter: The maximum number of iterations.\n :type max_iter: `int`\n :param beta: Hyperparameter trading off L2 minimization for L1 minimization.\n :type beta: `float`\n :param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance\n and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in\n Carlini and Wagner (2016).\n :type initial_const: `float`\n :param batch_size: Internal size of batches on which adversarial samples are generated.\n :type batch_size: `int`\n :param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.\n :type decision_rule: `string`\n "
super(ElasticNet, self).set_params(**kwargs)
if ((type(self.binary_search_steps) is not int) or (self.binary_search_steps < 0)):
raise ValueError('The number of binary search steps must be a non-negative integer.')
if ((type(self.max_iter) is not int) or (self.max_iter < 0)):
raise ValueError('The number of iterations must be a non-negative integer.')
if ((type(self.batch_size) is not int) or (self.batch_size < 1)):
raise ValueError('The batch size must be an integer greater than zero.')
if ((not isinstance(self.decision_rule, six.string_types)) or (self.decision_rule not in ['EN', 'L1', 'L2'])):
raise ValueError('The decision rule only supports `EN`, `L1`, `L2`.')
return True
| -3,578,810,808,230,838,000
|
Take in a dictionary of parameters and applies attack-specific checks before saving them as attributes.
:param confidence: Confidence of adversarial examples: a higher value produces examples that are farther
away, from the original input, but classified with higher confidence as the target class.
:type confidence: `float`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better
results but are slower to converge.
:type learning_rate: `float`
:param binary_search_steps: Number of times to adjust constant with binary search (positive value).
:type binary_search_steps: `int`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param beta: Hyperparameter trading off L2 minimization for L1 minimization.
:type beta: `float`
:param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance
and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in
Carlini and Wagner (2016).
:type initial_const: `float`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
:param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.
:type decision_rule: `string`
|
art/attacks/elastic_net.py
|
set_params
|
Viktour19/adversarial-robustness-toolbox
|
python
|
def set_params(self, **kwargs):
"\n Take in a dictionary of parameters and applies attack-specific checks before saving them as attributes.\n\n :param confidence: Confidence of adversarial examples: a higher value produces examples that are farther\n away, from the original input, but classified with higher confidence as the target class.\n :type confidence: `float`\n :param targeted: Should the attack target one specific class.\n :type targeted: `bool`\n :param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better\n results but are slower to converge.\n :type learning_rate: `float`\n :param binary_search_steps: Number of times to adjust constant with binary search (positive value).\n :type binary_search_steps: `int`\n :param max_iter: The maximum number of iterations.\n :type max_iter: `int`\n :param beta: Hyperparameter trading off L2 minimization for L1 minimization.\n :type beta: `float`\n :param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance\n and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in\n Carlini and Wagner (2016).\n :type initial_const: `float`\n :param batch_size: Internal size of batches on which adversarial samples are generated.\n :type batch_size: `int`\n :param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.\n :type decision_rule: `string`\n "
super(ElasticNet, self).set_params(**kwargs)
if ((type(self.binary_search_steps) is not int) or (self.binary_search_steps < 0)):
raise ValueError('The number of binary search steps must be a non-negative integer.')
if ((type(self.max_iter) is not int) or (self.max_iter < 0)):
raise ValueError('The number of iterations must be a non-negative integer.')
if ((type(self.batch_size) is not int) or (self.batch_size < 1)):
raise ValueError('The batch size must be an integer greater than zero.')
if ((not isinstance(self.decision_rule, six.string_types)) or (self.decision_rule not in ['EN', 'L1', 'L2'])):
raise ValueError('The decision rule only supports `EN`, `L1`, `L2`.')
return True
|
def begin_delete(self, resource_group_name, service_endpoint_policy_name, service_endpoint_policy_definition_name, **kwargs):
"Deletes the specified ServiceEndpoint policy definitions.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_endpoint_policy_name: The name of the Service Endpoint Policy.\n :type service_endpoint_policy_name: str\n :param service_endpoint_policy_definition_name: The name of the service endpoint policy\n definition.\n :type service_endpoint_policy_definition_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the ARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._delete_initial(resource_group_name=resource_group_name, service_endpoint_policy_name=service_endpoint_policy_name, service_endpoint_policy_definition_name=service_endpoint_policy_definition_name, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url('service_endpoint_policy_name', service_endpoint_policy_name, 'str'), 'serviceEndpointPolicyDefinitionName': self._serialize.url('service_endpoint_policy_definition_name', service_endpoint_policy_definition_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 372,264,440,778,412,100
|
Deletes the specified ServiceEndpoint policy definitions.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the Service Endpoint Policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_service_endpoint_policy_definitions_operations.py
|
begin_delete
|
AriZavala2/azure-sdk-for-python
|
python
|
def begin_delete(self, resource_group_name, service_endpoint_policy_name, service_endpoint_policy_definition_name, **kwargs):
"Deletes the specified ServiceEndpoint policy definitions.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_endpoint_policy_name: The name of the Service Endpoint Policy.\n :type service_endpoint_policy_name: str\n :param service_endpoint_policy_definition_name: The name of the service endpoint policy\n definition.\n :type service_endpoint_policy_definition_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the ARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._delete_initial(resource_group_name=resource_group_name, service_endpoint_policy_name=service_endpoint_policy_name, service_endpoint_policy_definition_name=service_endpoint_policy_definition_name, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url('service_endpoint_policy_name', service_endpoint_policy_name, 'str'), 'serviceEndpointPolicyDefinitionName': self._serialize.url('service_endpoint_policy_definition_name', service_endpoint_policy_definition_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
def get(self, resource_group_name, service_endpoint_policy_name, service_endpoint_policy_definition_name, **kwargs):
'Get the specified service endpoint policy definitions from service endpoint policy.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_endpoint_policy_name: The name of the service endpoint policy name.\n :type service_endpoint_policy_name: str\n :param service_endpoint_policy_definition_name: The name of the service endpoint policy\n definition name.\n :type service_endpoint_policy_definition_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ServiceEndpointPolicyDefinition, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-02-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url('service_endpoint_policy_name', service_endpoint_policy_name, 'str'), 'serviceEndpointPolicyDefinitionName': self._serialize.url('service_endpoint_policy_definition_name', service_endpoint_policy_definition_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| -5,661,216,426,108,417,000
|
Get the specified service endpoint policy definitions from service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy name.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition name.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_service_endpoint_policy_definitions_operations.py
|
get
|
AriZavala2/azure-sdk-for-python
|
python
|
def get(self, resource_group_name, service_endpoint_policy_name, service_endpoint_policy_definition_name, **kwargs):
'Get the specified service endpoint policy definitions from service endpoint policy.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_endpoint_policy_name: The name of the service endpoint policy name.\n :type service_endpoint_policy_name: str\n :param service_endpoint_policy_definition_name: The name of the service endpoint policy\n definition name.\n :type service_endpoint_policy_definition_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ServiceEndpointPolicyDefinition, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-02-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url('service_endpoint_policy_name', service_endpoint_policy_name, 'str'), 'serviceEndpointPolicyDefinitionName': self._serialize.url('service_endpoint_policy_definition_name', service_endpoint_policy_definition_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
def begin_create_or_update(self, resource_group_name, service_endpoint_policy_name, service_endpoint_policy_definition_name, service_endpoint_policy_definitions, **kwargs):
"Creates or updates a service endpoint policy definition in the specified service endpoint\n policy.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_endpoint_policy_name: The name of the service endpoint policy.\n :type service_endpoint_policy_name: str\n :param service_endpoint_policy_definition_name: The name of the service endpoint policy\n definition name.\n :type service_endpoint_policy_definition_name: str\n :param service_endpoint_policy_definitions: Parameters supplied to the create or update service\n endpoint policy operation.\n :type service_endpoint_policy_definitions: ~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the ARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either ServiceEndpointPolicyDefinition or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, service_endpoint_policy_name=service_endpoint_policy_name, service_endpoint_policy_definition_name=service_endpoint_policy_definition_name, service_endpoint_policy_definitions=service_endpoint_policy_definitions, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url('service_endpoint_policy_name', service_endpoint_policy_name, 'str'), 'serviceEndpointPolicyDefinitionName': self._serialize.url('service_endpoint_policy_definition_name', service_endpoint_policy_definition_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 2,602,152,074,877,339,000
|
Creates or updates a service endpoint policy definition in the specified service endpoint
policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition name.
:type service_endpoint_policy_definition_name: str
:param service_endpoint_policy_definitions: Parameters supplied to the create or update service
endpoint policy operation.
:type service_endpoint_policy_definitions: ~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicyDefinition or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_service_endpoint_policy_definitions_operations.py
|
begin_create_or_update
|
AriZavala2/azure-sdk-for-python
|
python
|
def begin_create_or_update(self, resource_group_name, service_endpoint_policy_name, service_endpoint_policy_definition_name, service_endpoint_policy_definitions, **kwargs):
"Creates or updates a service endpoint policy definition in the specified service endpoint\n policy.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_endpoint_policy_name: The name of the service endpoint policy.\n :type service_endpoint_policy_name: str\n :param service_endpoint_policy_definition_name: The name of the service endpoint policy\n definition name.\n :type service_endpoint_policy_definition_name: str\n :param service_endpoint_policy_definitions: Parameters supplied to the create or update service\n endpoint policy operation.\n :type service_endpoint_policy_definitions: ~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the ARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either ServiceEndpointPolicyDefinition or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, service_endpoint_policy_name=service_endpoint_policy_name, service_endpoint_policy_definition_name=service_endpoint_policy_definition_name, service_endpoint_policy_definitions=service_endpoint_policy_definitions, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url('service_endpoint_policy_name', service_endpoint_policy_name, 'str'), 'serviceEndpointPolicyDefinitionName': self._serialize.url('service_endpoint_policy_definition_name', service_endpoint_policy_definition_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
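A minimal usage sketch for the record above, not part of the dataset: the credential setup, subscription id, and resource names are placeholder assumptions, and passing a plain dict as the definition body assumes the SDK's permissive deserialization.

from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

# Placeholders -- substitute real values before running.
client = NetworkManagementClient(DefaultAzureCredential(), '<subscription-id>')
poller = client.service_endpoint_policy_definitions.begin_create_or_update(
    resource_group_name='my-rg',
    service_endpoint_policy_name='my-policy',
    service_endpoint_policy_definition_name='allow-storage',
    service_endpoint_policy_definitions={
        'description': 'Allow traffic to storage accounts',
        'service': 'Microsoft.Storage',
        'service_resources': ['/subscriptions/<subscription-id>'],
    },
)
definition = poller.result()  # blocks until the long-running operation finishes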
def list_by_resource_group(self, resource_group_name, service_endpoint_policy_name, **kwargs):
'Gets all service endpoint policy definitions in a service end point policy.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_endpoint_policy_name: The name of the service endpoint policy name.\n :type service_endpoint_policy_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ServiceEndpointPolicyDefinitionListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinitionListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-02-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url('service_endpoint_policy_name', service_endpoint_policy_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinitionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
| -7,057,014,527,002,522,000
|
Gets all service endpoint policy definitions in a service end point policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyDefinitionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_service_endpoint_policy_definitions_operations.py
|
list_by_resource_group
|
AriZavala2/azure-sdk-for-python
|
python
|
def list_by_resource_group(self, resource_group_name, service_endpoint_policy_name, **kwargs):
'Gets all service endpoint policy definitions in a service end point policy.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param service_endpoint_policy_name: The name of the service endpoint policy name.\n :type service_endpoint_policy_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ServiceEndpointPolicyDefinitionListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinitionListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-02-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url('service_endpoint_policy_name', service_endpoint_policy_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinitionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
|
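A hedged sketch of consuming the pager returned above, reusing the hypothetical client from the previous sketch; ItemPaged fetches pages lazily, following next_link until the service stops returning one.

for definition in client.service_endpoint_policy_definitions.list_by_resource_group(
    resource_group_name='my-rg',
    service_endpoint_policy_name='my-policy',
):
    print(definition.name)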
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
'Register args.'
return adam(parser)
| -6,958,970,453,263,638,000
|
Register args.
|
espnet/optimizer/pytorch.py
|
add_arguments
|
18445864529/espnet
|
python
|
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
return adam(parser)
|
@staticmethod
def from_args(target, args: argparse.Namespace):
'Initialize optimizer from argparse Namespace.\n\n Args:\n target: for pytorch `model.parameters()`,\n for chainer `model`\n args (argparse.Namespace): parsed command-line args\n\n '
return torch.optim.Adam(target, lr=args.lr, weight_decay=args.weight_decay, betas=(args.beta1, args.beta2))
| 8,811,270,693,832,894,000
|
Initialize optimizer from argparse Namespace.
Args:
target: for pytorch `model.parameters()`,
for chainer `model`
args (argparse.Namespace): parsed command-line args
|
espnet/optimizer/pytorch.py
|
from_args
|
18445864529/espnet
|
python
|
@staticmethod
def from_args(target, args: argparse.Namespace):
'Initialize optimizer from argparse Namespace.\n\n Args:\n target: for pytorch `model.parameters()`,\n for chainer `model`\n args (argparse.Namespace): parsed command-line args\n\n '
return torch.optim.Adam(target, lr=args.lr, weight_decay=args.weight_decay, betas=(args.beta1, args.beta2))
|
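A sketch of how the two static methods above are meant to compose: add_arguments registers the flags via adam(parser) (a helper not shown in the record), then from_args builds the optimizer from the parsed namespace. The flag names below are assumptions mirroring the attributes from_args reads.

import argparse
import torch

parser = argparse.ArgumentParser()
# Assumed flags; argparse maps '--weight-decay' to args.weight_decay.
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight-decay', type=float, default=0.0)
parser.add_argument('--beta1', type=float, default=0.9)
parser.add_argument('--beta2', type=float, default=0.999)
args = parser.parse_args([])  # empty argv, so the defaults apply

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(
    model.parameters(),
    lr=args.lr,
    weight_decay=args.weight_decay,
    betas=(args.beta1, args.beta2),
)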
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
'Register args.'
return sgd(parser)
| -6,548,284,684,296,972,000
|
Register args.
|
espnet/optimizer/pytorch.py
|
add_arguments
|
18445864529/espnet
|
python
|
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
return sgd(parser)
|
@staticmethod
def from_args(target, args: argparse.Namespace):
'Initialize optimizer from argparse Namespace.\n\n Args:\n target: for pytorch `model.parameters()`,\n for chainer `model`\n args (argparse.Namespace): parsed command-line args\n\n '
return torch.optim.SGD(target, lr=args.lr, weight_decay=args.weight_decay)
| -1,372,828,614,147,998,000
|
Initialize optimizer from argparse Namespace.
Args:
target: for pytorch `model.parameters()`,
for chainer `model`
args (argparse.Namespace): parsed command-line args
|
espnet/optimizer/pytorch.py
|
from_args
|
18445864529/espnet
|
python
|
@staticmethod
def from_args(target, args: argparse.Namespace):
'Initialize optimizer from argparse Namespace.\n\n Args:\n target: for pytorch `model.parameters()`,\n for chainer `model`\n args (argparse.Namespace): parsed command-line args\n\n '
return torch.optim.SGD(target, lr=args.lr, weight_decay=args.weight_decay)
|
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
'Register args.'
return adadelta(parser)
| 1,074,600,873,994,255,400
|
Register args.
|
espnet/optimizer/pytorch.py
|
add_arguments
|
18445864529/espnet
|
python
|
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
return adadelta(parser)
|
@staticmethod
def from_args(target, args: argparse.Namespace):
'Initialize optimizer from argparse Namespace.\n\n Args:\n target: for pytorch `model.parameters()`,\n for chainer `model`\n args (argparse.Namespace): parsed command-line args\n\n '
return torch.optim.Adadelta(target, rho=args.rho, eps=args.eps, weight_decay=args.weight_decay)
| -835,190,803,597,173,800
|
Initialize optimizer from argparse Namespace.
Args:
target: for pytorch `model.parameters()`,
for chainer `model`
args (argparse.Namespace): parsed command-line args
|
espnet/optimizer/pytorch.py
|
from_args
|
18445864529/espnet
|
python
|
@staticmethod
def from_args(target, args: argparse.Namespace):
'Initialize optimizer from argparse Namespace.\n\n Args:\n target: for pytorch `model.parameters()`,\n for chainer `model`\n args (argparse.Namespace): parsed command-line args\n\n '
return torch.optim.Adadelta(target, rho=args.rho, eps=args.eps, weight_decay=args.weight_decay)
|
def save_form(self, request, form, change):
'Here we pluck out the data to create a new cloned repo.\n\n Form is an instance of NewRepoForm.\n '
name = form.cleaned_data['name']
origin_url = form.cleaned_data['origin_url']
res = ClonedRepo(name=name, origin=origin_url)
LOG.info(('New repo form produced %s' % str(res)))
form.save(commit=False)
return res
| -5,554,163,953,822,890,000
|
Here we pluck out the data to create a new cloned repo.
Form is an instance of NewRepoForm.
|
registry/admin.py
|
save_form
|
Tinche/django-bower-cache
|
python
|
def save_form(self, request, form, change):
'Here we pluck out the data to create a new cloned repo.\n\n Form is an instance of NewRepoForm.\n '
name = form.cleaned_data['name']
origin_url = form.cleaned_data['origin_url']
res = ClonedRepo(name=name, origin=origin_url)
LOG.info(('New repo form produced %s' % str(res)))
form.save(commit=False)
return res
|
def get_readonly_fields(self, request, obj=None):
'Hide the origin field from editing, but not creation.'
return (('origin',) if obj else ())
| 2,997,009,665,775,063,600
|
Hide the origin field from editing, but not creation.
|
registry/admin.py
|
get_readonly_fields
|
Tinche/django-bower-cache
|
python
|
def get_readonly_fields(self, request, obj=None):
return (('origin',) if obj else ())
|
def add_view(self, request, **kwargs):
"A custom add_view, to catch exceptions from 'save_model'.\n\n Just to be clear, this is very filthy.\n "
try:
return super(ClonedRepoAdmin, self).add_view(request, **kwargs)
except ValidationError:
return redirect(request.path)
| 5,035,110,400,913,509,000
|
A custom add_view, to catch exceptions from 'save_model'.
Just to be clear, this is very filthy.
|
registry/admin.py
|
add_view
|
Tinche/django-bower-cache
|
python
|
def add_view(self, request, **kwargs):
"A custom add_view, to catch exceptions from 'save_model'.\n\n Just to be clear, this is very filthy.\n "
try:
return super(ClonedRepoAdmin, self).add_view(request, **kwargs)
except ValidationError:
return redirect(request.path)
|
def git_pull_view(self, request, repo_name):
'Perform a git pull and redirect back to the repo.'
LOG.info(('Pull requested for %s.' % repo_name))
repo = get_object_or_404(self.model, name=repo_name)
repo.pull()
self.message_user(request, ('Repo %s successfully updated.' % repo_name), level=messages.SUCCESS)
return redirect('admin:registry_clonedrepo_change', repo_name)
| 1,953,967,152,983,711,000
|
Perform a git pull and redirect back to the repo.
|
registry/admin.py
|
git_pull_view
|
Tinche/django-bower-cache
|
python
|
def git_pull_view(self, request, repo_name):
LOG.info(('Pull requested for %s.' % repo_name))
repo = get_object_or_404(self.model, name=repo_name)
repo.pull()
self.message_user(request, ('Repo %s successfully updated.' % repo_name), level=messages.SUCCESS)
return redirect('admin:registry_clonedrepo_change', repo_name)
|
def update_all_view(self, request):
'Update all repositories and redirect back to the repo list.'
LOG.info('Total update requested.')
total_count = errors = 0
for repo in self.model.objects.all():
total_count += 1
try:
repo.pull()
except:
LOG.exception(('While updating %s.' % repo))
errors += 1
msg = '{0} repos successfully updated, {1} failed.'.format(total_count, errors)
self.message_user(request, msg, level=messages.SUCCESS)
return redirect('admin:registry_clonedrepo_changelist')
| 3,799,456,152,004,995,600
|
Update all repositories and redirect back to the repo list.
|
registry/admin.py
|
update_all_view
|
Tinche/django-bower-cache
|
python
|
def update_all_view(self, request):
LOG.info('Total update requested.')
total_count = errors = 0
for repo in self.model.objects.all():
total_count += 1
try:
repo.pull()
except:
LOG.exception(('While updating %s.' % repo))
errors += 1
msg = '{0} repos successfully updated, {1} failed.'.format(total_count, errors)
self.message_user(request, msg, level=messages.SUCCESS)
return redirect('admin:registry_clonedrepo_changelist')
|
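The pull and update-all views above still need to be mounted on the admin site; the record does not show that wiring, so here is a guess using ModelAdmin.get_urls from modern Django (the original project may use the older url() patterns). The URL paths and names are assumptions.

from django.contrib import admin
from django.urls import path

class ClonedRepoAdminSketch(admin.ModelAdmin):
    # Assumes git_pull_view and update_all_view from the records above
    # are defined on this class.
    def get_urls(self):
        # Custom routes go first so they take priority over the default
        # change-view pattern.
        custom = [
            path('update-all/', self.admin_site.admin_view(self.update_all_view),
                 name='registry_clonedrepo_update_all'),
            path('<str:repo_name>/pull/', self.admin_site.admin_view(self.git_pull_view),
                 name='registry_clonedrepo_pull'),
        ]
        return custom + super().get_urls()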
def clean(self):
'Validate the new repo form.\n\n Might perform a request to upstream Bower.'
cleaned_data = super(NewRepoForm, self).clean()
origin_url = cleaned_data['origin_url']
origin_source = cleaned_data['origin_source']
if ((origin_source == 'origin_url') and (not origin_url)):
msg = 'Please provide an origin URL.'
self._errors['origin_url'] = self.error_class([msg])
del cleaned_data['origin_url']
del cleaned_data['origin_source']
elif (origin_source == 'upstream'):
upstream = settings.UPSTREAM_BOWER_REGISTRY
name = cleaned_data['name']
try:
upstream_pkg = bowerlib.get_package(upstream, name)
except IOError as exc:
msg = str(exc)
self._errors['origin_source'] = self.error_class([msg])
else:
if (not upstream_pkg):
msg = ('Upstream registry has no knowledge of %s.' % name)
self._errors['name'] = self.error_class([msg])
del cleaned_data['name']
else:
upstream_origin_url = upstream_pkg['url']
cleaned_data['origin_url'] = upstream_origin_url
return cleaned_data
| -8,217,690,029,197,501,000
|
Validate the new repo form.
Might perform a request to upstream Bower.
|
registry/admin.py
|
clean
|
Tinche/django-bower-cache
|
python
|
def clean(self):
'Validate the new repo form.\n\n Might perform a request to upstream Bower.'
cleaned_data = super(NewRepoForm, self).clean()
origin_url = cleaned_data['origin_url']
origin_source = cleaned_data['origin_source']
if ((origin_source == 'origin_url') and (not origin_url)):
msg = 'Please provide an origin URL.'
self._errors['origin_url'] = self.error_class([msg])
del cleaned_data['origin_url']
del cleaned_data['origin_source']
elif (origin_source == 'upstream'):
upstream = settings.UPSTREAM_BOWER_REGISTRY
name = cleaned_data['name']
try:
upstream_pkg = bowerlib.get_package(upstream, name)
except IOError as exc:
msg = str(exc)
self._errors['origin_source'] = self.error_class([msg])
else:
if (not upstream_pkg):
msg = ('Upstream registry has no knowledge of %s.' % name)
self._errors['name'] = self.error_class([msg])
del cleaned_data['name']
else:
upstream_origin_url = upstream_pkg['url']
cleaned_data['origin_url'] = upstream_origin_url
return cleaned_data
|
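A standalone illustration of the upstream lookup path inside clean(); the registry URL and package name are placeholders, and bowerlib.get_package is used exactly as in the record (it returns a dict with a 'url' key, or a falsy value when the package is unknown).

import bowerlib

UPSTREAM = 'https://registry.bower.io'  # placeholder registry URL
pkg = bowerlib.get_package(UPSTREAM, 'jquery')
if pkg:
    print(pkg['url'])  # the origin URL the form would store in cleaned_data
else:
    print('Upstream registry has no knowledge of jquery.')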
def test_postcode(self, faker, num_samples):
'https://stackoverflow.com/questions/33391412/validation-for-irish-eircode'
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch('(?:^[AC-FHKNPRTV-Y][0-9]{2}|D6W)[ -]?[0-9AC-FHKNPRTV-Y]{4}$', postcode)
| 1,849,701,552,127,415,300
|
https://stackoverflow.com/questions/33391412/validation-for-irish-eircode
|
tests/providers/test_address.py
|
test_postcode
|
Pipoline/faker
|
python
|
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch('(?:^[AC-FHKNPRTV-Y][0-9]{2}|D6W)[ -]?[0-9AC-FHKNPRTV-Y]{4}$', postcode)
|
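The Eircode pattern from the test above, exercised on a few hand-picked samples (the sample codes are illustrative, not from the source); D6W is the one routing key that breaks the letter-digit-digit shape, which is why the regex special-cases it.

import re

EIRCODE = '(?:^[AC-FHKNPRTV-Y][0-9]{2}|D6W)[ -]?[0-9AC-FHKNPRTV-Y]{4}$'

assert re.fullmatch(EIRCODE, 'A65 F4E2')      # ordinary routing key + unique id
assert re.fullmatch(EIRCODE, 'D6W1234')       # the special Dublin 6W key
assert not re.fullmatch(EIRCODE, 'Z65 F4E2')  # Z is not a legal first letter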
def test_street_address(self, faker, num_samples):
'\n Tests street address.\n\n A street address must consist of a street name, a place type and a number, and end with a period.\n '
for _ in range(num_samples):
address = faker.street_address()
assert (address[(- 1)] == '.')
assert address.split(' ')[(- 2)][0].islower()
assert re.fullmatch('\\d{1,4}\\.', address.split(' ')[(- 1)])
| -2,341,447,547,604,299,300
|
Tests street address.
A street address must consist of a street name, a place type and a number, and end with a period.
|
tests/providers/test_address.py
|
test_street_address
|
Pipoline/faker
|
python
|
def test_street_address(self, faker, num_samples):
'\n Tests street address.\n\n A street address must consist of a street name, a place type and a number, and end with a period.\n '
for _ in range(num_samples):
address = faker.street_address()
assert (address[(- 1)] == '.')
assert address.split(' ')[(- 2)][0].islower()
assert re.fullmatch('\\d{1,4}\\.', address.split(' ')[(- 1)])
|
def test_street_address_with_county(self, faker, num_samples):
'Tests street address with county. A street address must be:\n - in three rows,\n - starting with a valid street address,\n - contain a valid post code,\n - contain the place name validly capitalized.\n '
for _ in range(num_samples):
address = faker.street_address_with_county()
assert (len(address.split('\n')) == 3)
(first, second, last) = address.split('\n')
assert first[0].isupper()
assert first.split(' ')[(- 2)][0].islower()
assert re.fullmatch('\\d{1,4}\\.', first.split(' ')[(- 1)])
assert second.split(' ')[(- 1)][0].islower()
assert second.split(' ')[0][0].isupper()
assert re.fullmatch('H-[1-9]\\d{3}', last.split(' ')[0])
assert last.split(' ')[(- 1)][0].isupper()
| -3,124,989,030,859,973,600
|
Tests street address with county. A street address must be:
- in three rows,
- starting with a valid street address,
- contain a valid post code,
- contain the place name validly capitalized.
|
tests/providers/test_address.py
|
test_street_address_with_county
|
Pipoline/faker
|
python
|
def test_street_address_with_county(self, faker, num_samples):
'Tests street address with county. A street address must be:\n - in three rows,\n - starting with a valid street address,\n - contain a valid post code,\n - contain the place name validly capitalized.\n '
for _ in range(num_samples):
address = faker.street_address_with_county()
assert (len(address.split('\n')) == 3)
(first, second, last) = address.split('\n')
assert first[0].isupper()
assert first.split(' ')[(- 2)][0].islower()
assert re.fullmatch('\\d{1,4}\\.', first.split(' ')[(- 1)])
assert second.split(' ')[(- 1)][0].islower()
assert second.split(' ')[0][0].isupper()
assert re.fullmatch('H-[1-9]\\d{3}', last.split(' ')[0])
assert last.split(' ')[(- 1)][0].isupper()
|
@pytest.mark.parametrize('street_title,street_suffix,expected', [('Фрунзе', 'ул.', 'ул. Фрунзе'), ('Ставропольская', 'ул.', 'ул. Ставропольская'), ('Фрунзе', 'пр.', 'пр. Фрунзе'), ('Осенняя', 'пр.', 'пр. Осенний'), ('Гвардейская', 'пр.', 'пр. Гвардейский'), ('Рыбацкая', 'пр.', 'пр. Рыбацкий'), ('Безымянная', 'пр.', 'пр. Безымянный'), ('Проезжая', 'ш.', 'ш. Проезжее'), ('Магистральная', 'ш.', 'ш. Магистральное')], ids=['feminine_suffix_and_noflex_title', 'feminine_suffix_and_flex_title', 'non_feminine_suffix_and_noflex_title', 'masc_suffix_and_irregular_masc_title', 'masc_suffix_and_ck_street_stem', 'masc_suffix_and_uk_street_stem', 'masc_suffix_and_other_stem', 'neu_suffx_and_iregular_neu_street_title', 'neu_suffix_and_regular_street_title'])
def test_street_name_lexical(self, faker, street_title, street_suffix, expected):
'Test that random street names are formed correctly, given\n the case of suffixes and streets that have been randomly selected.\n '
title_patch = mock.patch('faker.providers.address.ru_RU.Provider.street_title', autospec=True, return_value=street_title)
suffix_patch = mock.patch('faker.providers.address.ru_RU.Provider.street_suffix', autospec=True, return_value=street_suffix)
with title_patch, suffix_patch:
result = faker.street_name()
assert (result == expected)
| -6,692,898,459,401,839,000
|
Test that random street names are formed correctly, given
the case of suffixes and streets that have been randomly selected.
|
tests/providers/test_address.py
|
test_street_name_lexical
|
Pipoline/faker
|
python
|
@pytest.mark.parametrize('street_title,street_suffix,expected', [('Фрунзе', 'ул.', 'ул. Фрунзе'), ('Ставропольская', 'ул.', 'ул. Ставропольская'), ('Фрунзе', 'пр.', 'пр. Фрунзе'), ('Осенняя', 'пр.', 'пр. Осенний'), ('Гвардейская', 'пр.', 'пр. Гвардейский'), ('Рыбацкая', 'пр.', 'пр. Рыбацкий'), ('Безымянная', 'пр.', 'пр. Безымянный'), ('Проезжая', 'ш.', 'ш. Проезжее'), ('Магистральная', 'ш.', 'ш. Магистральное')], ids=['feminine_suffix_and_noflex_title', 'feminine_suffix_and_flex_title', 'non_feminine_suffix_and_noflex_title', 'masc_suffix_and_irregular_masc_title', 'masc_suffix_and_ck_street_stem', 'masc_suffix_and_uk_street_stem', 'masc_suffix_and_other_stem', 'neu_suffx_and_iregular_neu_street_title', 'neu_suffix_and_regular_street_title'])
def test_street_name_lexical(self, faker, street_title, street_suffix, expected):
'Test that random street names are formed correctly, given\n the case of suffixes and streets that have been randomly selected.\n '
title_patch = mock.patch('faker.providers.address.ru_RU.Provider.street_title', autospec=True, return_value=street_title)
suffix_patch = mock.patch('faker.providers.address.ru_RU.Provider.street_suffix', autospec=True, return_value=street_suffix)
with title_patch, suffix_patch:
result = faker.street_name()
assert (result == expected)
|
def select_plugins(session, directory: str) -> List[Plugin]:
'\n Select all plugins that should be tested in this session.\n Considers the current Python version and operating systems against the supported ones,\n as well as the user plugins selection (via the PLUGINS environment variable).\n '
assert (session.python is not None), 'Session python version is not specified'
blacklist = ['.isort.cfg', 'examples']
plugins = [{'dir_name': x, 'path': x} for x in sorted(os.listdir(os.path.join(BASE, directory))) if (x not in blacklist)]
ret = []
skipped = []
for plugin in plugins:
if (not ((plugin['dir_name'] in PLUGINS) or (PLUGINS == ['ALL']))):
skipped.append(f"Deselecting {plugin['dir_name']}: User request")
continue
setup_py = os.path.join(BASE, directory, plugin['path'], 'setup.py')
classifiers = session.run('python', setup_py, '--name', '--classifiers', silent=True).splitlines()
plugin_name = classifiers.pop(0)
plugin_python_versions = get_setup_python_versions(classifiers)
python_supported = (session.python in plugin_python_versions)
plugin_os_names = get_plugin_os_names(classifiers)
os_supported = (get_current_os() in plugin_os_names)
if (not python_supported):
py_str = ', '.join(plugin_python_versions)
skipped.append(f"Deselecting {plugin['dir_name']} : Incompatible Python {session.python}. Supports [{py_str}]")
continue
if (not os_supported):
os_str = ', '.join(plugin_os_names)
skipped.append(f"Deselecting {plugin['dir_name']}: Incompatible OS {get_current_os()}. Supports [{os_str}]")
continue
ret.append(Plugin(name=plugin_name, path=plugin['path'], module=('hydra_plugins.' + plugin['dir_name'])))
for msg in skipped:
logger.warn(msg)
if (len(ret) == 0):
logger.warn('No plugins selected')
return ret
| -1,294,786,704,034,799,900
|
Select all plugins that should be tested in this session.
Considers the current Python version and operating systems against the supported ones,
as well as the user plugins selection (via the PLUGINS environment variable).
|
noxfile.py
|
select_plugins
|
strx2322/hydra
|
python
|
def select_plugins(session, directory: str) -> List[Plugin]:
'\n Select all plugins that should be tested in this session.\n Considers the current Python version and operating systems against the supported ones,\n as well as the user plugins selection (via the PLUGINS environment variable).\n '
assert (session.python is not None), 'Session python version is not specified'
blacklist = ['.isort.cfg', 'examples']
plugins = [{'dir_name': x, 'path': x} for x in sorted(os.listdir(os.path.join(BASE, directory))) if (x not in blacklist)]
ret = []
skipped = []
for plugin in plugins:
if (not ((plugin['dir_name'] in PLUGINS) or (PLUGINS == ['ALL']))):
skipped.append(f"Deselecting {plugin['dir_name']}: User request")
continue
setup_py = os.path.join(BASE, directory, plugin['path'], 'setup.py')
classifiers = session.run('python', setup_py, '--name', '--classifiers', silent=True).splitlines()
plugin_name = classifiers.pop(0)
plugin_python_versions = get_setup_python_versions(classifiers)
python_supported = (session.python in plugin_python_versions)
plugin_os_names = get_plugin_os_names(classifiers)
os_supported = (get_current_os() in plugin_os_names)
if (not python_supported):
py_str = ', '.join(plugin_python_versions)
skipped.append(f"Deselecting {plugin['dir_name']} : Incompatible Python {session.python}. Supports [{py_str}]")
continue
if (not os_supported):
os_str = ', '.join(plugin_os_names)
skipped.append(f"Deselecting {plugin['dir_name']}: Incompatible OS {get_current_os()}. Supports [{os_str}]")
continue
ret.append(Plugin(name=plugin_name, path=plugin['path'], module=('hydra_plugins.' + plugin['dir_name'])))
for msg in skipped:
logger.warn(msg)
if (len(ret) == 0):
logger.warn('No plugins selected')
return ret
|
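get_setup_python_versions and get_plugin_os_names are not shown in the record; this is a guess at what the version parser could look like, based only on the trove-classifier format it consumes.

from typing import List

def get_setup_python_versions(classifiers: List[str]) -> List[str]:
    # Keep the suffix of classifiers like
    # 'Programming Language :: Python :: 3.8'.
    prefix = 'Programming Language :: Python :: '
    return [
        c[len(prefix):]
        for c in classifiers
        if c.startswith(prefix) and c[len(prefix):][:1].isdigit()
    ]

print(get_setup_python_versions([
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3.9',
    'Operating System :: OS Independent',
]))  # -> ['3.8', '3.9']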
def determine_appliers(self, label_id, version):
'Figure out which layers to apply by checking the GET args'
if ('layers' in self.request.GET.keys()):
return utils.handle_specified_layers(self.request.GET['layers'], label_id, version, self.__class__.sectional_links)
else:
layer_creator = generator.LayerCreator()
layer_creator.add_layers(generator.LayerCreator.LAYERS.keys(), label_id, version, self.__class__.sectional_links)
return layer_creator.get_appliers()
| 2,181,151,661,838,374,400
|
Figure out which layers to apply by checking the GET args
|
regulations/views/partial.py
|
determine_appliers
|
DalavanCloud/regulations-site
|
python
|
def determine_appliers(self, label_id, version):
if ('layers' in self.request.GET.keys()):
return utils.handle_specified_layers(self.request.GET['layers'], label_id, version, self.__class__.sectional_links)
else:
layer_creator = generator.LayerCreator()
layer_creator.add_layers(generator.LayerCreator.LAYERS.keys(), label_id, version, self.__class__.sectional_links)
return layer_creator.get_appliers()
|
@classmethod
def snapshot_message_from_exchange(cls, msg: Dict[(str, Any)], timestamp: float, *args, **kwargs):
'\n Convert json snapshot data into standard OrderBookMessage format\n :param msg: json snapshot data from live web socket stream\n :param timestamp: timestamp attached to incoming data\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(*args, message_type=OrderBookMessageType.SNAPSHOT, content=msg, timestamp=timestamp, **kwargs)
| 2,630,302,331,819,362,000
|
Convert json snapshot data into standard OrderBookMessage format
:param msg: json snapshot data from live web socket stream
:param timestamp: timestamp attached to incoming data
:return: BinarzOrderBookMessage
|
hummingbot/connector/exchange/binarz/binarz_order_book.py
|
snapshot_message_from_exchange
|
amirhosein-fasihi/hummingbot
|
python
|
@classmethod
def snapshot_message_from_exchange(cls, msg: Dict[(str, Any)], timestamp: float, *args, **kwargs):
'\n Convert json snapshot data into standard OrderBookMessage format\n :param msg: json snapshot data from live web socket stream\n :param timestamp: timestamp attached to incoming data\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(*args, message_type=OrderBookMessageType.SNAPSHOT, content=msg, timestamp=timestamp, **kwargs)
|
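An illustrative call to the classmethod above; the enclosing class name (BinarzOrderBook) and every field inside the payload are assumptions, since the record only shows the method body.

from hummingbot.connector.exchange.binarz.binarz_order_book import BinarzOrderBook

snapshot = {
    'trading_pair': 'BTC-USDT',   # hypothetical payload fields
    'update_id': 12345,
    'bids': [['30000.0', '0.5']],
    'asks': [['30010.0', '0.7']],
}
msg = BinarzOrderBook.snapshot_message_from_exchange(snapshot, timestamp=1650000000.0)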
@classmethod
def snapshot_message_from_db(cls, record: RowProxy):
'\n *used for backtesting\n Convert a row of snapshot data into standard OrderBookMessage format\n :param record: a row of snapshot data from the database\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(message_type=OrderBookMessageType.SNAPSHOT, content=record.json, timestamp=record.timestamp)
| 6,984,927,935,560,781,000
|
*used for backtesting
Convert a row of snapshot data into standard OrderBookMessage format
:param record: a row of snapshot data from the database
:return: BinarzOrderBookMessage
|
hummingbot/connector/exchange/binarz/binarz_order_book.py
|
snapshot_message_from_db
|
amirhosein-fasihi/hummingbot
|
python
|
@classmethod
def snapshot_message_from_db(cls, record: RowProxy):
'\n *used for backtesting\n Convert a row of snapshot data into standard OrderBookMessage format\n :param record: a row of snapshot data from the database\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(message_type=OrderBookMessageType.SNAPSHOT, content=record.json, timestamp=record.timestamp)
|
@classmethod
def diff_message_from_exchange(cls, msg: Dict[(str, any)], timestamp: Optional[float]=None):
'\n Convert json diff data into standard OrderBookMessage format\n :param msg: json diff data from live web socket stream\n :param timestamp: timestamp attached to incoming data\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(message_type=OrderBookMessageType.DIFF, content=msg, timestamp=timestamp)
| -8,485,638,870,252,996,000
|
Convert json diff data into standard OrderBookMessage format
:param msg: json diff data from live web socket stream
:param timestamp: timestamp attached to incoming data
:return: BinarzOrderBookMessage
|
hummingbot/connector/exchange/binarz/binarz_order_book.py
|
diff_message_from_exchange
|
amirhosein-fasihi/hummingbot
|
python
|
@classmethod
def diff_message_from_exchange(cls, msg: Dict[(str, any)], timestamp: Optional[float]=None):
'\n Convert json diff data into standard OrderBookMessage format\n :param msg: json diff data from live web socket stream\n :param timestamp: timestamp attached to incoming data\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(message_type=OrderBookMessageType.DIFF, content=msg, timestamp=timestamp)
|
@classmethod
def diff_message_from_db(cls, record: RowProxy):
'\n *used for backtesting\n Convert a row of diff data into standard OrderBookMessage format\n :param record: a row of diff data from the database\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(message_type=OrderBookMessageType.DIFF, content=record.json, timestamp=record.timestamp)
| -4,607,532,064,758,849,000
|
*used for backtesting
Convert a row of diff data into standard OrderBookMessage format
:param record: a row of diff data from the database
:return: BinarzOrderBookMessage
|
hummingbot/connector/exchange/binarz/binarz_order_book.py
|
diff_message_from_db
|
amirhosein-fasihi/hummingbot
|
python
|
@classmethod
def diff_message_from_db(cls, record: RowProxy):
'\n *used for backtesting\n Convert a row of diff data into standard OrderBookMessage format\n :param record: a row of diff data from the database\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(message_type=OrderBookMessageType.DIFF, content=record.json, timestamp=record.timestamp)
|
@classmethod
def trade_message_from_exchange(cls, msg: BinarzTrade, timestamp: Optional[float]=None):
'\n Convert trade data into standard OrderBookMessage format\n '
msg = {'exchange_order_id': msg.order_id, 'trade_type': msg.type, 'price': msg.price, 'amount': msg.amount}
return BinarzOrderBookMessage(message_type=OrderBookMessageType.TRADE, content=msg, timestamp=timestamp)
| -265,529,715,953,605,300
|
Convert trade data into standard OrderBookMessage format
|
hummingbot/connector/exchange/binarz/binarz_order_book.py
|
trade_message_from_exchange
|
amirhosein-fasihi/hummingbot
|
python
|
@classmethod
def trade_message_from_exchange(cls, msg: BinarzTrade, timestamp: Optional[float]=None):
'\n \n '
msg = {'exchange_order_id': msg.order_id, 'trade_type': msg.type, 'price': msg.price, 'amount': msg.amount}
return BinarzOrderBookMessage(message_type=OrderBookMessageType.TRADE, content=msg, timestamp=timestamp)
|