Dataset schema (one row per source file; ⌀ marks a nullable column):
hexsha: stringlengths 40..40
size: int64 4..996k
ext: stringclasses 8 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 4..245
max_stars_repo_name: stringlengths 6..130
max_stars_repo_head_hexsha: stringlengths 40..40
max_stars_repo_licenses: listlengths 1..10
max_stars_count: int64 1..191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24..24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24..24 ⌀
max_issues_repo_path: stringlengths 4..245
max_issues_repo_name: stringlengths 6..130
max_issues_repo_head_hexsha: stringlengths 40..40
max_issues_repo_licenses: listlengths 1..10
max_issues_count: int64 1..67k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24..24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24..24 ⌀
max_forks_repo_path: stringlengths 4..245
max_forks_repo_name: stringlengths 6..130
max_forks_repo_head_hexsha: stringlengths 40..40
max_forks_repo_licenses: listlengths 1..10
max_forks_count: int64 1..105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24..24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24..24 ⌀
content: stringlengths 4..996k
avg_line_length: float64 1.33..58.2k
max_line_length: int64 2..323k
alphanum_fraction: float64 0..0.97
content_no_comment: stringlengths 0..946k
is_comment_constant_removed: bool, 2 classes
is_sharp_comment_removed: bool, 1 class
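Each record below pairs one Python source file with repository metadata and a comment-stripped copy of the same file. As a minimal sketch of how rows with this schema could be inspected, assuming the table were exported to a Parquet file (the file name is hypothetical):

import pandas as pd

# Hypothetical export of this table; columns follow the schema listed above.
df = pd.read_parquet("code_corpus_sample.parquet")

# Columns marked ⌀ in the schema are nullable and arrive as None/NaN.
starred = df[df["max_stars_count"].notna()]
print(starred[["max_stars_repo_name", "max_stars_count", "size"]])

# Compare a file with its comment-stripped counterpart for one record.
row = df.iloc[0]
print(len(row["content"]), len(row["content_no_comment"]))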
hexsha: 79088a8c887039e1eda9eff891c06f98b9e50b1a | size: 2,930 | ext: py | lang: Python
max_stars_repo: configs/eftnet/R2_ttf53_whh_3lr_1x.py @ mrsempress/mmdetection (head cb650560c97a2fe56a9b369a1abc8ec17e06583a) | licenses: ["Apache-2.0"] | max_stars_count: null | stars event min/max datetime: null / null
max_issues_repo: configs/eftnet/R2_ttf53_whh_3lr_1x.py @ mrsempress/mmdetection (head cb650560c97a2fe56a9b369a1abc8ec17e06583a) | licenses: ["Apache-2.0"] | max_issues_count: null | issues event min/max datetime: null / null
max_forks_repo: configs/eftnet/R2_ttf53_whh_3lr_1x.py @ mrsempress/mmdetection (head cb650560c97a2fe56a9b369a1abc8ec17e06583a) | licenses: ["Apache-2.0"] | max_forks_count: null | forks event min/max datetime: null / null
content:
# model settings
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
wh_heatmap=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
hm_center_ratio=0.27,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'ttf53_whh_3lr_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
avg_line_length: 29.59596 | max_line_length: 86 | alphanum_fraction: 0.63959
content_no_comment:
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
wh_heatmap=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
hm_center_ratio=0.27,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
log_config = dict(interval=20)
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'ttf53_whh_3lr_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
is_comment_constant_removed: true | is_sharp_comment_removed: true
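Note that the config above references train_pipeline and test_pipeline without defining them, so it cannot be loaded standalone. As a hedged sketch of what those definitions typically look like in mmdetection-style configs (values illustrative, not recovered from this repository):

# Illustrative pipelines only; scales, flip ratio, and keys are assumptions.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]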
hexsha: 79088b9c085896c740bddf2abb2d9a0921a98fba | size: 2,146 | ext: py | lang: Python
max_stars_repo: samples.py @ lovpuss/xmind2testcase2021 (head 1d01e6ebd4889373aba94e32a0948347f87aef06) | licenses: ["MIT"] | max_stars_count: null | stars event min/max datetime: null / null
max_issues_repo: samples.py @ lovpuss/xmind2testcase2021 (head 1d01e6ebd4889373aba94e32a0948347f87aef06) | licenses: ["MIT"] | max_issues_count: null | issues event min/max datetime: null / null
max_forks_repo: samples.py @ lovpuss/xmind2testcase2021 (head 1d01e6ebd4889373aba94e32a0948347f87aef06) | licenses: ["MIT"] | max_forks_count: null | forks event min/max datetime: null / null
content:
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import json
import xmind
import logging
from xmind2testcase2021.zentao import xmind_to_zentao_csv_file
from xmind2testcase2021.testlink import xmind_to_testlink_xml_file
from xmind2testcase2021.utils import xmind_testcase_to_json_file
from xmind2testcase2021.utils import xmind_testsuite_to_json_file
from xmind2testcase2021.utils import get_xmind_testcase_list
from xmind2testcase2021.utils import get_xmind_testsuite_list
logging.basicConfig(level=logging.INFO)
def main():
xmind_file = 'docs/xmind_testcase_template_v1.1.xmind'
print('Start to convert XMind file: %s' % xmind_file)
    # 1. testcase import files
# (1) zentao
zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
print('Convert XMind file to zentao csv file successfully: %s' % zentao_csv_file)
# (2) testlink
testlink_xml_file = xmind_to_testlink_xml_file(xmind_file)
print('Convert XMind file to testlink xml file successfully: %s' % testlink_xml_file)
    # 2. testcase json files
# (1) testsuite
testsuite_json_file = xmind_testsuite_to_json_file(xmind_file)
print('Convert XMind file to testsuite json file successfully: %s' % testsuite_json_file)
# (2) testcase
testcase_json_file = xmind_testcase_to_json_file(xmind_file)
print('Convert XMind file to testcase json file successfully: %s' % testcase_json_file)
    # 3. test dict/json data
# (1) testsuite
testsuites = get_xmind_testsuite_list(xmind_file)
    print('Convert XMind to testsuites dict data:\n%s' %
json.dumps(testsuites, indent=2, separators=(',', ': '), ensure_ascii=False))
# (2) testcase
testcases = get_xmind_testcase_list(xmind_file)
    print('Convert XMind to testcases dict data:\n%s' %
json.dumps(testcases, indent=4, separators=(',', ': '), ensure_ascii=False))
# (3) xmind file
workbook = xmind.load(xmind_file)
print('Convert XMind to Json data:\n%s' %
json.dumps(workbook.getData(), indent=2, separators=(',', ': '), ensure_ascii=False))
print('Finished conversion, Congratulations!')
if __name__ == '__main__':
main()
avg_line_length: 39.740741 | max_line_length: 95 | alphanum_fraction: 0.741379
content_no_comment:
import json
import xmind
import logging
from xmind2testcase2021.zentao import xmind_to_zentao_csv_file
from xmind2testcase2021.testlink import xmind_to_testlink_xml_file
from xmind2testcase2021.utils import xmind_testcase_to_json_file
from xmind2testcase2021.utils import xmind_testsuite_to_json_file
from xmind2testcase2021.utils import get_xmind_testcase_list
from xmind2testcase2021.utils import get_xmind_testsuite_list
logging.basicConfig(level=logging.INFO)
def main():
xmind_file = 'docs/xmind_testcase_template_v1.1.xmind'
print('Start to convert XMind file: %s' % xmind_file)
zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
print('Convert XMind file to zentao csv file successfully: %s' % zentao_csv_file)
testlink_xml_file = xmind_to_testlink_xml_file(xmind_file)
print('Convert XMind file to testlink xml file successfully: %s' % testlink_xml_file)
testsuite_json_file = xmind_testsuite_to_json_file(xmind_file)
print('Convert XMind file to testsuite json file successfully: %s' % testsuite_json_file)
testcase_json_file = xmind_testcase_to_json_file(xmind_file)
print('Convert XMind file to testcase json file successfully: %s' % testcase_json_file)
testsuites = get_xmind_testsuite_list(xmind_file)
    print('Convert XMind to testsuites dict data:\n%s' %
json.dumps(testsuites, indent=2, separators=(',', ': '), ensure_ascii=False))
testcases = get_xmind_testcase_list(xmind_file)
    print('Convert XMind to testcases dict data:\n%s' %
json.dumps(testcases, indent=4, separators=(',', ': '), ensure_ascii=False))
workbook = xmind.load(xmind_file)
print('Convert XMind to Json data:\n%s' %
json.dumps(workbook.getData(), indent=2, separators=(',', ': '), ensure_ascii=False))
print('Finished conversion, Congratulations!')
if __name__ == '__main__':
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79088de94df4279e83bd716206278a21dad0cc77 | size: 84 | ext: py | lang: Python
max_stars_repo: src/search_a_song_page.py @ AlexCaranha/QueryByHumming (head 17c4f9c9994d3be657bdd5d858d47f1800bf2209) | licenses: ["MIT"] | max_stars_count: 1 | stars event min/max datetime: 2022-02-08T03:15:24.000Z / 2022-02-08T03:15:24.000Z
max_issues_repo: src/search_a_song_page.py @ AlexCaranha/QueryByHumming (head 17c4f9c9994d3be657bdd5d858d47f1800bf2209) | licenses: ["MIT"] | max_issues_count: null | issues event min/max datetime: null / null
max_forks_repo: src/search_a_song_page.py @ AlexCaranha/QueryByHumming (head 17c4f9c9994d3be657bdd5d858d47f1800bf2209) | licenses: ["MIT"] | max_forks_count: null | forks event min/max datetime: null / null
content:
import streamlit as st
def render():
st.write("You are in Search a song page")
avg_line_length: 16.8 | max_line_length: 45 | alphanum_fraction: 0.690476
content_no_comment:
import streamlit as st
def render():
st.write("You are in Search a song page")
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79088ece85377829fc095f0bd9b65f077e6f6124 | size: 5,982 | ext: py | lang: Python
max_stars_repo: nginx_conf_gen.py @ alex-v-yakimov/nginx-conf (head 75c752a602eb2946775c3346a8e79154450fc315) | licenses: ["BSD-2-Clause"] | max_stars_count: null | stars event min/max datetime: null / null
max_issues_repo: nginx_conf_gen.py @ alex-v-yakimov/nginx-conf (head 75c752a602eb2946775c3346a8e79154450fc315) | licenses: ["BSD-2-Clause"] | max_issues_count: null | issues event min/max datetime: null / null
max_forks_repo: nginx_conf_gen.py @ alex-v-yakimov/nginx-conf (head 75c752a602eb2946775c3346a8e79154450fc315) | licenses: ["BSD-2-Clause"] | max_forks_count: null | forks event min/max datetime: null / null
content:
#!/usr/bin/python3
import sys
import json
import getopt
import os
import jsonschema
import subprocess
if os.geteuid() != 0:
print('You must be a root user')
sys.exit(72)
json_file = ''
nginx_conf = '/etc/nginx/nginx.conf'
schema_file = ''
test = False
#------Parse command-line options------
def usage():
    print ('Usage: ' + sys.argv[0] + ' -j json_file [-c nginx_conf] [-s schema_file] [-t] [-v] [-h]')
print (' options:')
print (' -j json_file : JSON file (required option)')
print (' -c nginx_conf : Nginx config file (default: /etc/nginx/nginx.conf)')
print (' -s schema_file : JSON schema file')
print (" -t : Test Nginx config file by command '/usr/sbin/nginx -t -c <nginx.conf>'")
print (' -v : Version')
print (' -h : Show this help page')
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hvtj:c:s:')
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(73)
if len(args) != 0:
print('Incorrect options: ' + ' '.join(args))
usage()
sys.exit(74)
else:
for o, a in opts:
if o == '-h':
usage()
sys.exit()
elif o == '-v':
print('version: 0.0.1')
sys.exit()
elif o == '-t':
test = True
elif o == '-j':
json_file = a
elif o == '-c':
nginx_conf = a
elif o == '-s':
schema_file = a
if json_file == '':
print('JSON file is required')
usage()
sys.exit(75)
#------Get json and schema data------
try:
fh = open(json_file, 'r')
except IOError:
print("Could not opent the file '{0}' for reading".format(json_file))
sys.exit(76)
data=json.load(fh)
fh.close()
if schema_file != '':
try:
fh = open(schema_file, 'r')
except IOError:
print("Could not opent the file '{0}' for reading".format(schema_file))
sys.exit(77)
schema=json.load(fh)
fh.close()
try:
jsonschema.validate(data, schema)
except Exception as e:
print(e)
sys.exit(78)
#------Nginx functions------
def pcrejit():
try:
output = subprocess.check_output('/usr/sbin/nginx -V', stderr=subprocess.STDOUT, shell=True)
if output.decode().find('--with-pcre-jit') != -1:
return 'on'
else:
return 'off'
except Exception:
return 'off'
def test_conf():
if test:
try:
output = subprocess.check_output('/usr/sbin/nginx -t -c ' + nginx_conf, stderr=subprocess.STDOUT, shell=True)
print(output.decode())
except Exception as e:
print(e)
#------Test 'location /'------
location_root_test = []
for server in data.get('http').get('server'):
for location in server.get('location'):
location_root_test.append(location.get('URI'))
if '/' not in location_root_test:
print("There is not 'location /' in JSON file")
sys.exit(79)
#------Make Nginx config file------
try:
fh = open(nginx_conf, 'w')
except IOError:
print("Could not open the file '{0}' for writing".format(nginx_conf))
sys.exit(78)
fh.write( 'user ' + json.dumps(data.get('user')) + ';\n' )
fh.write( 'worker_processes ' + json.dumps(data.get('worker_processes')) + ';\n' )
fh.write( 'error_log ' + json.dumps(data.get('error_log').get('file')) + ' '
+ json.dumps(data.get('error_log').get('level')) + ';\n' )
fh.write( 'pid ' + json.dumps(data.get('pid')) + ';\n' )
fh.write( 'pcre_jit ' + pcrejit() + ';\n' )
fh.write( 'events { worker_connections ' + json.dumps(data.get('events').get('worker_connections')) + '; }\n' )
fh.write( 'http {\n')
fh.write( ' include ' + json.dumps(data.get('http').get('include')) + ';\n' )
fh.write( ' default_type ' + json.dumps(data.get('http').get('default_type')) + ';\n' )
fh.write( ' log_format ' + json.dumps(data.get('http').get('log_format').get('name')) + " "
+ json.dumps(data.get('http').get('log_format').get('string')) + ";\n" )
fh.write( ' access_log ' + json.dumps(data.get('http').get('access_log').get('file')) + ' '
+ json.dumps(data.get('http').get('access_log').get('name')) + ';\n' )
for server in data.get('http').get('server'):
fh.write(' server {\n')
fh.write(' listen ' + json.dumps(server.get('listen')) + ';\n')
fh.write(' server_name ' + json.dumps(server.get('server_name')) + ';\n')
# noindex 'location = /robots.txt'
for extra in server.get('extra', []):
if extra == 'noindex':
fh.write(' location = /robots.txt {\n')
fh.write(' default_type "text/plain";\n')
fh.write(' return 200 "User-agent: *\\nDisallow: /";\n')
fh.write(' }\n')
for location in server.get('location'):
fh.write(' location ' + location.get('modifier') + ' '
+ location.get('URI') + ' {\n')
for configuration in location.get('configuration'):
if configuration == 'proxy_set_header':
for proxy_set_header in location.get('configuration').get(configuration):
fh.write(' proxy_set_header ' + proxy_set_header.get('field') + ' '
+ json.dumps(proxy_set_header.get('value')) + ';\n')
elif configuration == 'return':
fh.write(' return ' + location.get('configuration').get(configuration).get('code') + ' '
+ json.dumps(location.get('configuration').get(configuration).get('text')) + ';\n')
else:
fh.write(' ' + configuration + ' ' + json.dumps(location.get('configuration').get(configuration)) + ';\n')
fh.write( ' }\n' )
fh.write( ' }\n' )
for upstream in data.get('http').get('upstream'):
fh.write(' upstream ' + json.dumps(upstream.get('name')) + ' {\n')
for server in upstream.get('server'):
fh.write(' server ' + json.dumps(server.get('address')))
for parameter in server.get('parameters'):
fh.write(' ' + json.dumps(parameter))
fh.write(';\n')
fh.write( ' }\n' )
fh.write( '}\n')
fh.close()
test_conf()
avg_line_length: 32.51087 | max_line_length: 121 | alphanum_fraction: 0.57322
content_no_comment:
import sys
import json
import getopt
import os
import jsonschema
import subprocess
if os.geteuid() != 0:
print('You must be a root user')
sys.exit(72)
json_file = ''
nginx_conf = '/etc/nginx/nginx.conf'
schema_file = ''
test = False
def usage():
    print ('Usage: ' + sys.argv[0] + ' -j json_file [-c nginx_conf] [-s schema_file] [-t] [-v] [-h]')
print (' options:')
print (' -j json_file : JSON file (required option)')
print (' -c nginx_conf : Nginx config file (default: /etc/nginx/nginx.conf)')
print (' -s schema_file : JSON schema file')
print (" -t : Test Nginx config file by command '/usr/sbin/nginx -t -c <nginx.conf>'")
print (' -v : Version')
print (' -h : Show this help page')
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hvtj:c:s:')
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(73)
if len(args) != 0:
print('Incorrect options: ' + ' '.join(args))
usage()
sys.exit(74)
else:
for o, a in opts:
if o == '-h':
usage()
sys.exit()
elif o == '-v':
print('version: 0.0.1')
sys.exit()
elif o == '-t':
test = True
elif o == '-j':
json_file = a
elif o == '-c':
nginx_conf = a
elif o == '-s':
schema_file = a
if json_file == '':
print('JSON file is required')
usage()
sys.exit(75)
try:
fh = open(json_file, 'r')
except IOError:
print("Could not opent the file '{0}' for reading".format(json_file))
sys.exit(76)
data=json.load(fh)
fh.close()
if schema_file != '':
try:
fh = open(schema_file, 'r')
except IOError:
print("Could not opent the file '{0}' for reading".format(schema_file))
sys.exit(77)
schema=json.load(fh)
fh.close()
try:
jsonschema.validate(data, schema)
except Exception as e:
print(e)
sys.exit(78)
def pcrejit():
try:
output = subprocess.check_output('/usr/sbin/nginx -V', stderr=subprocess.STDOUT, shell=True)
if output.decode().find('--with-pcre-jit') != -1:
return 'on'
else:
return 'off'
except Exception:
return 'off'
def test_conf():
if test:
try:
output = subprocess.check_output('/usr/sbin/nginx -t -c ' + nginx_conf, stderr=subprocess.STDOUT, shell=True)
print(output.decode())
except Exception as e:
print(e)
location_root_test = []
for server in data.get('http').get('server'):
for location in server.get('location'):
location_root_test.append(location.get('URI'))
if '/' not in location_root_test:
print("There is not 'location /' in JSON file")
sys.exit(79)
try:
fh = open(nginx_conf, 'w')
except IOError:
print("Could not open the file '{0}' for writing".format(nginx_conf))
sys.exit(78)
fh.write( 'user ' + json.dumps(data.get('user')) + ';\n' )
fh.write( 'worker_processes ' + json.dumps(data.get('worker_processes')) + ';\n' )
fh.write( 'error_log ' + json.dumps(data.get('error_log').get('file')) + ' '
+ json.dumps(data.get('error_log').get('level')) + ';\n' )
fh.write( 'pid ' + json.dumps(data.get('pid')) + ';\n' )
fh.write( 'pcre_jit ' + pcrejit() + ';\n' )
fh.write( 'events { worker_connections ' + json.dumps(data.get('events').get('worker_connections')) + '; }\n' )
fh.write( 'http {\n')
fh.write( ' include ' + json.dumps(data.get('http').get('include')) + ';\n' )
fh.write( ' default_type ' + json.dumps(data.get('http').get('default_type')) + ';\n' )
fh.write( ' log_format ' + json.dumps(data.get('http').get('log_format').get('name')) + " "
+ json.dumps(data.get('http').get('log_format').get('string')) + ";\n" )
fh.write( ' access_log ' + json.dumps(data.get('http').get('access_log').get('file')) + ' '
+ json.dumps(data.get('http').get('access_log').get('name')) + ';\n' )
for server in data.get('http').get('server'):
fh.write(' server {\n')
fh.write(' listen ' + json.dumps(server.get('listen')) + ';\n')
fh.write(' server_name ' + json.dumps(server.get('server_name')) + ';\n')
for extra in server.get('extra', []):
if extra == 'noindex':
fh.write(' location = /robots.txt {\n')
fh.write(' default_type "text/plain";\n')
fh.write(' return 200 "User-agent: *\\nDisallow: /";\n')
fh.write(' }\n')
for location in server.get('location'):
fh.write(' location ' + location.get('modifier') + ' '
+ location.get('URI') + ' {\n')
for configuration in location.get('configuration'):
if configuration == 'proxy_set_header':
for proxy_set_header in location.get('configuration').get(configuration):
fh.write(' proxy_set_header ' + proxy_set_header.get('field') + ' '
+ json.dumps(proxy_set_header.get('value')) + ';\n')
elif configuration == 'return':
fh.write(' return ' + location.get('configuration').get(configuration).get('code') + ' '
+ json.dumps(location.get('configuration').get(configuration).get('text')) + ';\n')
else:
fh.write(' ' + configuration + ' ' + json.dumps(location.get('configuration').get(configuration)) + ';\n')
fh.write( ' }\n' )
fh.write( ' }\n' )
for upstream in data.get('http').get('upstream'):
fh.write(' upstream ' + json.dumps(upstream.get('name')) + ' {\n')
for server in upstream.get('server'):
fh.write(' server ' + json.dumps(server.get('address')))
for parameter in server.get('parameters'):
fh.write(' ' + json.dumps(parameter))
fh.write(';\n')
fh.write( ' }\n' )
fh.write( '}\n')
fh.close()
test_conf()
is_comment_constant_removed: true | is_sharp_comment_removed: true
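The generator above pulls a fixed set of keys from its input JSON. As a minimal sketch, reconstructed from the script's .get() calls with illustrative values (none taken from the repository; the script also exits unless some location has URI "/"), an input that satisfies it could look like:

# Minimal input for nginx_conf_gen.py, derived from the lookups in the script.
minimal_input = {
    "user": "nginx",
    "worker_processes": 2,
    "error_log": {"file": "/var/log/nginx/error.log", "level": "warn"},
    "pid": "/run/nginx.pid",
    "events": {"worker_connections": 1024},
    "http": {
        "include": "/etc/nginx/mime.types",
        "default_type": "application/octet-stream",
        "log_format": {"name": "main", "string": "$remote_addr $request"},
        "access_log": {"file": "/var/log/nginx/access.log", "name": "main"},
        "server": [
            {
                "listen": 80,
                "server_name": "example.com",
                "location": [
                    {
                        "modifier": "",
                        "URI": "/",  # required: the script exits without a "/" location
                        "configuration": {"proxy_pass": "http://backend"},
                    }
                ],
            }
        ],
        "upstream": [
            {
                "name": "backend",
                "server": [{"address": "127.0.0.1:8000", "parameters": ["weight=1"]}],
            }
        ],
    },
}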
hexsha: 79088f0901ba7bed69e591f39b9713f34e924bce | size: 2,303 | ext: py | lang: Python
max_stars_repo: build/lib/django_simple_file_handler/migrations/0003_auto_20180525_1035.py @ jonathanrickard/django-simple-file-handler (head f714b93b941b3a677a8fd2a2eb425afaaa0a2d62) | licenses: ["MIT"] | max_stars_count: 5 | stars event min/max datetime: 2020-09-17T16:41:01.000Z / 2021-05-21T22:42:56.000Z
max_issues_repo: build/lib/django_simple_file_handler/migrations/0003_auto_20180525_1035.py @ jonathanrickard/django-simple-file-handler (head f714b93b941b3a677a8fd2a2eb425afaaa0a2d62) | licenses: ["MIT"] | max_issues_count: null | issues event min/max datetime: null / null
max_forks_repo: build/lib/django_simple_file_handler/migrations/0003_auto_20180525_1035.py @ jonathanrickard/django-simple-file-handler (head f714b93b941b3a677a8fd2a2eb425afaaa0a2d62) | licenses: ["MIT"] | max_forks_count: 1 | forks event min/max datetime: 2021-01-09T13:04:38.000Z / 2021-01-09T13:04:38.000Z
content:
from django.db import migrations, models
import django_simple_file_handler.models
class Migration(migrations.Migration):
dependencies = [
('django_simple_file_handler', '0002_auto_20180521_1545'),
]
operations = [
migrations.AlterField(
model_name='privatedocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='privatepdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='processedimage',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='publicdocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='publicpdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='temporarydocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='temporarypdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='unprocessedimage',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
]
avg_line_length: 43.45283 | max_line_length: 143 | alphanum_fraction: 0.671733
content_no_comment:
from django.db import migrations, models
import django_simple_file_handler.models
class Migration(migrations.Migration):
dependencies = [
('django_simple_file_handler', '0002_auto_20180521_1545'),
]
operations = [
migrations.AlterField(
model_name='privatedocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='privatepdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='processedimage',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='publicdocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='publicpdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='temporarydocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='temporarypdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='unprocessedimage',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
]
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7908903b26156a0c8cd61f18feb0978d6c51870c | size: 13,671 | ext: py | lang: Python
max_stars_repo: nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py @ HanseMerkur/nitro-python (head d03eb11f492a35a2a8b2a140322fbce22d25a8f7) | licenses: ["Apache-2.0"] | max_stars_count: 2 | stars event min/max datetime: 2020-08-24T18:04:22.000Z / 2020-08-24T18:04:47.000Z
max_issues_repo: nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py @ HanseMerkur/nitro-python (head d03eb11f492a35a2a8b2a140322fbce22d25a8f7) | licenses: ["Apache-2.0"] | max_issues_count: null | issues event min/max datetime: null / null
max_forks_repo: nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py @ HanseMerkur/nitro-python (head d03eb11f492a35a2a8b2a140322fbce22d25a8f7) | licenses: ["Apache-2.0"] | max_forks_count: null | forks event min/max datetime: null / null
content:
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class lbvserver_appfwpolicy_binding(base_resource) :
"""Binding class showing the appfwpolicy that can be bound to lbvserver."""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._sc = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def priority(self) :
"""Priority."""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority.
:param priority:
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE."""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
:param gotopriorityexpression:
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
"""Name of the policy bound to the LB vserver."""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name of the policy bound to the LB vserver.
:param policyname:
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
:param name:
"""
try :
self._name = name
except Exception as e:
raise e
@property
def bindpoint(self) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE."""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
:param bindpoint:
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def labeltype(self) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel."""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
:param labeltype:
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label invoked."""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the label invoked.
:param labelname:
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke policies bound to a virtual server or policy label."""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke policies bound to a virtual server or policy label.
:param invoke:
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def sc(self) :
"""Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF."""
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_appfwpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_appfwpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = lbvserver_appfwpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = lbvserver_appfwpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
"""Use this API to fetch lbvserver_appfwpolicy_binding resources.
:param service:
:param name:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
"""Use this API to fetch filtered set of lbvserver_appfwpolicy_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
"""Use this API to count lbvserver_appfwpolicy_binding resources configued on NetScaler.
:param service:
:param name:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
"""Use this API to count the filtered set of lbvserver_appfwpolicy_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
:param service:
:param name:
:param filter_:
"""
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
""" """
ON = "ON"
OFF = "OFF"
class Bindpoint:
""" """
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
""" """
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_appfwpolicy_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.lbvserver_appfwpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_appfwpolicy_binding = [lbvserver_appfwpolicy_binding() for _ in range(length)]
avg_line_length: 33.182039 | max_line_length: 308 | alphanum_fraction: 0.58657
content_no_comment:
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class lbvserver_appfwpolicy_binding(base_resource) :
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._sc = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
try :
self._name = name
except Exception as e:
raise e
@property
def bindpoint(self) :
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def labeltype(self) :
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke(self) :
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def sc(self) :
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
try :
result = service.payload_formatter.string_to_resource(lbvserver_appfwpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_appfwpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = lbvserver_appfwpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = lbvserver_appfwpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
try :
obj = lbvserver_appfwpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
ON = "ON"
OFF = "OFF"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_appfwpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_appfwpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_appfwpolicy_binding = [lbvserver_appfwpolicy_binding() for _ in range(length)]
is_comment_constant_removed: true | is_sharp_comment_removed: true
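A hedged usage sketch for the binding class above: only the classmethods defined in this file are used, and how the authenticated nitro_service session is obtained depends on this fork's client API, so session creation is left abstract.

# Hypothetical helper; the import path mirrors the file's own layout.
from nitro.resource.config.lb.lbvserver_appfwpolicy_binding import lbvserver_appfwpolicy_binding

def list_appfw_bindings(client, vserver_name):
    # client: an authenticated nitro_service session (creation omitted here).
    bindings = lbvserver_appfwpolicy_binding.get(client, vserver_name) or []
    for b in bindings:
        print(b.policyname, b.priority, b.bindpoint)
    return bindings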
hexsha: 790890ee5574aa74994ad5ba300aad26e29c846a | size: 6,873 | ext: py | lang: Python
max_stars_repo: container_data/.config/qBittorrent/plugins/nova3/engines/leetx.py @ Kira9204/wireguard-qbittorrent (head 54110194fb1051b49d7e39a6754e9a699b18d33e) | licenses: ["MIT"] | max_stars_count: null | stars event min/max datetime: null / null
max_issues_repo: container_data/.config/qBittorrent/plugins/nova3/engines/leetx.py @ Kira9204/wireguard-qbittorrent (head 54110194fb1051b49d7e39a6754e9a699b18d33e) | licenses: ["MIT"] | max_issues_count: null | issues event min/max datetime: null / null
max_forks_repo: container_data/.config/qBittorrent/plugins/nova3/engines/leetx.py @ Kira9204/wireguard-qbittorrent (head 54110194fb1051b49d7e39a6754e9a699b18d33e) | licenses: ["MIT"] | max_forks_count: null | forks event min/max datetime: null / null
content:
#VERSION: 2.3
#AUTHORS: Vikas Yadav (https://github.com/v1k45 | http://v1k45.com)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from html.parser import HTMLParser
from helpers import retrieve_url
from novaprinter import prettyPrinter
class leetx(object):
url = "https://1337x.to"
name = "1337x"
supported_categories = {
'all': 'All',
'movies': 'Movies',
'tv': 'TV',
'music': 'Music',
'games': 'Games',
'anime': 'Anime',
'software': 'Apps'
}
class MyHtmlParser(HTMLParser):
        """Sub-class for parsing results."""
        A, TABLE, TR, TD, SPAN = ('a', 'table', 'tr', 'td', 'span')
def __init__(self, results, url):
HTMLParser.__init__(self)
self.results = results
self.url = url
self.current_result = {}
self.current_item = None
self.inside_table = False
self.inside_row = False
def handle_starttag(self, tag, attrs):
# are we inside the results table body or not
# if we are not inside the table, no need to process any further
self.inside_table = self.inside_table or tag == self.TABLE
if not self.inside_table:
return
# convert attrs tuple to dictionary
attrs = dict(attrs)
# for torrent name and link
link = attrs.get('href', '')
if tag == self.A and link.startswith('/torrent'):
self.current_result['link'] = self.url + link
self.current_result['desc_link'] = self.url + link
self.current_result['engine_url'] = self.url
self.current_item = 'name'
# to ignore uploader name attached to the torrent size in span tag
if tag == self.SPAN:
self.current_item = None
# if this is a <td> there can be seeds, leeches or size inside it.
if tag == self.TD:
self.inside_row = True
                # find the appropriate data key using the class name of the td
for item in ['seeds', 'leech', 'size']:
if item in attrs.get('class', ''):
self.current_item = item
break
def handle_data(self, data):
# if we are not inside the table, no need to process any further
if not self.inside_table:
return
# do not process data if we are not inside the table body
if self.current_item:
prev_value = self.current_result.get(self.current_item, '')
self.current_result[self.current_item] = prev_value + data
def handle_endtag(self, tag):
# are we inside the results table body or not
# if we are not inside the table, no need to process any further
if tag == self.TABLE:
self.inside_table = False
if not self.inside_table:
return
            # exiting a table cell; a closing td or tr means we move to the next element
if self.inside_row and tag == self.TD:
self.inside_row = False
self.current_item = None
# exiting the tr element, which means all necessary data for a torrent has been
# extracted, we should save it and clean the object's state.
if self.current_result and tag == self.TR:
if 'size' in self.current_result:
self.current_result['size'] = self.current_result['size'].replace(',', '')
# skip malformed names (eg. with @)
if 'name' in self.current_result:
prettyPrinter(self.current_result)
self.results.append('a')
self.current_result = {}
self.current_item = None
def download_torrent(self, download_url):
# since 1337x does not provide torrent links in the search results,
# we will have to fetch the page and extract the magnet link
torrent_page = retrieve_url(download_url)
magnet_match = re.search(r"href\s*\=\s*\"(magnet[^\"]+)\"", torrent_page)
if magnet_match and magnet_match.groups():
print(magnet_match.groups()[0] + " " + download_url)
else:
            raise Exception('Error, please file a bug report!')
def search(self, what, cat='all'):
cat = cat.lower()
# decide which type of search to perform based on category
search_page = "search" if cat == 'all' else 'category-search'
search_url = "{url}/{search_page}/{search_query}/".format(
url=self.url, search_page=search_page, search_query=what)
# apply search category to url, if any.
if cat != 'all':
search_url += self.supported_categories[cat] + "/"
        # try to get 15 pages (20 * 15 = 300 results) and stop when we don't find results
results_list = []
parser = self.MyHtmlParser(results_list, self.url)
page = 1
while page < 16:
# download the page
html = retrieve_url(search_url + str(page) + '/')
parser.feed(html)
if len(results_list) < 1:
break
del results_list[:]
page += 1
parser.close()
avg_line_length: 41.654545 | max_line_length: 94 | alphanum_fraction: 0.60614
content_no_comment:
import re
from html.parser import HTMLParser
from helpers import retrieve_url
from novaprinter import prettyPrinter
class leetx(object):
url = "https://1337x.to"
name = "1337x"
supported_categories = {
'all': 'All',
'movies': 'Movies',
'tv': 'TV',
'music': 'Music',
'games': 'Games',
'anime': 'Anime',
'software': 'Apps'
}
class MyHtmlParser(HTMLParser):
A, TABLE, TR, TD, SPAN = ('a', 'table', 'tr', 'td', 'span')
def __init__(self, results, url):
HTMLParser.__init__(self)
self.results = results
self.url = url
self.current_result = {}
self.current_item = None
self.inside_table = False
self.inside_row = False
def handle_starttag(self, tag, attrs):
self.inside_table = self.inside_table or tag == self.TABLE
if not self.inside_table:
return
attrs = dict(attrs)
link = attrs.get('href', '')
if tag == self.A and link.startswith('/torrent'):
self.current_result['link'] = self.url + link
self.current_result['desc_link'] = self.url + link
self.current_result['engine_url'] = self.url
self.current_item = 'name'
if tag == self.SPAN:
self.current_item = None
if tag == self.TD:
self.inside_row = True
for item in ['seeds', 'leech', 'size']:
if item in attrs.get('class', ''):
self.current_item = item
break
def handle_data(self, data):
if not self.inside_table:
return
if self.current_item:
prev_value = self.current_result.get(self.current_item, '')
self.current_result[self.current_item] = prev_value + data
def handle_endtag(self, tag):
if tag == self.TABLE:
self.inside_table = False
if not self.inside_table:
return
if self.inside_row and tag == self.TD:
self.inside_row = False
self.current_item = None
if self.current_result and tag == self.TR:
if 'size' in self.current_result:
self.current_result['size'] = self.current_result['size'].replace(',', '')
# skip malformed names (eg. with @)
if 'name' in self.current_result:
prettyPrinter(self.current_result)
self.results.append('a')
self.current_result = {}
self.current_item = None
def download_torrent(self, download_url):
# since 1337x does not provide torrent links in the search results,
# we will have to fetch the page and extract the magnet link
torrent_page = retrieve_url(download_url)
magnet_match = re.search(r"href\s*\=\s*\"(magnet[^\"]+)\"", torrent_page)
if magnet_match and magnet_match.groups():
print(magnet_match.groups()[0] + " " + download_url)
else:
            raise Exception('Error, please file a bug report!')
def search(self, what, cat='all'):
cat = cat.lower()
# decide which type of search to perform based on category
search_page = "search" if cat == 'all' else 'category-search'
search_url = "{url}/{search_page}/{search_query}/".format(
url=self.url, search_page=search_page, search_query=what)
# apply search category to url, if any.
if cat != 'all':
search_url += self.supported_categories[cat] + "/"
        # try to get 15 pages (20 * 15 = 300 results) and stop when we don't find results
results_list = []
parser = self.MyHtmlParser(results_list, self.url)
page = 1
while page < 16:
# download the page
html = retrieve_url(search_url + str(page) + '/')
parser.feed(html)
if len(results_list) < 1:
break
del results_list[:]
page += 1
parser.close()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79089183a173182f1b58d41a9740c57cf59c543c | size: 946 | ext: py | lang: Python
max_stars_repo: turf/boolean_within/tests/test_boolean_within.py @ diogomatoschaves/pyturf (head 966e0c37389f7ad398431498f16e7cc9b510cd56) | licenses: ["MIT"] | max_stars_count: 5 | stars event min/max datetime: 2020-04-12T15:15:51.000Z / 2020-04-20T14:40:53.000Z
max_issues_repo: turf/boolean_within/tests/test_boolean_within.py @ diogomatoschaves/pyturf (head 966e0c37389f7ad398431498f16e7cc9b510cd56) | licenses: ["MIT"] | max_issues_count: 36 | issues event min/max datetime: 2020-04-09T16:49:05.000Z / 2020-06-01T14:39:37.000Z
max_forks_repo: turf/boolean_within/tests/test_boolean_within.py @ diogomatoschaves/pyturf (head 966e0c37389f7ad398431498f16e7cc9b510cd56) | licenses: ["MIT"] | max_forks_count: null | forks event min/max datetime: null / null
content:
import pytest
import os
from turf.boolean_within import boolean_within
from turf.utils.test_setup import get_fixtures
current_path = os.path.dirname(os.path.realpath(__file__))
fixtures = get_fixtures(
current_path,
keys=["true", "false"],
)
class TestBooleanPointOnLine:
@pytest.mark.parametrize(
"fixture",
[
pytest.param(fixture, id=fixture_name)
for fixture_name, fixture in fixtures.items()
],
)
def test_boolean_point_on_line(self, fixture):
if "true" in fixture:
features = fixture.get("true")
feature_1, feature_2 = features["features"]
expected_result = True
else:
features = fixture.get("false")
feature_1, feature_2 = features["features"]
expected_result = False
test_result = boolean_within(feature_1, feature_2)
assert test_result == expected_result
avg_line_length: 23.65 | max_line_length: 58 | alphanum_fraction: 0.639535
content_no_comment:
import pytest
import os
from turf.boolean_within import boolean_within
from turf.utils.test_setup import get_fixtures
current_path = os.path.dirname(os.path.realpath(__file__))
fixtures = get_fixtures(
current_path,
keys=["true", "false"],
)
class TestBooleanPointOnLine:
@pytest.mark.parametrize(
"fixture",
[
pytest.param(fixture, id=fixture_name)
for fixture_name, fixture in fixtures.items()
],
)
def test_boolean_point_on_line(self, fixture):
if "true" in fixture:
features = fixture.get("true")
feature_1, feature_2 = features["features"]
expected_result = True
else:
features = fixture.get("false")
feature_1, feature_2 = features["features"]
expected_result = False
test_result = boolean_within(feature_1, feature_2)
assert test_result == expected_result
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7908919fd9c2722e099a3815953cf94ccddb5d9a | size: 439 | ext: py | lang: Python
max_stars_repo: array/twosum.py @ mengyangbai/leetcode (head e7a6906ecc5bce665dec5d0f057b302a64d50f40) | licenses: ["MIT"] | max_stars_count: null | stars event min/max datetime: null / null
max_issues_repo: array/twosum.py @ mengyangbai/leetcode (head e7a6906ecc5bce665dec5d0f057b302a64d50f40) | licenses: ["MIT"] | max_issues_count: null | issues event min/max datetime: null / null
max_forks_repo: array/twosum.py @ mengyangbai/leetcode (head e7a6906ecc5bce665dec5d0f057b302a64d50f40) | licenses: ["MIT"] | max_forks_count: null | forks event min/max datetime: null / null
content:
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
lookup = dict(((v, i) for i, v in enumerate(nums)))
return next(( (i+1, lookup.get(target-v)+1)
for i, v in enumerate(nums)
if lookup.get(target-v, i) != i), None)
a = Solution()
print(a.twoSum([2, 11, 7, 15],9))
# The simpler the problem, the more careful you need to be.
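For contrast, a one-pass variant of the same hash-map idea (an illustrative sketch, not from the source): it returns 0-based indices, handles duplicates naturally, and avoids building the full lookup table up front.
def two_sum_one_pass(nums, target):
    seen = {}  # value -> index of its first occurrence
    for i, v in enumerate(nums):
        if target - v in seen:
            return (seen[target - v], i)
        seen[v] = i
    return None

print(two_sum_one_pass([2, 11, 7, 15], 9))  # (0, 2)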
| 29.266667
| 59
| 0.498861
|
class Solution:
def twoSum(self, nums, target):
lookup = dict(((v, i) for i, v in enumerate(nums)))
return next(( (i+1, lookup.get(target-v)+1)
for i, v in enumerate(nums)
if lookup.get(target-v, i) != i), None)
a = Solution()
print(a.twoSum([2, 11, 7, 15],9))
| true
| true
|
790891a39404f5ebac8886e67eb42917dbac546a
| 8,091
|
py
|
Python
|
frontend/update.py
|
daavofficial/pyLaunch
|
7119dbe64152a8bffe9e0f8b70ebb9ca89ce4e9a
|
[
"MIT"
] | 1
|
2022-01-06T15:11:29.000Z
|
2022-01-06T15:11:29.000Z
|
frontend/update.py
|
daavofficial/pyLaunch
|
7119dbe64152a8bffe9e0f8b70ebb9ca89ce4e9a
|
[
"MIT"
] | null | null | null |
frontend/update.py
|
daavofficial/pyLaunch
|
7119dbe64152a8bffe9e0f8b70ebb9ca89ce4e9a
|
[
"MIT"
] | null | null | null |
import os
import re
import shutil
import sys
import urllib.error
import urllib.parse
import urllib.request
from zipfile import ZipFile
import helpers.config as config
from helpers.logger import Logger
class Updater:
__instance = None
@staticmethod
def Get():
if Updater.__instance is None:
return Updater()
return Updater.__instance
def __init__(self):
if Updater.__instance is not None:
return
else:
self.log = Logger("pyLaunch.Frontend.Updater", "frontend.log")
self.DeleteFolders = ["src"]
self.UpdateFolder = "updatefiles"
def Automatic(self) -> bool:
if not self.CheckConnection():
return False
UpdateAvailable = self.CheckVersions()
if UpdateAvailable:
print(f"An update is available! [v{'.'.join(self.Versions[1])}]")
if not 'n' in input(f"Would you like to update from [{'.'.join(self.Versions[0])}]? (Y/n) > "):
if self.DownloadUpdate():
return self.InstallUpdate()
return False
    def CheckConnection(self):  # returns True on success, or a status string
if config.CONFIGURATION['Update']['SkipCheck']:
return "Skipping update check"
try:
urllib.request.urlopen('http://google.com')
return True
except Exception as e:
return "Unable to connect to the internet" # Unable to connect to the internet
def DownloadUpdate(self) -> bool:
response = None
try:
response = urllib.request.urlopen(f"https://api.github.com/repos/{config.CONFIGURATION['Update']['Organization']}/{config.CONFIGURATION['Update']['Repository']}/zipball/{config.CONFIGURATION['Update']['Branch']}")
except urllib.error.HTTPError as e:
print(f"Unable to download update from GitHub: {e}")
input("Press enter to continue...")
return False
if not os.path.exists(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
os.mkdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
with open(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}gh_download.zip", "wb") as f:
f.write(response.read())
# Zip is downloaded, now extract
os.chdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
zipFileContent = dict()
zipFileContentSize = 0
with ZipFile(f"gh_download.zip", 'r') as zipFile:
for name in zipFile.namelist():
zipFileContent[name] = zipFile.getinfo(name).file_size
zipFileContentSize = sum(zipFileContent.values())
extractedContentSize = 0
for zippedFileName, zippedFileSize in zipFileContent.items():
UnzippedFilePath = os.path.abspath(f"{zippedFileName}")
os.makedirs(os.path.dirname(UnzippedFilePath), exist_ok=True)
if os.path.isfile(UnzippedFilePath):
zipFileContentSize -= zippedFileSize
else:
zipFile.extract(zippedFileName, path="", pwd=None)
extractedContentSize += zippedFileSize
try:
done = int(50*extractedContentSize/zipFileContentSize)
percentage = (extractedContentSize / zipFileContentSize) * 100
except ZeroDivisionError:
done = 50
percentage = 100
sys.stdout.write('\r[{}{}] {:.2f}%'.format('█' * done, '.' * (50-done), percentage))
sys.stdout.flush()
sys.stdout.write('\n')
os.chdir(config.PATH_ROOT)
return True
def InstallUpdate(self) -> bool:
print("Installing new version")
for file in os.listdir(config.CONFIGURATION['Launch']['ProjectRoot']):
if os.path.isdir(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}"):
if file in self.DeleteFolders:
shutil.rmtree(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
else: # Files
os.remove(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
# Old version is deleted
for file in os.listdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
os.rename(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}{file}", f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
shutil.rmtree(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
return True
def CheckVersions(self):
        # Successful return: bool
# Unsuccessful: list[message: str, continue: bool]
self.Versions = self._GetVersions()
if type(self.Versions[1]) == bool:
return self.Versions
self.Versions[0] = self._GetVersionAsInt(self.Versions[0])
self.Versions[1] = self._GetVersionAsInt(self.Versions[1])
self.Difference = []
for installed, checked in zip(self.Versions[0], self.Versions[1]):
self.Difference.append(checked - installed)
for section in self.Difference:
if section < 0: # When working on project and updating locally
return False
elif section > 0:
return True
return False
def _GetVersions(self) -> list:
        # Successful return: list[InstalledVersion: str, CheckedVersion: str]
        # Unsuccessful: list[message: str, continue: bool]
if not os.path.exists(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{config.CONFIGURATION['Update']['VersionPath']}"):
# This means either the configuration is incorrect, or pyLaunch isn't where it should be
# continue is False, because the project cannot be launched
return [f"Unable to locate installed version at {config.CONFIGURATION['Update']['VersionPath']}", False]
InstalledVersion = None # Local Version
CheckedVersion = None # Version on GitHub
with open(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{config.CONFIGURATION['Update']['VersionPath']}", "r") as f:
lines = f.readlines()
InstalledVersion = self._GetVersionFromStr(lines)
try:
response = urllib.request.urlopen(f"https://raw.githubusercontent.com/{config.CONFIGURATION['Update']['Organization']}/{config.CONFIGURATION['Update']['Repository']}/{config.CONFIGURATION['Update']['Branch']}{config.CONFIGURATION['Update']['VersionPath']}")
content = response.read().decode("UTF-8").split("\n")
CheckedVersion = self._GetVersionFromStr(content)
except urllib.error.HTTPError as e:
# The Project URL is invalid (cannot find Org/Repo/Branch/VersionPath) or,
# raw.githubusercontent is down, continue is True, the project can still be launched
return ["Project URL does not exist or githubusercontent is down", True] # URL doesn't exist or something went wrong
if CheckedVersion is None:
# Some other error, just to be safe.
return ["Unable to get current version from GitHub", True]
return [InstalledVersion, CheckedVersion]
    def _GetVersionFromStr(self, lines: list) -> str:
ver = None
for line in lines:
line = line.strip()
if config.CONFIGURATION['Update']['Find'] in line:
ver = line[len(config.CONFIGURATION['Update']['Find']):].strip('"')
match = re.match(r"\d+\.\d+\.\d+", ver) # > #.#.#
if match:
return ver[match.start():match.end()]
return None
def _GetVersionAsInt(self, version: str) -> list:
version = version.split(".")
intVer = []
for section in version:
if section.isalnum():
newSection = ""
for char in section:
if char.isnumeric():
newSection += char
section = newSection
intVer.append(int(section))
return intVer
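The update decision in CheckVersions compares version sections left to right: the first negative difference (a local build ahead of the remote, e.g. during development) suppresses the update, and the first positive one triggers it. A standalone sketch of that rule, with hypothetical version tuples:
def needs_update(installed, checked):
    for local, remote in zip(installed, checked):
        if remote < local:
            return False  # local is ahead; do not downgrade
        if remote > local:
            return True   # remote is newer
    return False          # identical versions

print(needs_update([1, 2, 3], [1, 3, 0]))  # True: remote minor version is newer
print(needs_update([2, 0, 0], [1, 9, 9]))  # False: local major version is ahead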
| 44.213115
| 269
| 0.602645
|
import os
import re
import shutil
import sys
import urllib.error
import urllib.parse
import urllib.request
from zipfile import ZipFile
import helpers.config as config
from helpers.logger import Logger
class Updater:
__instance = None
@staticmethod
def Get():
if Updater.__instance is None:
return Updater()
return Updater.__instance
def __init__(self):
if Updater.__instance is not None:
return
else:
self.log = Logger("pyLaunch.Frontend.Updater", "frontend.log")
self.DeleteFolders = ["src"]
self.UpdateFolder = "updatefiles"
def Automatic(self) -> bool:
if not self.CheckConnection():
return False
UpdateAvailable = self.CheckVersions()
if UpdateAvailable:
print(f"An update is available! [v{'.'.join(self.Versions[1])}]")
if not 'n' in input(f"Would you like to update from [{'.'.join(self.Versions[0])}]? (Y/n) > "):
if self.DownloadUpdate():
return self.InstallUpdate()
return False
    def CheckConnection(self):
if config.CONFIGURATION['Update']['SkipCheck']:
return "Skipping update check"
try:
urllib.request.urlopen('http://google.com')
return True
except Exception as e:
return "Unable to connect to the internet"
def DownloadUpdate(self) -> bool:
response = None
try:
response = urllib.request.urlopen(f"https://api.github.com/repos/{config.CONFIGURATION['Update']['Organization']}/{config.CONFIGURATION['Update']['Repository']}/zipball/{config.CONFIGURATION['Update']['Branch']}")
except urllib.error.HTTPError as e:
print(f"Unable to download update from GitHub: {e}")
input("Press enter to continue...")
return False
if not os.path.exists(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
os.mkdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
with open(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}gh_download.zip", "wb") as f:
f.write(response.read())
os.chdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
zipFileContent = dict()
zipFileContentSize = 0
with ZipFile(f"gh_download.zip", 'r') as zipFile:
for name in zipFile.namelist():
zipFileContent[name] = zipFile.getinfo(name).file_size
zipFileContentSize = sum(zipFileContent.values())
extractedContentSize = 0
for zippedFileName, zippedFileSize in zipFileContent.items():
UnzippedFilePath = os.path.abspath(f"{zippedFileName}")
os.makedirs(os.path.dirname(UnzippedFilePath), exist_ok=True)
if os.path.isfile(UnzippedFilePath):
zipFileContentSize -= zippedFileSize
else:
zipFile.extract(zippedFileName, path="", pwd=None)
extractedContentSize += zippedFileSize
try:
done = int(50*extractedContentSize/zipFileContentSize)
percentage = (extractedContentSize / zipFileContentSize) * 100
except ZeroDivisionError:
done = 50
percentage = 100
sys.stdout.write('\r[{}{}] {:.2f}%'.format('█' * done, '.' * (50-done), percentage))
sys.stdout.flush()
sys.stdout.write('\n')
os.chdir(config.PATH_ROOT)
return True
def InstallUpdate(self) -> bool:
print("Installing new version")
for file in os.listdir(config.CONFIGURATION['Launch']['ProjectRoot']):
if os.path.isdir(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}"):
if file in self.DeleteFolders:
shutil.rmtree(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
else:
os.remove(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
for file in os.listdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
os.rename(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}{file}", f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
shutil.rmtree(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
return True
def CheckVersions(self):
self.Versions = self._GetVersions()
if type(self.Versions[1]) == bool:
return self.Versions
self.Versions[0] = self._GetVersionAsInt(self.Versions[0])
self.Versions[1] = self._GetVersionAsInt(self.Versions[1])
self.Difference = []
for installed, checked in zip(self.Versions[0], self.Versions[1]):
self.Difference.append(checked - installed)
for section in self.Difference:
if section < 0:
return False
elif section > 0:
return True
return False
def _GetVersions(self) -> list:
if not os.path.exists(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{config.CONFIGURATION['Update']['VersionPath']}"):
# continue is False, because the project cannot be launched
return [f"Unable to locate installed version at {config.CONFIGURATION['Update']['VersionPath']}", False]
InstalledVersion = None # Local Version
CheckedVersion = None # Version on GitHub
with open(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{config.CONFIGURATION['Update']['VersionPath']}", "r") as f:
lines = f.readlines()
InstalledVersion = self._GetVersionFromStr(lines)
try:
response = urllib.request.urlopen(f"https://raw.githubusercontent.com/{config.CONFIGURATION['Update']['Organization']}/{config.CONFIGURATION['Update']['Repository']}/{config.CONFIGURATION['Update']['Branch']}{config.CONFIGURATION['Update']['VersionPath']}")
content = response.read().decode("UTF-8").split("\n")
CheckedVersion = self._GetVersionFromStr(content)
except urllib.error.HTTPError as e:
# The Project URL is invalid (cannot find Org/Repo/Branch/VersionPath) or,
# raw.githubusercontent is down, continue is True, the project can still be launched
return ["Project URL does not exist or githubusercontent is down", True] # URL doesn't exist or something went wrong
if CheckedVersion is None:
return ["Unable to get current version from GitHub", True]
return [InstalledVersion, CheckedVersion]
    def _GetVersionFromStr(self, lines: list) -> str:
ver = None
for line in lines:
line = line.strip()
if config.CONFIGURATION['Update']['Find'] in line:
ver = line[len(config.CONFIGURATION['Update']['Find']):].strip('"')
match = re.match(r"\d+\.\d+\.\d+", ver) # > #.#.#
if match:
return ver[match.start():match.end()]
return None
def _GetVersionAsInt(self, version: str) -> list:
version = version.split(".")
intVer = []
for section in version:
if section.isalnum():
newSection = ""
for char in section:
if char.isnumeric():
newSection += char
section = newSection
intVer.append(int(section))
return intVer
| true
| true
|
790891b078152a84f9a96300dd432c6aa253964b
| 24,653
|
py
|
Python
|
sdk/python/feast/on_demand_feature_view.py
|
aurobindoc/feast
|
72f155882c95f21573b31a613edf066bdb55f630
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/feast/on_demand_feature_view.py
|
aurobindoc/feast
|
72f155882c95f21573b31a613edf066bdb55f630
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/feast/on_demand_feature_view.py
|
aurobindoc/feast
|
72f155882c95f21573b31a613edf066bdb55f630
|
[
"Apache-2.0"
] | null | null | null |
import copy
import functools
import warnings
from types import MethodType
from typing import Dict, List, Optional, Type, Union
import dill
import pandas as pd
from feast.base_feature_view import BaseFeatureView
from feast.data_source import RequestSource
from feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError
from feast.feature import Feature
from feast.feature_view import FeatureView
from feast.feature_view_projection import FeatureViewProjection
from feast.field import Field, from_value_type
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureView as OnDemandFeatureViewProto,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureViewMeta,
OnDemandFeatureViewSpec,
OnDemandSource,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
UserDefinedFunction as UserDefinedFunctionProto,
)
from feast.type_map import (
feast_value_type_to_pandas_type,
python_type_to_feast_value_type,
)
from feast.usage import log_exceptions
from feast.value_type import ValueType
warnings.simplefilter("once", DeprecationWarning)
class OnDemandFeatureView(BaseFeatureView):
"""
[Experimental] An OnDemandFeatureView defines a logical group of features that are
generated by applying a transformation on a set of input sources, such as feature
views and request data sources.
Attributes:
name: The unique name of the on demand feature view.
features: The list of features in the output of the on demand feature view.
source_feature_view_projections: A map from input source names to actual input
sources with type FeatureViewProjection.
source_request_sources: A map from input source names to the actual input
sources with type RequestSource.
udf: The user defined transformation function, which must take pandas dataframes
as inputs.
description: A human-readable description.
tags: A dictionary of key-value pairs to store arbitrary metadata.
owner: The owner of the on demand feature view, typically the email of the primary
maintainer.
"""
# TODO(adchia): remove inputs from proto and declaration
name: str
features: List[Field]
source_feature_view_projections: Dict[str, FeatureViewProjection]
source_request_sources: Dict[str, RequestSource]
udf: MethodType
description: str
tags: Dict[str, str]
owner: str
@log_exceptions
def __init__(
self,
*args,
name: Optional[str] = None,
features: Optional[List[Feature]] = None,
sources: Optional[
Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
] = None,
udf: Optional[MethodType] = None,
inputs: Optional[
Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
] = None,
schema: Optional[List[Field]] = None,
description: str = "",
tags: Optional[Dict[str, str]] = None,
owner: str = "",
):
"""
Creates an OnDemandFeatureView object.
Args:
name: The unique name of the on demand feature view.
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
udf (optional): The user defined transformation function, which must take pandas
dataframes as inputs.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer.
"""
positional_attributes = ["name", "features", "inputs", "udf"]
_name = name
_schema = schema or []
if len(_schema) == 0 and features is not None:
_schema = [Field.from_feature(feature) for feature in features]
if features is not None:
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
_sources = sources or inputs
if inputs and sources:
raise ValueError("At most one of `sources` or `inputs` can be specified.")
elif inputs:
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
_udf = udf
if args:
warnings.warn(
(
"On demand feature view parameters should be specified as keyword arguments "
"instead of positional arguments. Feast 0.23 and onwards will not support "
"positional arguments in on demand feature view definitions."
),
DeprecationWarning,
)
if len(args) > len(positional_attributes):
raise ValueError(
f"Only {', '.join(positional_attributes)} are allowed as positional args "
f"when defining feature views, for backwards compatibility."
)
if len(args) >= 1:
_name = args[0]
if len(args) >= 2:
_schema = args[1]
# Convert Features to Fields.
if len(_schema) > 0 and isinstance(_schema[0], Feature):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
if len(args) >= 3:
_sources = args[2]
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if len(args) >= 4:
_udf = args[3]
if not _name:
raise ValueError(
"The name of the on demand feature view must be specified."
)
if not _sources:
raise ValueError("The `sources` parameter must be specified.")
super().__init__(
name=_name,
features=_schema,
description=description,
tags=tags,
owner=owner,
)
assert _sources is not None
self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {}
self.source_request_sources: Dict[str, RequestSource] = {}
for source_name, odfv_source in _sources.items():
if isinstance(odfv_source, RequestSource):
self.source_request_sources[source_name] = odfv_source
elif isinstance(odfv_source, FeatureViewProjection):
self.source_feature_view_projections[source_name] = odfv_source
else:
self.source_feature_view_projections[
source_name
] = odfv_source.projection
if _udf is None:
raise ValueError("The `udf` parameter must be specified.")
assert _udf
self.udf = _udf
@property
def proto_class(self) -> Type[OnDemandFeatureViewProto]:
return OnDemandFeatureViewProto
def __copy__(self):
fv = OnDemandFeatureView(
name=self.name,
schema=self.features,
sources=dict(
**self.source_feature_view_projections, **self.source_request_sources,
),
udf=self.udf,
description=self.description,
tags=self.tags,
owner=self.owner,
)
fv.projection = copy.copy(self.projection)
return fv
def __eq__(self, other):
if not super().__eq__(other):
return False
if (
not self.source_feature_view_projections
== other.source_feature_view_projections
or not self.source_request_sources == other.source_request_sources
or not self.udf.__code__.co_code == other.udf.__code__.co_code
):
return False
return True
def __hash__(self):
return super().__hash__()
def to_proto(self) -> OnDemandFeatureViewProto:
"""
Converts an on demand feature view object to its protobuf representation.
Returns:
A OnDemandFeatureViewProto protobuf.
"""
meta = OnDemandFeatureViewMeta()
if self.created_timestamp:
meta.created_timestamp.FromDatetime(self.created_timestamp)
if self.last_updated_timestamp:
meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
sources = {}
for source_name, fv_projection in self.source_feature_view_projections.items():
sources[source_name] = OnDemandSource(
feature_view_projection=fv_projection.to_proto()
)
for (source_name, request_sources,) in self.source_request_sources.items():
sources[source_name] = OnDemandSource(
request_data_source=request_sources.to_proto()
)
spec = OnDemandFeatureViewSpec(
name=self.name,
features=[feature.to_proto() for feature in self.features],
sources=sources,
user_defined_function=UserDefinedFunctionProto(
name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True),
),
description=self.description,
tags=self.tags,
owner=self.owner,
)
return OnDemandFeatureViewProto(spec=spec, meta=meta)
@classmethod
def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):
"""
Creates an on demand feature view from a protobuf representation.
Args:
on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.
Returns:
A OnDemandFeatureView object based on the on-demand feature view protobuf.
"""
sources = {}
for (
source_name,
on_demand_source,
) in on_demand_feature_view_proto.spec.sources.items():
if on_demand_source.WhichOneof("source") == "feature_view":
sources[source_name] = FeatureView.from_proto(
on_demand_source.feature_view
).projection
elif on_demand_source.WhichOneof("source") == "feature_view_projection":
sources[source_name] = FeatureViewProjection.from_proto(
on_demand_source.feature_view_projection
)
else:
sources[source_name] = RequestSource.from_proto(
on_demand_source.request_data_source
)
on_demand_feature_view_obj = cls(
name=on_demand_feature_view_proto.spec.name,
schema=[
Field(
name=feature.name,
dtype=from_value_type(ValueType(feature.value_type)),
)
for feature in on_demand_feature_view_proto.spec.features
],
sources=sources,
udf=dill.loads(
on_demand_feature_view_proto.spec.user_defined_function.body
),
description=on_demand_feature_view_proto.spec.description,
tags=dict(on_demand_feature_view_proto.spec.tags),
owner=on_demand_feature_view_proto.spec.owner,
)
# FeatureViewProjections are not saved in the OnDemandFeatureView proto.
# Create the default projection.
on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(
on_demand_feature_view_obj
)
if on_demand_feature_view_proto.meta.HasField("created_timestamp"):
on_demand_feature_view_obj.created_timestamp = (
on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()
)
if on_demand_feature_view_proto.meta.HasField("last_updated_timestamp"):
on_demand_feature_view_obj.last_updated_timestamp = (
on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()
)
return on_demand_feature_view_obj
def get_request_data_schema(self) -> Dict[str, ValueType]:
schema: Dict[str, ValueType] = {}
for request_source in self.source_request_sources.values():
if isinstance(request_source.schema, List):
new_schema = {}
for field in request_source.schema:
new_schema[field.name] = field.dtype.to_value_type()
schema.update(new_schema)
elif isinstance(request_source.schema, Dict):
schema.update(request_source.schema)
else:
raise Exception(
f"Request source schema is not correct type: ${str(type(request_source.schema))}"
)
return schema
def get_transformed_features_df(
self, df_with_features: pd.DataFrame, full_feature_names: bool = False,
) -> pd.DataFrame:
# Apply on demand transformations
columns_to_cleanup = []
for source_fv_projection in self.source_feature_view_projections.values():
for feature in source_fv_projection.features:
full_feature_ref = f"{source_fv_projection.name}__{feature.name}"
if full_feature_ref in df_with_features.keys():
# Make sure the partial feature name is always present
df_with_features[feature.name] = df_with_features[full_feature_ref]
columns_to_cleanup.append(feature.name)
elif feature.name in df_with_features.keys():
# Make sure the full feature name is always present
df_with_features[full_feature_ref] = df_with_features[feature.name]
columns_to_cleanup.append(full_feature_ref)
# Compute transformed values and apply to each result row
df_with_transformed_features = self.udf.__call__(df_with_features)
        # Work out whether the correct column names are used.
rename_columns: Dict[str, str] = {}
for feature in self.features:
short_name = feature.name
long_name = f"{self.projection.name_to_use()}__{feature.name}"
if (
short_name in df_with_transformed_features.columns
and full_feature_names
):
rename_columns[short_name] = long_name
elif not full_feature_names:
# Long name must be in dataframe.
rename_columns[long_name] = short_name
# Cleanup extra columns used for transformation
df_with_features.drop(columns=columns_to_cleanup, inplace=True)
return df_with_transformed_features.rename(columns=rename_columns)
def infer_features(self):
"""
Infers the set of features associated to this feature view from the input source.
Raises:
RegistryInferenceFailure: The set of features could not be inferred.
"""
df = pd.DataFrame()
for feature_view_projection in self.source_feature_view_projections.values():
for feature in feature_view_projection.features:
dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())
df[f"{feature_view_projection.name}__{feature.name}"] = pd.Series(
dtype=dtype
)
df[f"{feature.name}"] = pd.Series(dtype=dtype)
for request_data in self.source_request_sources.values():
for field in request_data.schema:
dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())
df[f"{field.name}"] = pd.Series(dtype=dtype)
output_df: pd.DataFrame = self.udf.__call__(df)
inferred_features = []
for f, dt in zip(output_df.columns, output_df.dtypes):
inferred_features.append(
Field(
name=f,
dtype=from_value_type(
python_type_to_feast_value_type(f, type_name=str(dt))
),
)
)
if self.features:
missing_features = []
for specified_features in self.features:
if specified_features not in inferred_features:
missing_features.append(specified_features)
if missing_features:
raise SpecifiedFeaturesNotPresentError(
[f.name for f in missing_features], self.name
)
else:
self.features = inferred_features
if not self.features:
raise RegistryInferenceFailure(
"OnDemandFeatureView",
f"Could not infer Features for the feature view '{self.name}'.",
)
@staticmethod
def get_requested_odfvs(feature_refs, project, registry):
all_on_demand_feature_views = registry.list_on_demand_feature_views(
project, allow_cache=True
)
requested_on_demand_feature_views: List[OnDemandFeatureView] = []
for odfv in all_on_demand_feature_views:
for feature in odfv.features:
if f"{odfv.name}:{feature.name}" in feature_refs:
requested_on_demand_feature_views.append(odfv)
break
return requested_on_demand_feature_views
# TODO(felixwang9817): Force this decorator to accept kwargs and switch from
# `features` to `schema`.
def on_demand_feature_view(
*args,
features: Optional[List[Feature]] = None,
sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
schema: Optional[List[Field]] = None,
description: str = "",
tags: Optional[Dict[str, str]] = None,
owner: str = "",
):
"""
Creates an OnDemandFeatureView object with the given user function as udf.
Args:
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer.
"""
positional_attributes = ["features", "inputs"]
_schema = schema or []
if len(_schema) == 0 and features is not None:
_schema = [Field.from_feature(feature) for feature in features]
if features is not None:
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
_sources = sources or inputs
if inputs and sources:
raise ValueError("At most one of `sources` or `inputs` can be specified.")
elif inputs:
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if args:
warnings.warn(
(
"On demand feature view parameters should be specified as keyword arguments "
"instead of positional arguments. Feast 0.23 and onwards will not support "
"positional arguments in on demand feature view definitions."
),
DeprecationWarning,
)
if len(args) > len(positional_attributes):
raise ValueError(
f"Only {', '.join(positional_attributes)} are allowed as positional args "
f"when defining feature views, for backwards compatibility."
)
if len(args) >= 1:
_schema = args[0]
# Convert Features to Fields.
if len(_schema) > 0 and isinstance(_schema[0], Feature):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
if len(args) >= 2:
_sources = args[1]
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if not _sources:
raise ValueError("The `sources` parameter must be specified.")
def decorator(user_function):
on_demand_feature_view_obj = OnDemandFeatureView(
name=user_function.__name__,
sources=_sources,
schema=_schema,
udf=user_function,
description=description,
tags=tags,
owner=owner,
)
functools.update_wrapper(
wrapper=on_demand_feature_view_obj, wrapped=user_function
)
return on_demand_feature_view_obj
return decorator
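A hedged usage sketch for the decorator above. The RequestSource constructor arguments and the feast.types.Float64 import are assumptions based on this module's imports and Feast conventions of this era, not confirmed by the file itself:
import pandas as pd

from feast.data_source import RequestSource
from feast.field import Field
from feast.types import Float64  # assumed dtype location; adjust to your Feast version

conv_inputs = RequestSource(
    name="conv_inputs",
    schema=[Field(name="conv_rate", dtype=Float64)],
)

@on_demand_feature_view(
    sources={"conv_inputs": conv_inputs},
    schema=[Field(name="conv_rate_doubled", dtype=Float64)],
)
def conv_rate_doubled(features_df: pd.DataFrame) -> pd.DataFrame:
    # the udf receives the joined input dataframe and returns the new columns
    out = pd.DataFrame()
    out["conv_rate_doubled"] = features_df["conv_rate"] * 2.0
    return out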
| 42.14188
| 111
| 0.612461
|
import copy
import functools
import warnings
from types import MethodType
from typing import Dict, List, Optional, Type, Union
import dill
import pandas as pd
from feast.base_feature_view import BaseFeatureView
from feast.data_source import RequestSource
from feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError
from feast.feature import Feature
from feast.feature_view import FeatureView
from feast.feature_view_projection import FeatureViewProjection
from feast.field import Field, from_value_type
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureView as OnDemandFeatureViewProto,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureViewMeta,
OnDemandFeatureViewSpec,
OnDemandSource,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
UserDefinedFunction as UserDefinedFunctionProto,
)
from feast.type_map import (
feast_value_type_to_pandas_type,
python_type_to_feast_value_type,
)
from feast.usage import log_exceptions
from feast.value_type import ValueType
warnings.simplefilter("once", DeprecationWarning)
class OnDemandFeatureView(BaseFeatureView):
name: str
features: List[Field]
source_feature_view_projections: Dict[str, FeatureViewProjection]
source_request_sources: Dict[str, RequestSource]
udf: MethodType
description: str
tags: Dict[str, str]
owner: str
@log_exceptions
def __init__(
self,
*args,
name: Optional[str] = None,
features: Optional[List[Feature]] = None,
sources: Optional[
Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
] = None,
udf: Optional[MethodType] = None,
inputs: Optional[
Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
] = None,
schema: Optional[List[Field]] = None,
description: str = "",
tags: Optional[Dict[str, str]] = None,
owner: str = "",
):
positional_attributes = ["name", "features", "inputs", "udf"]
_name = name
_schema = schema or []
if len(_schema) == 0 and features is not None:
_schema = [Field.from_feature(feature) for feature in features]
if features is not None:
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
_sources = sources or inputs
if inputs and sources:
raise ValueError("At most one of `sources` or `inputs` can be specified.")
elif inputs:
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
_udf = udf
if args:
warnings.warn(
(
"On demand feature view parameters should be specified as keyword arguments "
"instead of positional arguments. Feast 0.23 and onwards will not support "
"positional arguments in on demand feature view definitions."
),
DeprecationWarning,
)
if len(args) > len(positional_attributes):
raise ValueError(
f"Only {', '.join(positional_attributes)} are allowed as positional args "
f"when defining feature views, for backwards compatibility."
)
if len(args) >= 1:
_name = args[0]
if len(args) >= 2:
_schema = args[1]
if len(_schema) > 0 and isinstance(_schema[0], Feature):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
if len(args) >= 3:
_sources = args[2]
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if len(args) >= 4:
_udf = args[3]
if not _name:
raise ValueError(
"The name of the on demand feature view must be specified."
)
if not _sources:
raise ValueError("The `sources` parameter must be specified.")
super().__init__(
name=_name,
features=_schema,
description=description,
tags=tags,
owner=owner,
)
assert _sources is not None
self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {}
self.source_request_sources: Dict[str, RequestSource] = {}
for source_name, odfv_source in _sources.items():
if isinstance(odfv_source, RequestSource):
self.source_request_sources[source_name] = odfv_source
elif isinstance(odfv_source, FeatureViewProjection):
self.source_feature_view_projections[source_name] = odfv_source
else:
self.source_feature_view_projections[
source_name
] = odfv_source.projection
if _udf is None:
raise ValueError("The `udf` parameter must be specified.")
assert _udf
self.udf = _udf
@property
def proto_class(self) -> Type[OnDemandFeatureViewProto]:
return OnDemandFeatureViewProto
def __copy__(self):
fv = OnDemandFeatureView(
name=self.name,
schema=self.features,
sources=dict(
**self.source_feature_view_projections, **self.source_request_sources,
),
udf=self.udf,
description=self.description,
tags=self.tags,
owner=self.owner,
)
fv.projection = copy.copy(self.projection)
return fv
def __eq__(self, other):
if not super().__eq__(other):
return False
if (
not self.source_feature_view_projections
== other.source_feature_view_projections
or not self.source_request_sources == other.source_request_sources
or not self.udf.__code__.co_code == other.udf.__code__.co_code
):
return False
return True
def __hash__(self):
return super().__hash__()
def to_proto(self) -> OnDemandFeatureViewProto:
meta = OnDemandFeatureViewMeta()
if self.created_timestamp:
meta.created_timestamp.FromDatetime(self.created_timestamp)
if self.last_updated_timestamp:
meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
sources = {}
for source_name, fv_projection in self.source_feature_view_projections.items():
sources[source_name] = OnDemandSource(
feature_view_projection=fv_projection.to_proto()
)
for (source_name, request_sources,) in self.source_request_sources.items():
sources[source_name] = OnDemandSource(
request_data_source=request_sources.to_proto()
)
spec = OnDemandFeatureViewSpec(
name=self.name,
features=[feature.to_proto() for feature in self.features],
sources=sources,
user_defined_function=UserDefinedFunctionProto(
name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True),
),
description=self.description,
tags=self.tags,
owner=self.owner,
)
return OnDemandFeatureViewProto(spec=spec, meta=meta)
@classmethod
def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):
sources = {}
for (
source_name,
on_demand_source,
) in on_demand_feature_view_proto.spec.sources.items():
if on_demand_source.WhichOneof("source") == "feature_view":
sources[source_name] = FeatureView.from_proto(
on_demand_source.feature_view
).projection
elif on_demand_source.WhichOneof("source") == "feature_view_projection":
sources[source_name] = FeatureViewProjection.from_proto(
on_demand_source.feature_view_projection
)
else:
sources[source_name] = RequestSource.from_proto(
on_demand_source.request_data_source
)
on_demand_feature_view_obj = cls(
name=on_demand_feature_view_proto.spec.name,
schema=[
Field(
name=feature.name,
dtype=from_value_type(ValueType(feature.value_type)),
)
for feature in on_demand_feature_view_proto.spec.features
],
sources=sources,
udf=dill.loads(
on_demand_feature_view_proto.spec.user_defined_function.body
),
description=on_demand_feature_view_proto.spec.description,
tags=dict(on_demand_feature_view_proto.spec.tags),
owner=on_demand_feature_view_proto.spec.owner,
)
on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(
on_demand_feature_view_obj
)
if on_demand_feature_view_proto.meta.HasField("created_timestamp"):
on_demand_feature_view_obj.created_timestamp = (
on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()
)
if on_demand_feature_view_proto.meta.HasField("last_updated_timestamp"):
on_demand_feature_view_obj.last_updated_timestamp = (
on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()
)
return on_demand_feature_view_obj
def get_request_data_schema(self) -> Dict[str, ValueType]:
schema: Dict[str, ValueType] = {}
for request_source in self.source_request_sources.values():
if isinstance(request_source.schema, List):
new_schema = {}
for field in request_source.schema:
new_schema[field.name] = field.dtype.to_value_type()
schema.update(new_schema)
elif isinstance(request_source.schema, Dict):
schema.update(request_source.schema)
else:
raise Exception(
f"Request source schema is not correct type: ${str(type(request_source.schema))}"
)
return schema
def get_transformed_features_df(
self, df_with_features: pd.DataFrame, full_feature_names: bool = False,
) -> pd.DataFrame:
columns_to_cleanup = []
for source_fv_projection in self.source_feature_view_projections.values():
for feature in source_fv_projection.features:
full_feature_ref = f"{source_fv_projection.name}__{feature.name}"
if full_feature_ref in df_with_features.keys():
df_with_features[feature.name] = df_with_features[full_feature_ref]
columns_to_cleanup.append(feature.name)
elif feature.name in df_with_features.keys():
df_with_features[full_feature_ref] = df_with_features[feature.name]
columns_to_cleanup.append(full_feature_ref)
df_with_transformed_features = self.udf.__call__(df_with_features)
rename_columns: Dict[str, str] = {}
for feature in self.features:
short_name = feature.name
long_name = f"{self.projection.name_to_use()}__{feature.name}"
if (
short_name in df_with_transformed_features.columns
and full_feature_names
):
rename_columns[short_name] = long_name
elif not full_feature_names:
rename_columns[long_name] = short_name
df_with_features.drop(columns=columns_to_cleanup, inplace=True)
return df_with_transformed_features.rename(columns=rename_columns)
def infer_features(self):
df = pd.DataFrame()
for feature_view_projection in self.source_feature_view_projections.values():
for feature in feature_view_projection.features:
dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())
df[f"{feature_view_projection.name}__{feature.name}"] = pd.Series(
dtype=dtype
)
df[f"{feature.name}"] = pd.Series(dtype=dtype)
for request_data in self.source_request_sources.values():
for field in request_data.schema:
dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())
df[f"{field.name}"] = pd.Series(dtype=dtype)
output_df: pd.DataFrame = self.udf.__call__(df)
inferred_features = []
for f, dt in zip(output_df.columns, output_df.dtypes):
inferred_features.append(
Field(
name=f,
dtype=from_value_type(
python_type_to_feast_value_type(f, type_name=str(dt))
),
)
)
if self.features:
missing_features = []
for specified_features in self.features:
if specified_features not in inferred_features:
missing_features.append(specified_features)
if missing_features:
raise SpecifiedFeaturesNotPresentError(
[f.name for f in missing_features], self.name
)
else:
self.features = inferred_features
if not self.features:
raise RegistryInferenceFailure(
"OnDemandFeatureView",
f"Could not infer Features for the feature view '{self.name}'.",
)
@staticmethod
def get_requested_odfvs(feature_refs, project, registry):
all_on_demand_feature_views = registry.list_on_demand_feature_views(
project, allow_cache=True
)
requested_on_demand_feature_views: List[OnDemandFeatureView] = []
for odfv in all_on_demand_feature_views:
for feature in odfv.features:
if f"{odfv.name}:{feature.name}" in feature_refs:
requested_on_demand_feature_views.append(odfv)
break
return requested_on_demand_feature_views
def on_demand_feature_view(
*args,
features: Optional[List[Feature]] = None,
sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
schema: Optional[List[Field]] = None,
description: str = "",
tags: Optional[Dict[str, str]] = None,
owner: str = "",
):
positional_attributes = ["features", "inputs"]
_schema = schema or []
if len(_schema) == 0 and features is not None:
_schema = [Field.from_feature(feature) for feature in features]
if features is not None:
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
_sources = sources or inputs
if inputs and sources:
raise ValueError("At most one of `sources` or `inputs` can be specified.")
elif inputs:
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if args:
warnings.warn(
(
"On demand feature view parameters should be specified as keyword arguments "
"instead of positional arguments. Feast 0.23 and onwards will not support "
"positional arguments in on demand feature view definitions."
),
DeprecationWarning,
)
if len(args) > len(positional_attributes):
raise ValueError(
f"Only {', '.join(positional_attributes)} are allowed as positional args "
f"when defining feature views, for backwards compatibility."
)
if len(args) >= 1:
_schema = args[0]
if len(_schema) > 0 and isinstance(_schema[0], Feature):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
if len(args) >= 2:
_sources = args[1]
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if not _sources:
raise ValueError("The `sources` parameter must be specified.")
def decorator(user_function):
on_demand_feature_view_obj = OnDemandFeatureView(
name=user_function.__name__,
sources=_sources,
schema=_schema,
udf=user_function,
description=description,
tags=tags,
owner=owner,
)
functools.update_wrapper(
wrapper=on_demand_feature_view_obj, wrapped=user_function
)
return on_demand_feature_view_obj
return decorator
| true
| true
|
79089232b5368ff1978581e4108556255ea57c67
| 3,794
|
py
|
Python
|
backend/settings.py
|
jesusmaherrera/django-nuxtjs
|
f8d9500fb236c4cd938e9a6bbaf8063e545dd6fe
|
[
"MIT"
] | null | null | null |
backend/settings.py
|
jesusmaherrera/django-nuxtjs
|
f8d9500fb236c4cd938e9a6bbaf8063e545dd6fe
|
[
"MIT"
] | null | null | null |
backend/settings.py
|
jesusmaherrera/django-nuxtjs
|
f8d9500fb236c4cd938e9a6bbaf8063e545dd6fe
|
[
"MIT"
] | null | null | null |
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from datetime import timedelta
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2(iwreobf4b(-=h_p=^!obgxdgn3_*s!17=_3wc4dun9_y^q+c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'backend.core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_URL = "/api/v1/signin"
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(minutes=60),
"REFRESH_TOKEN_LIFETIME": timedelta(days=2),
}
CORS_ORIGIN_WHITELIST = ["http://localhost:3000", "http://127.0.0.1:3000"]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": ["rest_framework_simplejwt.authentication.JWTAuthentication"],
"DEFAULT_RENDERER_CLASSES": ["rest_framework.renderers.JSONRenderer"],
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.DjangoModelPermissions",),
}
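The SIMPLE_JWT lifetimes above only take effect once token endpoints are routed. A companion sketch for urls.py, using the stock simplejwt views; the exact paths are illustrative assumptions (this project's urls.py is not shown), chosen to line up with LOGIN_URL:
from django.urls import path
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView

urlpatterns = [
    path("api/v1/signin", TokenObtainPairView.as_view(), name="token_obtain_pair"),
    path("api/v1/refresh", TokenRefreshView.as_view(), name="token_refresh"),
]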
| 27.1
| 100
| 0.707433
|
from pathlib import Path
from datetime import timedelta
import os
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '2(iwreobf4b(-=h_p=^!obgxdgn3_*s!17=_3wc4dun9_y^q+c'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'backend.core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_URL = "/api/v1/signin"
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(minutes=60),
"REFRESH_TOKEN_LIFETIME": timedelta(days=2),
}
CORS_ORIGIN_WHITELIST = ["http://localhost:3000", "http://127.0.0.1:3000"]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": ["rest_framework_simplejwt.authentication.JWTAuthentication"],
"DEFAULT_RENDERER_CLASSES": ["rest_framework.renderers.JSONRenderer"],
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.DjangoModelPermissions",),
}
| true
| true
|
790892d7dd0cd652cb37f1aeebff79a3c0d23795
| 5,852
|
py
|
Python
|
pytorch-pretrained-bert/src/gen_pt_squad.py
|
lianapanatau/BERT-for-RRC-ABSA
|
d31d81d5f9dce594a23f256199988fc2a11ce016
|
[
"Apache-2.0"
] | 425
|
2019-03-31T02:22:29.000Z
|
2022-03-26T06:55:34.000Z
|
pytorch-pretrained-bert/src/gen_pt_squad.py
|
lianapanatau/BERT-for-RRC-ABSA
|
d31d81d5f9dce594a23f256199988fc2a11ce016
|
[
"Apache-2.0"
] | 23
|
2019-04-27T09:26:08.000Z
|
2021-11-10T10:18:30.000Z
|
pytorch-pretrained-bert/src/gen_pt_squad.py
|
lianapanatau/BERT-for-RRC-ABSA
|
d31d81d5f9dce594a23f256199988fc2a11ce016
|
[
"Apache-2.0"
] | 86
|
2019-04-09T06:41:29.000Z
|
2022-03-14T02:11:56.000Z
|
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and authors from University of Illinois at Chicago.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
import squad_data_utils as data_utils
import modelconfig
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def gen(args):
tokenizer = BertTokenizer.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model] )
train_examples = data_utils.read_squad_examples(os.path.join(args.input_dir, "train.json"), is_training=True)
train_features = data_utils.convert_examples_to_features(
train_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
input_ids_np = np.array([f.input_ids for f in train_features], dtype=np.int16)
segment_ids_np = np.array([f.segment_ids for f in train_features], dtype=np.int16)
input_mask_np = np.array([f.input_mask for f in train_features], dtype=np.int16)
start_positions_np = np.array([f.start_position for f in train_features], dtype=np.int16)
end_positions_np = np.array([f.end_position for f in train_features], dtype=np.int16)
np.savez_compressed(os.path.join(args.output_dir, "data.npz"),
input_ids=input_ids_np,
segment_ids = segment_ids_np,
input_mask = input_mask_np,
start_positions = start_positions_np,
end_positions = end_positions_np)
#>>>>> validation
valid_examples=data_utils.read_squad_examples(os.path.join(args.input_dir,"dev.json"), is_training=True)
valid_features = data_utils.convert_examples_to_features(
valid_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
logger.info(" Num orig examples = %d", len(valid_examples))
logger.info(" Num split examples = %d", len(valid_features))
valid_input_ids_np = np.array([f.input_ids for f in valid_features], dtype=np.int16)
valid_segment_ids_np = np.array([f.segment_ids for f in valid_features], dtype=np.int16)
valid_input_mask_np = np.array([f.input_mask for f in valid_features], dtype=np.int16)
valid_start_positions_np = np.array([f.start_position for f in valid_features], dtype=np.int16)
valid_end_positions_np = np.array([f.end_position for f in valid_features], dtype=np.int16)
np.savez_compressed(os.path.join(args.output_dir, "dev.npz"),
input_ids=valid_input_ids_np,
segment_ids = valid_segment_ids_np,
input_mask = valid_input_mask_np,
start_positions = valid_start_positions_np,
end_positions = valid_end_positions_np)
#<<<<< end of validation declaration
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--bert-model", default='bert-base', type=str)
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=320,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument('--seed',
type=int,
default=0,
help="random seed for initialization")
parser.add_argument('--doc_stride',
type=int,
default=128)
parser.add_argument('--max_query_length',
type=int,
default=30)
parser.add_argument('--max_answer_length',
type=int,
default=30)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
gen(args)
if __name__=="__main__":
main()
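# Editor's example: a minimal sketch of reading the arrays that gen() writes;
# "output_dir" stands in for whatever --output_dir was passed.
import numpy as np

data = np.load("output_dir/data.npz")
input_ids = data["input_ids"]            # shape (num_features, max_seq_length)
start_positions = data["start_positions"]
print(input_ids.shape, start_positions.dtype)  # dtype is int16, as saved above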
| 42.100719
| 134
| 0.641319
|
import os
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
import squad_data_utils as data_utils
import modelconfig
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def gen(args):
tokenizer = BertTokenizer.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model] )
train_examples = data_utils.read_squad_examples(os.path.join(args.input_dir, "train.json"), is_training=True)
train_features = data_utils.convert_examples_to_features(
train_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
input_ids_np = np.array([f.input_ids for f in train_features], dtype=np.int16)
segment_ids_np = np.array([f.segment_ids for f in train_features], dtype=np.int16)
input_mask_np = np.array([f.input_mask for f in train_features], dtype=np.int16)
start_positions_np = np.array([f.start_position for f in train_features], dtype=np.int16)
end_positions_np = np.array([f.end_position for f in train_features], dtype=np.int16)
np.savez_compressed(os.path.join(args.output_dir, "data.npz"),
input_ids=input_ids_np,
segment_ids = segment_ids_np,
input_mask = input_mask_np,
start_positions = start_positions_np,
end_positions = end_positions_np)
valid_examples=data_utils.read_squad_examples(os.path.join(args.input_dir,"dev.json"), is_training=True)
valid_features = data_utils.convert_examples_to_features(
valid_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
logger.info(" Num orig examples = %d", len(valid_examples))
logger.info(" Num split examples = %d", len(valid_features))
valid_input_ids_np = np.array([f.input_ids for f in valid_features], dtype=np.int16)
valid_segment_ids_np = np.array([f.segment_ids for f in valid_features], dtype=np.int16)
valid_input_mask_np = np.array([f.input_mask for f in valid_features], dtype=np.int16)
valid_start_positions_np = np.array([f.start_position for f in valid_features], dtype=np.int16)
valid_end_positions_np = np.array([f.end_position for f in valid_features], dtype=np.int16)
np.savez_compressed(os.path.join(args.output_dir, "dev.npz"),
input_ids=valid_input_ids_np,
segment_ids = valid_segment_ids_np,
input_mask = valid_input_mask_np,
start_positions = valid_start_positions_np,
end_positions = valid_end_positions_np)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--bert-model", default='bert-base', type=str)
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--max_seq_length",
default=320,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument('--seed',
type=int,
default=0,
help="random seed for initialization")
parser.add_argument('--doc_stride',
type=int,
default=128)
parser.add_argument('--max_query_length',
type=int,
default=30)
parser.add_argument('--max_answer_length',
type=int,
default=30)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
gen(args)
if __name__=="__main__":
main()
| true
| true
|
790892ec8c9e4b914f99286d9ace57de42933776
| 4,932
|
py
|
Python
|
src/fairsharing_client/api.py
|
cthoyt/fairsharing-client
|
c5a7a7caeb488b5fe3693057e2fd4a3ad4e792e0
|
[
"MIT"
] | null | null | null |
src/fairsharing_client/api.py
|
cthoyt/fairsharing-client
|
c5a7a7caeb488b5fe3693057e2fd4a3ad4e792e0
|
[
"MIT"
] | null | null | null |
src/fairsharing_client/api.py
|
cthoyt/fairsharing-client
|
c5a7a7caeb488b5fe3693057e2fd4a3ad4e792e0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Access to FAIRsharing via its API.
.. seealso:: https://beta.fairsharing.org/API_doc
"""
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pystow
import requests
import yaml
from tqdm import tqdm
__all__ = [
"ensure_fairsharing",
"load_fairsharing",
"FairsharingClient",
]
PATH = pystow.join("bio", "fairsharing", name="fairsharing.yaml")
def load_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
"""Get the FAIRsharing registry."""
path = ensure_fairsharing(force_download=force_download, use_tqdm=use_tqdm, **kwargs)
with path.open() as file:
return yaml.safe_load(file)
def ensure_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
"""Get the FAIRsharing registry."""
if PATH.exists() and not force_download:
return PATH
client = FairsharingClient(**kwargs)
# As of 2021-12-13, there are a bit less than 4k records that take about 3 minutes to download
rv = {
row["prefix"]: row
for row in tqdm(
client.iter_records(),
unit_scale=True,
unit="record",
desc="Downloading FAIRsharing",
disable=not use_tqdm,
)
}
with PATH.open("w") as file:
yaml.safe_dump(rv, file, allow_unicode=True, sort_keys=True)
return PATH
# These fields are the same in each record
REDUNDANT_FIELDS = {
"fairsharing-licence",
}
class FairsharingClient:
"""A client for programmatic access to the FAIRsharing private API."""
def __init__(
self,
login: Optional[str] = None,
password: Optional[str] = None,
base_url: Optional[str] = None,
):
"""Instantiate the client and get an appropriate JWT token.
:param login: FAIRsharing username
:param password: Corresponding FAIRsharing password
:param base_url: The base URL
"""
self.base_url = base_url or "https://api.fairsharing.org"
self.signin_url = f"{self.base_url}/users/sign_in"
self.records_url = f"{self.base_url}/fairsharing_records"
self.username = pystow.get_config(
"fairsharing", "login", passthrough=login, raise_on_missing=True
)
self.password = pystow.get_config(
"fairsharing", "password", passthrough=password, raise_on_missing=True
)
self.jwt = self.get_jwt()
self.session = requests.Session()
self.session.headers.update(
{
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {self.jwt}",
}
)
def get_jwt(self) -> str:
"""Get the JWT."""
payload = {
"user": {
"login": self.username,
"password": self.password,
},
}
res = requests.post(self.signin_url, json=payload).json()
return res["jwt"]
def iter_records(self) -> Iterable[Mapping[str, Any]]:
"""Iterate over all FAIRsharing records."""
yield from self._iter_records_helper(self.records_url)
def _preprocess_record(
self, record: MutableMapping[str, Any]
) -> Optional[MutableMapping[str, Any]]:
if "type" in record:
del record["type"]
record = {"id": record["id"], **record["attributes"]}
doi = record.get("doi")
if doi is None:
# Records without a DOI can't be resolved
url = record["url"]
if not url.startswith("https://fairsharing.org/fairsharing_records/"):
tqdm.write(f"{record['id']} has no DOI: {record['url']}")
return None
elif doi.startswith("10.25504/"):
record["prefix"] = record.pop("doi")[len("10.25504/") :]
else:
tqdm.write(f"DOI has unexpected prefix: {record['doi']}")
record["description"] = _removeprefix(
record.get("description"), "This FAIRsharing record describes: "
)
record["name"] = _removeprefix(record.get("name"), "FAIRsharing record for: ")
for key in REDUNDANT_FIELDS:
if key in record:
del record[key]
return record
def _iter_records_helper(self, url: str) -> Iterable[Mapping[str, Any]]:
res = self.session.get(url).json()
for record in res["data"]:
yv = self._preprocess_record(record)
if yv:
yield yv
next_url = res["links"].get("next")
if next_url:
yield from self._iter_records_helper(next_url)
def _removeprefix(s: Optional[str], prefix) -> Optional[str]:
if s is None:
return None
if s.startswith(prefix):
return s[len(prefix) :]
return s
if __name__ == "__main__":
ensure_fairsharing(force_download=True)
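# Editor's example: typical use of the helpers above. The login/password must
# be resolvable by pystow.get_config, and the prefix key shown is purely
# illustrative.
from fairsharing_client.api import load_fairsharing

registry = load_fairsharing()  # downloads once, then reads the cached YAML
record = registry.get("FAIRsharing.example")  # hypothetical prefix
if record:
    print(record["name"], record.get("description"))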
| 31.414013
| 98
| 0.59854
|
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pystow
import requests
import yaml
from tqdm import tqdm
__all__ = [
"ensure_fairsharing",
"load_fairsharing",
"FairsharingClient",
]
PATH = pystow.join("bio", "fairsharing", name="fairsharing.yaml")
def load_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
path = ensure_fairsharing(force_download=force_download, use_tqdm=use_tqdm, **kwargs)
with path.open() as file:
return yaml.safe_load(file)
def ensure_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
if PATH.exists() and not force_download:
return PATH
client = FairsharingClient(**kwargs)
rv = {
row["prefix"]: row
for row in tqdm(
client.iter_records(),
unit_scale=True,
unit="record",
desc="Downloading FAIRsharing",
disable=not use_tqdm,
)
}
with PATH.open("w") as file:
yaml.safe_dump(rv, file, allow_unicode=True, sort_keys=True)
return PATH
REDUNDANT_FIELDS = {
"fairsharing-licence",
}
class FairsharingClient:
def __init__(
self,
login: Optional[str] = None,
password: Optional[str] = None,
base_url: Optional[str] = None,
):
self.base_url = base_url or "https://api.fairsharing.org"
self.signin_url = f"{self.base_url}/users/sign_in"
self.records_url = f"{self.base_url}/fairsharing_records"
self.username = pystow.get_config(
"fairsharing", "login", passthrough=login, raise_on_missing=True
)
self.password = pystow.get_config(
"fairsharing", "password", passthrough=password, raise_on_missing=True
)
self.jwt = self.get_jwt()
self.session = requests.Session()
self.session.headers.update(
{
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {self.jwt}",
}
)
def get_jwt(self) -> str:
payload = {
"user": {
"login": self.username,
"password": self.password,
},
}
res = requests.post(self.signin_url, json=payload).json()
return res["jwt"]
def iter_records(self) -> Iterable[Mapping[str, Any]]:
yield from self._iter_records_helper(self.records_url)
def _preprocess_record(
self, record: MutableMapping[str, Any]
) -> Optional[MutableMapping[str, Any]]:
if "type" in record:
del record["type"]
record = {"id": record["id"], **record["attributes"]}
doi = record.get("doi")
if doi is None:
url = record["url"]
if not url.startswith("https://fairsharing.org/fairsharing_records/"):
tqdm.write(f"{record['id']} has no DOI: {record['url']}")
return None
elif doi.startswith("10.25504/"):
record["prefix"] = record.pop("doi")[len("10.25504/") :]
else:
tqdm.write(f"DOI has unexpected prefix: {record['doi']}")
record["description"] = _removeprefix(
record.get("description"), "This FAIRsharing record describes: "
)
record["name"] = _removeprefix(record.get("name"), "FAIRsharing record for: ")
for key in REDUNDANT_FIELDS:
if key in record:
del record[key]
return record
def _iter_records_helper(self, url: str) -> Iterable[Mapping[str, Any]]:
res = self.session.get(url).json()
for record in res["data"]:
yv = self._preprocess_record(record)
if yv:
yield yv
next_url = res["links"].get("next")
if next_url:
yield from self._iter_records_helper(next_url)
def _removeprefix(s: Optional[str], prefix) -> Optional[str]:
if s is None:
return None
if s.startswith(prefix):
return s[len(prefix) :]
return s
if __name__ == "__main__":
ensure_fairsharing(force_download=True)
| true
| true
|
790893c4293b9fc10e5e43e98250c6d68c96c7fc
| 429
|
py
|
Python
|
shrike/compliant_logging/constants.py
|
Anbang-Hu/shrike
|
78189984c85696a9a9feaadb72aa471cf2409796
|
[
"MIT"
] | 27
|
2021-05-27T00:01:24.000Z
|
2022-01-30T19:55:24.000Z
|
shrike/compliant_logging/constants.py
|
Anbang-Hu/shrike
|
78189984c85696a9a9feaadb72aa471cf2409796
|
[
"MIT"
] | 284
|
2021-05-12T22:26:41.000Z
|
2022-02-23T21:18:34.000Z
|
shrike/compliant_logging/constants.py
|
Anbang-Hu/shrike
|
78189984c85696a9a9feaadb72aa471cf2409796
|
[
"MIT"
] | 5
|
2021-06-02T04:51:47.000Z
|
2021-12-20T17:07:41.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Constant values used by this library.
"""
from enum import Enum
class DataCategory(Enum):
"""
Enumeration of data categories in compliant machine learning.
Values:
- PRIVATE: data which is private. Researchers may not view this.
- PUBLIC: data which may safely be viewed by researchers.
"""
PRIVATE = 1
PUBLIC = 2
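# Editor's example: a hedged sketch of branching on the enum above; scrub()
# is a hypothetical helper, not part of shrike.
from shrike.compliant_logging.constants import DataCategory

def scrub(value, category):
    # Only PUBLIC data may be surfaced to researchers.
    return value if category is DataCategory.PUBLIC else "<redacted>"

print(scrub("auc=0.93", DataCategory.PUBLIC))        # auc=0.93
print(scrub("patient_id=42", DataCategory.PRIVATE))  # <redacted>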
| 19.5
| 68
| 0.687646
|
from enum import Enum
class DataCategory(Enum):
PRIVATE = 1
PUBLIC = 2
| true
| true
|
7908940a5dc50383e71178011fbc5ed2d2d03c8a
| 410
|
py
|
Python
|
indicadordepassagem.py
|
renarfreitas/Coursera
|
64b766175eb26aef4a4a68b7d25309c7aa0f136b
|
[
"MIT"
] | null | null | null |
indicadordepassagem.py
|
renarfreitas/Coursera
|
64b766175eb26aef4a4a68b7d25309c7aa0f136b
|
[
"MIT"
] | null | null | null |
indicadordepassagem.py
|
renarfreitas/Coursera
|
64b766175eb26aef4a4a68b7d25309c7aa0f136b
|
[
"MIT"
] | null | null | null |
decrescente = True
anterior = int(input("Enter the first number in the sequence: "))
valor = 1
while valor != 0 and decrescente:
    valor = int(input("Enter the next number in the sequence: "))
    if valor > anterior:
        decrescente = False
    anterior = valor
if decrescente:
    print("The sequence is in decreasing order! :-) ")
else:
    print("The sequence is not in decreasing order! :-)")
| 25.625
| 65
| 0.673171
|
decrescente = True
anterior = int(input("Enter the first number in the sequence: "))
valor = 1
while valor != 0 and decrescente:
    valor = int(input("Enter the next number in the sequence: "))
    if valor > anterior:
        decrescente = False
    anterior = valor
if decrescente:
    print("The sequence is in decreasing order! :-) ")
else:
    print("The sequence is not in decreasing order! :-)")
| true
| true
|
79089504bee02d9e3eeed70d880013a27c3afe44
| 434
|
py
|
Python
|
prpr/config.py
|
salmiakki/prpr
|
2a50c1aa9e3799ec915e56323bb9fce15727d530
|
[
"MIT"
] | 2
|
2021-05-09T20:24:36.000Z
|
2021-05-12T09:01:07.000Z
|
prpr/config.py
|
salmiakki/prpr
|
2a50c1aa9e3799ec915e56323bb9fce15727d530
|
[
"MIT"
] | 35
|
2021-05-15T12:26:44.000Z
|
2021-08-30T10:06:47.000Z
|
prpr/config.py
|
salmiakki/prpr
|
2a50c1aa9e3799ec915e56323bb9fce15727d530
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import yaml
from loguru import logger
CONFIG_FILENAME = ".prpr.yaml"
def get_config():
config_path = Path.home() / CONFIG_FILENAME
if not config_path.exists():
logger.error(f"{CONFIG_FILENAME} not found in your home directory 😿")
exit(1)
logger.debug(f"Reading config from {config_path}...")
with open(config_path) as f:
return yaml.load(f, Loader=yaml.SafeLoader)
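# Editor's example: get_config() simply parses ~/.prpr.yaml into a dict, so a
# (made-up) file containing e.g. "host: https://st.example.org" would yield:
from prpr.config import get_config

config = get_config()
print(config.get("host"))  # key names depend entirely on your own .prpr.yaml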
| 25.529412
| 77
| 0.693548
|
from pathlib import Path
import yaml
from loguru import logger
CONFIG_FILENAME = ".prpr.yaml"
def get_config():
config_path = Path.home() / CONFIG_FILENAME
if not config_path.exists():
logger.error(f"{CONFIG_FILENAME} not found in your home directory 😿")
exit(1)
logger.debug(f"Reading config from {config_path}...")
with open(config_path) as f:
return yaml.load(f, Loader=yaml.SafeLoader)
| true
| true
|
7908958cd40a79eff1a370dfe566122bbf36b6ca
| 540
|
py
|
Python
|
hospital/manage.py
|
thirdgroup/Hospital
|
4d8c84600a56eccfcdbf9482927ce2f36ee59f96
|
[
"Apache-2.0"
] | null | null | null |
hospital/manage.py
|
thirdgroup/Hospital
|
4d8c84600a56eccfcdbf9482927ce2f36ee59f96
|
[
"Apache-2.0"
] | null | null | null |
hospital/manage.py
|
thirdgroup/Hospital
|
4d8c84600a56eccfcdbf9482927ce2f36ee59f96
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hospital.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.75
| 73
| 0.687037
|
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hospital.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true
| true
|
7908958fba0447951654d7fcc10551322a175cac
| 4,364
|
py
|
Python
|
src/batou/lib/mercurial.py
|
risclog-solution/batou
|
2d149371ef78e4ca8368c3a9067452cd54318314
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2020-12-23T18:26:46.000Z
|
2020-12-23T18:26:46.000Z
|
src/batou/lib/mercurial.py
|
risclog-solution/batou
|
2d149371ef78e4ca8368c3a9067452cd54318314
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/batou/lib/mercurial.py
|
risclog-solution/batou
|
2d149371ef78e4ca8368c3a9067452cd54318314
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from batou import UpdateNeeded, output
from batou.component import Component
from batou.lib.file import Directory
from batou.utils import CmdExecutionError
import os.path
import re
class Clone(Component):
namevar = "url"
target = "."
revision = None
branch = None
vcs_update = True
_revision_pattern = re.compile(r"changeset: +\d+:([a-f0-9]+)")
def configure(self):
if (not self.revision_or_branch) or (self.revision and self.branch):
raise ValueError(
"Clone(%s) needs exactly one of revision or branch" % self.url)
self.target = self.map(self.target)
self += Directory(self.target)
def verify(self):
with self.chdir(self.target):
if not os.path.exists(".hg"):
raise UpdateNeeded()
if not self.vcs_update:
return
if self.has_outgoing_changesets():
output.annotate(
"Hg clone at {} has outgoing changesets.".format(
self.target),
red=True,
)
if self.has_changes():
output.annotate(
"Hg clone at {} is dirty, going to lose changes.".format(
self.target),
red=True,
)
raise UpdateNeeded()
if self.revision:
long_rev = len(self.revision) == 40
if self.current_revision(long_rev) != self.revision:
raise UpdateNeeded()
if self.branch and (self.current_branch() != self.branch
or self.has_incoming_changesets()):
raise UpdateNeeded()
@property
def revision_or_branch(self):
# Mercurial often takes either a revision or a branch.
return self.revision or self.branch
def current_revision(self, long=False):
debug = "--debug" if long else ""
stdout, stderr = self.cmd(
self.expand(
"LANG=C hg --cwd {{component.target}} {{debug}} parent | "
"grep changeset:",
debug=debug,
))
match = self._revision_pattern.search(stdout)
if not match:
return None
return match.group(1)
def current_branch(self):
stdout, stderr = self.cmd("hg branch")
return stdout.strip()
def has_incoming_changesets(self):
try:
self.cmd("hg incoming -q -l1")
except CmdExecutionError as e:
if e.returncode == 1:
return False
raise
return True
def has_outgoing_changesets(self):
try:
with self.chdir(self.target):
self.cmd("hg outgoing -q -l1")
except CmdExecutionError as e:
if e.returncode == 1:
return False
raise
return True
def has_changes(self):
with self.chdir(self.target):
stdout, stderr = self.cmd("hg status")
return bool(stdout.strip())
def update(self):
with self.chdir(self.target):
if not os.path.exists(".hg"):
self.cmd(
self.expand("hg clone -u {{component.revision_or_branch}} "
"{{component.url}} ."))
return
self.cmd(
self.expand("hg pull --rev {{component.revision_or_branch}}"))
for filepath in self.untracked_files():
os.unlink(os.path.join(self.target, filepath))
self.cmd(
self.expand(
"hg update --clean --rev {{component.revision_or_branch}}")
)
def untracked_files(self):
stdout, stderr = self.cmd("hg status -q -u")
items = (line.split(None, 1) for line in stdout.splitlines())
return [filepath for status, filepath in items if status == "?"]
def last_updated(self):
with self.chdir(self.target):
if not os.path.exists(".hg"):
return None
stdout, stderr = self.cmd(
'hg log -r %s --template "{date|hgdate}\n"' %
self.current_revision())
timestamp, offset = stdout.split()
return float(timestamp) - float(offset)
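# Editor's example: a hedged sketch of wiring Clone into a batou component of
# your own; the URL, target and branch below are placeholders.
from batou.component import Component
from batou.lib.mercurial import Clone

class Checkout(Component):
    def configure(self):
        # Clone.configure() enforces exactly one of revision= / branch=.
        self += Clone(
            "https://hg.example.com/myproject",
            target="src",
            branch="default",
        )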
| 32.81203
| 79
| 0.528873
|
from batou import UpdateNeeded, output
from batou.component import Component
from batou.lib.file import Directory
from batou.utils import CmdExecutionError
import os.path
import re
class Clone(Component):
namevar = "url"
target = "."
revision = None
branch = None
vcs_update = True
_revision_pattern = re.compile(r"changeset: +\d+:([a-f0-9]+)")
def configure(self):
if (not self.revision_or_branch) or (self.revision and self.branch):
raise ValueError(
"Clone(%s) needs exactly one of revision or branch" % self.url)
self.target = self.map(self.target)
self += Directory(self.target)
def verify(self):
with self.chdir(self.target):
if not os.path.exists(".hg"):
raise UpdateNeeded()
if not self.vcs_update:
return
if self.has_outgoing_changesets():
output.annotate(
"Hg clone at {} has outgoing changesets.".format(
self.target),
red=True,
)
if self.has_changes():
output.annotate(
"Hg clone at {} is dirty, going to lose changes.".format(
self.target),
red=True,
)
raise UpdateNeeded()
if self.revision:
long_rev = len(self.revision) == 40
if self.current_revision(long_rev) != self.revision:
raise UpdateNeeded()
if self.branch and (self.current_branch() != self.branch
or self.has_incoming_changesets()):
raise UpdateNeeded()
@property
def revision_or_branch(self):
return self.revision or self.branch
def current_revision(self, long=False):
debug = "--debug" if long else ""
stdout, stderr = self.cmd(
self.expand(
"LANG=C hg --cwd {{component.target}} {{debug}} parent | "
"grep changeset:",
debug=debug,
))
match = self._revision_pattern.search(stdout)
if not match:
return None
return match.group(1)
def current_branch(self):
stdout, stderr = self.cmd("hg branch")
return stdout.strip()
def has_incoming_changesets(self):
try:
self.cmd("hg incoming -q -l1")
except CmdExecutionError as e:
if e.returncode == 1:
return False
raise
return True
def has_outgoing_changesets(self):
try:
with self.chdir(self.target):
self.cmd("hg outgoing -q -l1")
except CmdExecutionError as e:
if e.returncode == 1:
return False
raise
return True
def has_changes(self):
with self.chdir(self.target):
stdout, stderr = self.cmd("hg status")
return bool(stdout.strip())
def update(self):
with self.chdir(self.target):
if not os.path.exists(".hg"):
self.cmd(
self.expand("hg clone -u {{component.revision_or_branch}} "
"{{component.url}} ."))
return
self.cmd(
self.expand("hg pull --rev {{component.revision_or_branch}}"))
for filepath in self.untracked_files():
os.unlink(os.path.join(self.target, filepath))
self.cmd(
self.expand(
"hg update --clean --rev {{component.revision_or_branch}}")
)
def untracked_files(self):
stdout, stderr = self.cmd("hg status -q -u")
items = (line.split(None, 1) for line in stdout.splitlines())
return [filepath for status, filepath in items if status == "?"]
def last_updated(self):
with self.chdir(self.target):
if not os.path.exists(".hg"):
return None
stdout, stderr = self.cmd(
'hg log -r %s --template "{date|hgdate}\n"' %
self.current_revision())
timestamp, offset = stdout.split()
return float(timestamp) - float(offset)
| true
| true
|
790895a9c0092fc422fdfecdb2af9ea102909e34
| 86,459
|
py
|
Python
|
ThirdParty/AutobahnPython/autobahn/wamp/message.py
|
inviCRO/VTK
|
a2dc2e79d4ecb8f6da900535b32e1a2a702c7f48
|
[
"BSD-3-Clause"
] | 1
|
2021-12-02T07:23:36.000Z
|
2021-12-02T07:23:36.000Z
|
ThirdParty/AutobahnPython/autobahn/wamp/message.py
|
inviCRO/VTK
|
a2dc2e79d4ecb8f6da900535b32e1a2a702c7f48
|
[
"BSD-3-Clause"
] | null | null | null |
ThirdParty/AutobahnPython/autobahn/wamp/message.py
|
inviCRO/VTK
|
a2dc2e79d4ecb8f6da900535b32e1a2a702c7f48
|
[
"BSD-3-Clause"
] | 1
|
2021-12-02T07:29:15.000Z
|
2021-12-02T07:29:15.000Z
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
def check_or_raise_uri(value, message):
if type(value) != six.text_type:
raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
if not _URI_PAT_LOOSE.match(value):
raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
return value
def check_or_raise_id(value, message):
if type(value) not in six.integer_types:
raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
if value < 0 or value > 9007199254740992: # 2**53
raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
return value
def check_or_raise_extra(value, message):
if type(value) != dict:
raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
for k in value.keys():
if type(k) != six.text_type:
raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(k), k))
return value
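# Editor's example: how the validators above behave (names are in scope in
# this module); the URIs and IDs are made up.
check_or_raise_uri(u"com.example.topic", "test")  # returns the URI unchanged
check_or_raise_id(42, "test")                     # IDs must lie in [0, 2**53]
try:
    check_or_raise_uri(u"has space", "test")      # spaces fail _URI_PAT_LOOSE
except ProtocolError as err:
    print(err)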
class Message(util.EqualityMixin):
"""
WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.
.. note:: This is not supposed to be instantiated.
"""
def __init__(self):
## serialization cache: mapping from ISerializer instances to serialized bytes
self._serialized = {}
def uncache(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
"""
self._serialized = {}
def serialize(self, serializer):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
"""
## only serialize if not cached ..
if not serializer in self._serialized:
self._serialized[serializer] = serializer.serialize(self.marshal())
return self._serialized[serializer]
IMessage.register(Message)
class Hello(Message):
"""
A WAMP ``HELLO`` message.
Format: ``[HELLO, Realm|uri, Details|dict]``
"""
MESSAGE_TYPE = 1
"""
The WAMP message code for this type of message.
"""
def __init__(self, realm, roles, authmethods = None, authid = None):
"""
:param realm: The URI of the WAMP realm to join.
:type realm: unicode
:param roles: The WAMP roles to announce.
:type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
:param authmethods: The authentication methods to announce.
:type authmethods: list of unicode or None
:param authid: The authentication ID to announce.
:type authid: unicode or None
"""
assert(type(realm) == six.text_type)
assert(type(roles) == list)
for role in roles:
assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
if authmethods:
assert(type(authmethods) == list)
for authmethod in authmethods:
assert(type(authmethod) == six.text_type)
assert(authid is None or type(authid) == six.text_type)
Message.__init__(self)
self.realm = realm
self.roles = roles
self.authmethods = authmethods
self.authid = authid
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
roles = []
if not u'roles' in details:
raise ProtocolError("missing mandatory roles attribute in options in HELLO")
details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
if len(details_roles) == 0:
raise ProtocolError("empty 'roles' in 'details' in HELLO")
for role in details_roles:
if role not in ROLE_NAME_TO_CLASS:
raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
role_cls = ROLE_NAME_TO_CLASS[role]
details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
if u'features' in details_role:
check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
## FIXME: skip unknown attributes
role_features = role_cls(**details_role[u'features'])
else:
role_features = role_cls()
roles.append(role_features)
authmethods = None
if u'authmethods' in details:
details_authmethods = details[u'authmethods']
if type(details_authmethods) != list:
raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
for auth_method in details_authmethods:
if type(auth_method) != six.text_type:
raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
authmethods = details_authmethods
authid = None
if u'authid' in details:
details_authid = details[u'authid']
if type(details_authid) != six.text_type:
raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
authid = details_authid
obj = Hello(realm, roles, authmethods, authid)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {u'roles': {}}
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
if self.authmethods:
details[u'authmethods'] = self.authmethods
if self.authid:
details[u'authid'] = self.authid
return [Hello.MESSAGE_TYPE, self.realm, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
"""
A WAMP ``WELCOME`` message.
Format: ``[WELCOME, Session|id, Details|dict]``
"""
MESSAGE_TYPE = 2
"""
The WAMP message code for this type of message.
"""
def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
"""
:param session: The WAMP session ID the other peer is assigned.
:type session: int
:param roles: The WAMP roles to announce.
:type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
:param authid: The authentication ID assigned.
:type authid: unicode or None
:param authrole: The authentication role assigned.
:type authrole: unicode or None
:param authmethod: The authentication method in use.
:type authmethod: unicode or None
        :param authprovider: The authentication provider in use.
:type authprovider: unicode or None
"""
assert(type(session) in six.integer_types)
assert(type(roles) == list)
for role in roles:
assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
assert(authprovider is None or type(authprovider) == six.text_type)
Message.__init__(self)
self.session = session
self.roles = roles
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
authid = details.get(u'authid', None)
authrole = details.get(u'authrole', None)
authmethod = details.get(u'authmethod', None)
authprovider = details.get(u'authprovider', None)
roles = []
if not u'roles' in details:
raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")
if len(details_roles) == 0:
raise ProtocolError("empty 'roles' in 'details' in WELCOME")
for role in details_roles:
if role not in ROLE_NAME_TO_CLASS:
raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
role_cls = ROLE_NAME_TO_CLASS[role]
if u'features' in details_roles[role]:
check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
## FIXME: skip unknown attributes
role_features = role_cls(**details_roles[role][u'features'])
else:
role_features = role_cls()
roles.append(role_features)
obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {
u'roles': {}
}
if self.authid:
details[u'authid'] = self.authid
if self.authrole:
details[u'authrole'] = self.authrole
        if self.authmethod:
details[u'authmethod'] = self.authmethod
if self.authprovider:
details[u'authprovider'] = self.authprovider
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
return [Welcome.MESSAGE_TYPE, self.session, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
"""
A WAMP ``ABORT`` message.
Format: ``[ABORT, Details|dict, Reason|uri]``
"""
MESSAGE_TYPE = 3
"""
The WAMP message code for this type of message.
"""
def __init__(self, reason, message = None):
"""
:param reason: WAMP or application error URI for aborting reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
"""
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
message = None
if u'message' in details:
details_message = details[u'message']
if type(details_message) != six.text_type:
raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
message = details_message
obj = Abort(reason, message)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.message:
details[u'message'] = self.message
return [Abort.MESSAGE_TYPE, details, self.reason]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
"""
A WAMP ``CHALLENGE`` message.
Format: ``[CHALLENGE, Method|string, Extra|dict]``
"""
MESSAGE_TYPE = 4
"""
The WAMP message code for this type of message.
"""
def __init__(self, method, extra = None):
"""
:param method: The authentication method.
:type method: unicode
:param extra: Authentication method specific information.
:type extra: dict or None
"""
assert(type(method) == six.text_type)
assert(extra is None or type(extra) == dict)
Message.__init__(self)
self.method = method
self.extra = extra or {}
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
method = wmsg[1]
if type(method) != six.text_type:
raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
obj = Challenge(method, extra)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Challenge.MESSAGE_TYPE, self.method, self.extra]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
"""
A WAMP ``AUTHENTICATE`` message.
Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
"""
MESSAGE_TYPE = 5
"""
The WAMP message code for this type of message.
"""
def __init__(self, signature, extra = None):
"""
:param signature: The signature for the authentication challenge.
:type signature: unicode
:param extra: Authentication method specific information.
:type extra: dict or None
"""
assert(type(signature) == six.text_type)
assert(extra is None or type(extra) == dict)
Message.__init__(self)
self.signature = signature
self.extra = extra or {}
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
signature = wmsg[1]
if type(signature) != six.text_type:
raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")
obj = Authenticate(signature, extra)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
"""
A WAMP ``GOODBYE`` message.
Format: ``[GOODBYE, Details|dict, Reason|uri]``
"""
MESSAGE_TYPE = 6
"""
The WAMP message code for this type of message.
"""
DEFAULT_REASON = u"wamp.goodbye.normal"
"""
Default WAMP closing reason.
"""
def __init__(self, reason = DEFAULT_REASON, message = None):
"""
:param reason: Optional WAMP or application error URI for closing reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
"""
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
message = None
if u'message' in details:
details_message = details[u'message']
if type(details_message) != six.text_type:
raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
message = details_message
obj = Goodbye(reason, message)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.message:
details[u'message'] = self.message
return [Goodbye.MESSAGE_TYPE, details, self.reason]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
"""
A WAMP ``HEARTBEAT`` message.
Formats:
* ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
* ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
"""
MESSAGE_TYPE = 7
"""
The WAMP message code for this type of message.
"""
def __init__(self, incoming, outgoing, discard = None):
"""
:param incoming: Last incoming heartbeat processed from peer.
:type incoming: int
:param outgoing: Outgoing heartbeat.
:type outgoing: int
:param discard: Optional data that is discarded by peer.
:type discard: unicode or None
"""
assert(type(incoming) in six.integer_types)
assert(type(outgoing) in six.integer_types)
assert(discard is None or type(discard) == six.text_type)
Message.__init__(self)
self.incoming = incoming
self.outgoing = outgoing
self.discard = discard
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
if len(wmsg) not in [3, 4]:
raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
incoming = wmsg[1]
if type(incoming) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
if incoming < 0: # must be non-negative
raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
outgoing = wmsg[2]
if type(outgoing) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
if outgoing <= 0: # must be positive
raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
discard = None
if len(wmsg) > 3:
discard = wmsg[3]
if type(discard) != six.text_type:
raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
obj = Heartbeat(incoming, outgoing, discard = discard)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
if self.discard:
return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
else:
return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
"""
A WAMP ``ERROR`` message.
Formats:
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 8
"""
The WAMP message code for this type of message.
"""
def __init__(self, request_type, request, error, args = None, kwargs = None):
"""
:param request_type: The WAMP message type code for the original request.
:type request_type: int
:param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
:type request: int
:param error: The WAMP or application error URI for the error that occurred.
:type error: unicode
:param args: Positional values for application-defined exception.
Must be serializable using any serializers in use.
:type args: list or None
:param kwargs: Keyword values for application-defined exception.
Must be serializable using any serializers in use.
:type kwargs: dict or None
"""
assert(type(request_type) in six.integer_types)
assert(type(request) in six.integer_types)
assert(type(error) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
Message.__init__(self)
self.request_type = request_type
self.request = request
self.error = error
self.args = args
self.kwargs = kwargs
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
if len(wmsg) not in (5, 6, 7):
raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))
request_type = wmsg[1]
if type(request_type) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))
if request_type not in [Subscribe.MESSAGE_TYPE,
Unsubscribe.MESSAGE_TYPE,
Publish.MESSAGE_TYPE,
Register.MESSAGE_TYPE,
Unregister.MESSAGE_TYPE,
Call.MESSAGE_TYPE,
Invocation.MESSAGE_TYPE]:
raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))
request = check_or_raise_id(wmsg[2], "'request' in ERROR")
_ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
error = check_or_raise_uri(wmsg[4], "'error' in ERROR")
args = None
if len(wmsg) > 5:
args = wmsg[5]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))
kwargs = None
if len(wmsg) > 6:
kwargs = wmsg[6]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))
obj = Error(request_type, request, error, args = args, kwargs = kwargs)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.kwargs:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
elif self.args:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
else:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
"""
A WAMP ``PUBLISH`` message.
Formats:
* ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 16
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
topic,
args = None,
kwargs = None,
acknowledge = None,
excludeMe = None,
exclude = None,
eligible = None,
discloseMe = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param topic: The WAMP or application URI of the PubSub topic the event should
be published to.
:type topic: unicode
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param acknowledge: If True, acknowledge the publication with a success or
error response.
:type acknowledge: bool or None
      :param excludeMe: If ``True``, exclude the publisher from receiving the event, even
         if it is subscribed (and eligible).
:type excludeMe: bool or None
:param exclude: List of WAMP session IDs to exclude from receiving this event.
:type exclude: list of int or None
:param eligible: List of WAMP session IDs eligible to receive this event.
:type eligible: list of int or None
:param discloseMe: If True, request to disclose the publisher of this event
to subscribers.
:type discloseMe: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(acknowledge is None or type(acknowledge) == bool)
assert(excludeMe is None or type(excludeMe) == bool)
assert(exclude is None or type(exclude) == list)
assert(eligible is None or type(eligible) == list)
assert(discloseMe is None or type(discloseMe) == bool)
Message.__init__(self)
self.request = request
self.topic = topic
self.args = args
self.kwargs = kwargs
self.acknowledge = acknowledge
self.excludeMe = excludeMe
self.exclude = exclude
self.eligible = eligible
self.discloseMe = discloseMe
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
acknowledge = None
excludeMe = None
exclude = None
eligible = None
discloseMe = None
if u'acknowledge' in options:
option_acknowledge = options[u'acknowledge']
if type(option_acknowledge) != bool:
raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
acknowledge = option_acknowledge
if u'exclude_me' in options:
option_excludeMe = options[u'exclude_me']
if type(option_excludeMe) != bool:
raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
excludeMe = option_excludeMe
if u'exclude' in options:
option_exclude = options[u'exclude']
if type(option_exclude) != list:
raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
for sessionId in option_exclude:
if type(sessionId) not in six.integer_types:
raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
exclude = option_exclude
if u'eligible' in options:
option_eligible = options[u'eligible']
if type(option_eligible) != list:
raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
for sessionId in option_eligible:
if type(sessionId) not in six.integer_types:
raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
eligible = option_eligible
if u'disclose_me' in options:
option_discloseMe = options[u'disclose_me']
if type(option_discloseMe) != bool:
raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
discloseMe = option_discloseMe
obj = Publish(request,
topic,
args = args,
kwargs = kwargs,
acknowledge = acknowledge,
excludeMe = excludeMe,
exclude = exclude,
eligible = eligible,
discloseMe = discloseMe)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.acknowledge is not None:
options[u'acknowledge'] = self.acknowledge
if self.excludeMe is not None:
options[u'exclude_me'] = self.excludeMe
if self.exclude is not None:
options[u'exclude'] = self.exclude
if self.eligible is not None:
options[u'eligible'] = self.eligible
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
elif self.args:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
else:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
"""
A WAMP ``PUBLISHED`` message.
Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
"""
MESSAGE_TYPE = 17
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, publication):
"""
:param request: The request ID of the original ``PUBLISH`` request.
:type request: int
:param publication: The publication ID for the published event.
:type publication: int
"""
assert(type(request) in six.integer_types)
assert(type(publication) in six.integer_types)
Message.__init__(self)
self.request = request
self.publication = publication
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
publication = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")
obj = Published(request, publication)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Published.MESSAGE_TYPE, self.request, self.publication]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
"""
A WAMP ``SUBSCRIBE`` message.
Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
"""
MESSAGE_TYPE = 32
"""
The WAMP message code for this type of message.
"""
MATCH_EXACT = u'exact'
MATCH_PREFIX = u'prefix'
MATCH_WILDCARD = u'wildcard'
def __init__(self, request, topic, match = MATCH_EXACT):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param topic: The WAMP or application URI of the PubSub topic to subscribe to.
:type topic: unicode
:param match: The topic matching method to be used for the subscription.
:type match: unicode
"""
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(match is None or type(match) == six.text_type)
assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
Message.__init__(self)
self.request = request
self.topic = topic
self.match = match
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
if len(wmsg) != 4:
raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
match = Subscribe.MATCH_EXACT
if u'match' in options:
option_match = options[u'match']
if type(option_match) != six.text_type:
raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
if option_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
match = option_match
obj = Subscribe(request, topic, match)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.match and self.match != Subscribe.MATCH_EXACT:
options[u'match'] = self.match
return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
"""
A WAMP ``SUBSCRIBED`` message.
Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
"""
MESSAGE_TYPE = 33
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, subscription):
"""
:param request: The request ID of the original ``SUBSCRIBE`` request.
:type request: int
:param subscription: The subscription ID for the subscribed topic (or topic pattern).
:type subscription: int
"""
assert(type(request) in six.integer_types)
assert(type(subscription) in six.integer_types)
Message.__init__(self)
self.request = request
self.subscription = subscription
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
subscription = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")
obj = Subscribed(request, subscription)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
"""
A WAMP ``UNSUBSCRIBE`` message.
Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
"""
MESSAGE_TYPE = 34
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, subscription):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param subscription: The subscription ID for the subscription to unsubscribe from.
:type subscription: int
"""
assert(type(request) in six.integer_types)
assert(type(subscription) in six.integer_types)
Message.__init__(self)
self.request = request
self.subscription = subscription
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")
obj = Unsubscribe(request, subscription)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
"""
A WAMP ``UNSUBSCRIBED`` message.
Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
"""
MESSAGE_TYPE = 35
"""
The WAMP message code for this type of message.
"""
def __init__(self, request):
"""
:param request: The request ID of the original ``UNSUBSCRIBE`` request.
:type request: int
"""
assert(type(request) in six.integer_types)
Message.__init__(self)
self.request = request
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
if len(wmsg) != 2:
raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")
obj = Unsubscribed(request)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Unsubscribed.MESSAGE_TYPE, self.request]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
"""
A WAMP ``EVENT`` message.
Formats:
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 36
"""
The WAMP message code for this type of message.
"""
def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
"""
:param subscription: The subscription ID this event is dispatched under.
:type subscription: int
:param publication: The publication ID of the dispatched event.
:type publication: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param publisher: If present, the WAMP session ID of the publisher of this event.
:type publisher: int or None
"""
assert(type(subscription) in six.integer_types)
assert(type(publication) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(publisher is None or type(publisher) in six.integer_types)
Message.__init__(self)
self.subscription = subscription
self.publication = publication
self.args = args
self.kwargs = kwargs
self.publisher = publisher
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
publisher = None
if u'publisher' in details:
detail_publisher = details[u'publisher']
if type(detail_publisher) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
publisher = detail_publisher
obj = Event(subscription,
publication,
args = args,
kwargs = kwargs,
publisher = publisher)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.publisher is not None:
details[u'publisher'] = self.publisher
if self.kwargs:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
elif self.args:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
else:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
"""
A WAMP ``CALL`` message.
Formats:
* ``[CALL, Request|id, Options|dict, Procedure|uri]``
* ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
* ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 48
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
procedure,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
discloseMe = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param procedure: The WAMP or application URI of the procedure which should be called.
:type procedure: unicode
:param args: Positional values for application-defined call arguments.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined call arguments.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param timeout: If present, let the callee automatically cancel
the call after this many milliseconds.
:type timeout: int or None
:param receive_progress: If ``True``, indicates that the caller wants to receive
progressive call results.
:type receive_progress: bool or None
:param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
:type discloseMe: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(procedure) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(discloseMe is None or type(discloseMe) == bool)
Message.__init__(self)
self.request = request
self.procedure = procedure
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.discloseMe = discloseMe
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in CALL")
options = check_or_raise_extra(wmsg[2], "'options' in CALL")
procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
timeout = None
if u'timeout' in options:
option_timeout = options[u'timeout']
if type(option_timeout) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
if option_timeout < 0:
raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
timeout = option_timeout
receive_progress = None
if u'receive_progress' in options:
option_receive_progress = options[u'receive_progress']
if type(option_receive_progress) != bool:
raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
receive_progress = option_receive_progress
discloseMe = None
if u'disclose_me' in options:
option_discloseMe = options[u'disclose_me']
if type(option_discloseMe) != bool:
raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
discloseMe = option_discloseMe
obj = Call(request,
procedure,
args = args,
kwargs = kwargs,
timeout = timeout,
receive_progress = receive_progress,
discloseMe = discloseMe)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
elif self.args:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
else:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
"""
A WAMP ``CANCEL`` message.
Format: ``[CANCEL, CALL.Request|id, Options|dict]``
"""
MESSAGE_TYPE = 49
"""
The WAMP message code for this type of message.
"""
SKIP = u'skip'
ABORT = u'abort'
KILL = u'kill'
def __init__(self, request, mode = None):
"""
:param request: The WAMP request ID of the original ``CALL`` to cancel.
:type request: int
:param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
:type mode: unicode or None
"""
assert(type(request) in six.integer_types)
assert(mode is None or type(mode) == six.text_type)
assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
Message.__init__(self)
self.request = request
self.mode = mode
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
## options
##
mode = None
if u'mode' in options:
option_mode = options[u'mode']
if type(option_mode) != six.text_type:
raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
mode = option_mode
obj = Cancel(request, mode = mode)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.mode is not None:
options[u'mode'] = self.mode
return [Cancel.MESSAGE_TYPE, self.request, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP CANCEL Message (request = {0}, mode = '{1}'')".format(self.request, self.mode)
class Result(Message):
"""
A WAMP ``RESULT`` message.
Formats:
* ``[RESULT, CALL.Request|id, Details|dict]``
* ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
* ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 50
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, args = None, kwargs = None, progress = None):
"""
:param request: The request ID of the original ``CALL`` request.
:type request: int
:param args: Positional values for application-defined call result payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined call result payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param progress: If ``True``, this result is a progressive call result, and subsequent
results (or a final error) will follow.
:type progress: bool or None
"""
assert(type(request) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(progress is None or type(progress) == bool)
Message.__init__(self)
self.request = request
self.args = args
self.kwargs = kwargs
self.progress = progress
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
if len(wmsg) not in (3, 4, 5):
raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in RESULT")
details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
args = None
if len(wmsg) > 3:
args = wmsg[3]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
kwargs = None
if len(wmsg) > 4:
kwargs = wmsg[4]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
progress = None
if u'progress' in details:
detail_progress = details[u'progress']
if type(detail_progress) != bool:
raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
progress = detail_progress
obj = Result(request, args = args, kwargs = kwargs, progress = progress)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.progress is not None:
details[u'progress'] = self.progress
if self.kwargs:
return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
elif self.args:
return [Result.MESSAGE_TYPE, self.request, details, self.args]
else:
return [Result.MESSAGE_TYPE, self.request, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
"""
A WAMP ``REGISTER`` message.
Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
"""
MESSAGE_TYPE = 64
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param procedure: The WAMP or application URI of the RPC endpoint provided.
:type procedure: unicode
:param pkeys: The endpoint can work for this list of application partition keys.
:type pkeys: list of int or None
:param discloseCaller: If ``True``, the (registering) callee requests to disclose
the identity of callers whenever called.
:type discloseCaller: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(procedure) == six.text_type)
assert(pkeys is None or type(pkeys) == list)
if pkeys:
for k in pkeys:
assert(type(k) in six.integer_types)
assert(discloseCaller is None or type(discloseCaller) == bool)
Message.__init__(self)
self.request = request
self.procedure = procedure
self.pkeys = pkeys
self.discloseCaller = discloseCaller
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
if len(wmsg) != 4:
raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
pkeys = None
discloseCaller = None
if u'pkeys' in options:
option_pkeys = options[u'pkeys']
if type(option_pkeys) != list:
raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
for pk in option_pkeys:
if type(pk) not in six.integer_types:
raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
pkeys = option_pkeys
if u'disclose_caller' in options:
option_discloseCaller = options[u'disclose_caller']
if type(option_discloseCaller) != bool:
raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
discloseCaller = option_discloseCaller
obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.pkeys is not None:
options[u'pkeys'] = self.pkeys
if self.discloseCaller is not None:
options[u'disclose_caller'] = self.discloseCaller
return [Register.MESSAGE_TYPE, self.request, options, self.procedure]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
"""
A WAMP ``REGISTERED`` message.
Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
"""
MESSAGE_TYPE = 65
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, registration):
"""
:param request: The request ID of the original ``REGISTER`` request.
:type request: int
:param registration: The registration ID for the registered procedure (or procedure pattern).
:type registration: int
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
Message.__init__(self)
self.request = request
self.registration = registration
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
registration = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")
obj = Registered(request, registration)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Registered.MESSAGE_TYPE, self.request, self.registration]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
"""
A WAMP ``UNREGISTER`` message.
Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
"""
MESSAGE_TYPE = 66
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, registration):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param registration: The registration ID for the registration to unregister.
:type registration: int
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
Message.__init__(self)
self.request = request
self.registration = registration
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")
obj = Unregister(request, registration)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Unregister.MESSAGE_TYPE, self.request, self.registration]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
"""
A WAMP ``UNREGISTERED`` message.
Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
"""
MESSAGE_TYPE = 67
"""
The WAMP message code for this type of message.
"""
def __init__(self, request):
"""
:param request: The request ID of the original ``UNREGISTER`` request.
:type request: int
"""
assert(type(request) in six.integer_types)
Message.__init__(self)
self.request = request
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
if len(wmsg) != 2:
raise ProtocolError("invalid message length {0} for UNREGISTERED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNREGISTERED")
obj = Unregistered(request)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Unregistered.MESSAGE_TYPE, self.request]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP UNREGISTER Message (request = {0})".format(self.request)
class Invocation(Message):
"""
A WAMP ``INVOCATION`` message.
Formats:
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 68
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
registration,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
caller = None,
authid = None,
authrole = None,
authmethod = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param registration: The registration ID of the endpoint to be invoked.
:type registration: int
:param args: Positional values for application-defined call arguments.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined call arguments.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param timeout: If present, let the callee automatically cancel
the invocation after this many milliseconds.
:type timeout: int or None
:param receive_progress: Indicates if the callee should produce progressive results.
:type receive_progress: bool or None
:param caller: The WAMP session ID of the caller.
:type caller: int or None
:param authid: The authentication ID of the caller.
:type authid: unicode or None
:param authrole: The authentication role of the caller.
:type authrole: unicode or None
:param authmethod: The authentication method under which the caller was authenticated.
:type authmethod: unicode or None
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(caller is None or type(caller) in six.integer_types)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
Message.__init__(self)
self.request = request
self.registration = registration
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.caller = caller
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
timeout = None
if u'timeout' in details:
detail_timeout = details[u'timeout']
if type(detail_timeout) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
if detail_timeout < 0:
raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
timeout = detail_timeout
receive_progress = None
if u'receive_progress' in details:
detail_receive_progress = details[u'receive_progress']
if type(detail_receive_progress) != bool:
raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
receive_progress = detail_receive_progress
caller = None
if u'caller' in details:
detail_caller = details[u'caller']
if type(detail_caller) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
caller = detail_caller
authid = None
if u'authid' in details:
detail_authid = details[u'authid']
if type(detail_authid) != six.text_type:
raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
authid = detail_authid
authrole = None
if u'authrole' in details:
detail_authrole = details[u'authrole']
if type(detail_authrole) != six.text_type:
raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
authrole = detail_authrole
authmethod = None
if u'authmethod' in details:
detail_authmethod = details[u'authmethod']
if type(detail_authmethod) != six.text_type:
raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
authmethod = detail_authmethod
obj = Invocation(request,
registration,
args = args,
kwargs = kwargs,
timeout = timeout,
receive_progress = receive_progress,
caller = caller,
authid = authid,
authrole = authrole,
authmethod = authmethod)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.caller is not None:
options[u'caller'] = self.caller
if self.authid is not None:
options[u'authid'] = self.authid
if self.authrole is not None:
options[u'authrole'] = self.authrole
if self.authmethod is not None:
options[u'authmethod'] = self.authmethod
if self.kwargs:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
elif self.args:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
else:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
"""
A WAMP ``INTERRUPT`` message.
Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
"""
MESSAGE_TYPE = 69
"""
The WAMP message code for this type of message.
"""
ABORT = u'abort'
KILL = u'kill'
def __init__(self, request, mode = None):
"""
:param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
:type request: int
:param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
:type mode: unicode or None
"""
assert(type(request) in six.integer_types)
assert(mode is None or type(mode) == six.text_type)
assert(mode is None or mode in [self.ABORT, self.KILL])
Message.__init__(self)
self.request = request
self.mode = mode
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
## options
##
mode = None
if u'mode' in options:
option_mode = options[u'mode']
if type(option_mode) != six.text_type:
raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))
if option_mode not in [Interrupt.ABORT, Interrupt.KILL]:
raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))
mode = option_mode
obj = Interrupt(request, mode = mode)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.mode is not None:
options[u'mode'] = self.mode
return [Interrupt.MESSAGE_TYPE, self.request, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
"""
A WAMP ``YIELD`` message.
Formats:
* ``[YIELD, INVOCATION.Request|id, Options|dict]``
* ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
* ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 70
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, args = None, kwargs = None, progress = None):
"""
:param request: The WAMP request ID of the original call.
:type request: int
:param args: Positional values for application-defined call result payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined call result payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param progress: If ``True``, this result is a progressive invocation result, and subsequent
results (or a final error) will follow.
:type progress: bool or None
"""
assert(type(request) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(progress is None or type(progress) == bool)
Message.__init__(self)
self.request = request
self.args = args
self.kwargs = kwargs
self.progress = progress
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
if len(wmsg) not in (3, 4, 5):
raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in YIELD")
options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
args = None
if len(wmsg) > 3:
args = wmsg[3]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
kwargs = None
if len(wmsg) > 4:
kwargs = wmsg[4]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
progress = None
if u'progress' in options:
option_progress = options[u'progress']
if type(option_progress) != bool:
raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
progress = option_progress
obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.progress is not None:
options[u'progress'] = self.progress
if self.kwargs:
return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
elif self.args:
return [Yield.MESSAGE_TYPE, self.request, options, self.args]
else:
return [Yield.MESSAGE_TYPE, self.request, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
| 31.212635
| 360
| 0.633132
|
self.authprovider = authprovider
@staticmethod
def parse(wmsg):
E_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
authid = details.get(u'authid', None)
authrole = details.get(u'authrole', None)
authmethod = details.get(u'authmethod', None)
authprovider = details.get(u'authprovider', None)
roles = []
if not u'roles' in details:
raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")
if len(details_roles) == 0:
raise ProtocolError("empty 'roles' in 'details' in WELCOME")
for role in details_roles:
if role not in ROLE_NAME_TO_CLASS:
raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
role_cls = ROLE_NAME_TO_CLASS[role]
if u'features' in details_roles[role]:
check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
e_cls(**details_roles[role][u'features'])
else:
role_features = role_cls()
roles.append(role_features)
obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
return obj
def marshal(self):
details = {
u'roles': {}
}
if self.authid:
details[u'authid'] = self.authid
if self.authrole:
details[u'authrole'] = self.authrole
if self.authrole:
details[u'authmethod'] = self.authmethod
if self.authprovider:
details[u'authprovider'] = self.authprovider
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
return [Welcome.MESSAGE_TYPE, self.session, details]
def __str__(self):
return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
MESSAGE_TYPE = 3
def __init__(self, reason, message = None):
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
@staticmethod
def parse(wmsg):
TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
message = None
if u'message' in details:
details_message = details[u'message']
if type(details_message) != six.text_type:
raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
message = details_message
obj = Abort(reason, message)
return obj
def marshal(self):
details = {}
if self.message:
details[u'message'] = self.message
return [Abort.MESSAGE_TYPE, details, self.reason]
def __str__(self):
return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
MESSAGE_TYPE = 4
def __init__(self, method, extra = None):
assert(type(method) == six.text_type)
assert(extra is None or type(extra) == dict)
Message.__init__(self)
self.method = method
self.extra = extra or {}
@staticmethod
def parse(wmsg):
AGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
method = wmsg[1]
if type(method) != six.text_type:
raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
obj = Challenge(method, extra)
return obj
def marshal(self):
return [Challenge.MESSAGE_TYPE, self.method, self.extra]
def __str__(self):
return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
MESSAGE_TYPE = 5
def __init__(self, signature, extra = None):
assert(type(signature) == six.text_type)
assert(extra is None or type(extra) == dict)
Message.__init__(self)
self.signature = signature
self.extra = extra or {}
@staticmethod
def parse(wmsg):
ESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
signature = wmsg[1]
if type(signature) != six.text_type:
raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")
obj = Authenticate(signature, extra)
return obj
def marshal(self):
return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]
def __str__(self):
return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
MESSAGE_TYPE = 6
DEFAULT_REASON = u"wamp.goodbye.normal"
def __init__(self, reason = DEFAULT_REASON, message = None):
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
@staticmethod
def parse(wmsg):
E_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
message = None
if u'message' in details:
details_message = details[u'message']
if type(details_message) != six.text_type:
raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
message = details_message
obj = Goodbye(reason, message)
return obj
def marshal(self):
details = {}
if self.message:
details[u'message'] = self.message
return [Goodbye.MESSAGE_TYPE, details, self.reason]
def __str__(self):
return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
MESSAGE_TYPE = 7
def __init__(self, incoming, outgoing, discard = None):
assert(type(incoming) in six.integer_types)
assert(type(outgoing) in six.integer_types)
assert(discard is None or type(discard) == six.text_type)
Message.__init__(self)
self.incoming = incoming
self.outgoing = outgoing
self.discard = discard
@staticmethod
def parse(wmsg):
AGE_TYPE)
if len(wmsg) not in [3, 4]:
raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
incoming = wmsg[1]
if type(incoming) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
if incoming < 0:
raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
outgoing = wmsg[2]
if type(outgoing) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
if outgoing <= 0:
raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
discard = None
if len(wmsg) > 3:
discard = wmsg[3]
if type(discard) != six.text_type:
raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
obj = Heartbeat(incoming, outgoing, discard = discard)
return obj
def marshal(self):
if self.discard:
return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
else:
return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
def __str__(self):
return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
MESSAGE_TYPE = 8
def __init__(self, request_type, request, error, args = None, kwargs = None):
assert(type(request_type) in six.integer_types)
assert(type(request) in six.integer_types)
assert(type(error) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
Message.__init__(self)
self.request_type = request_type
self.request = request
self.error = error
self.args = args
self.kwargs = kwargs
@staticmethod
def parse(wmsg):
TYPE)
if len(wmsg) not in (5, 6, 7):
raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))
request_type = wmsg[1]
if type(request_type) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))
if request_type not in [Subscribe.MESSAGE_TYPE,
Unsubscribe.MESSAGE_TYPE,
Publish.MESSAGE_TYPE,
Register.MESSAGE_TYPE,
Unregister.MESSAGE_TYPE,
Call.MESSAGE_TYPE,
Invocation.MESSAGE_TYPE]:
raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))
request = check_or_raise_id(wmsg[2], "'request' in ERROR")
_ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
error = check_or_raise_uri(wmsg[4], "'error' in ERROR")
args = None
if len(wmsg) > 5:
args = wmsg[5]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))
kwargs = None
if len(wmsg) > 6:
kwargs = wmsg[6]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))
obj = Error(request_type, request, error, args = args, kwargs = kwargs)
return obj
def marshal(self):
details = {}
if self.kwargs:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
elif self.args:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
else:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
def __str__(self):
return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
MESSAGE_TYPE = 16
def __init__(self,
request,
topic,
args = None,
kwargs = None,
acknowledge = None,
excludeMe = None,
exclude = None,
eligible = None,
discloseMe = None):
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(acknowledge is None or type(acknowledge) == bool)
assert(excludeMe is None or type(excludeMe) == bool)
assert(exclude is None or type(exclude) == list)
assert(eligible is None or type(eligible) == list)
assert(discloseMe is None or type(discloseMe) == bool)
Message.__init__(self)
self.request = request
self.topic = topic
self.args = args
self.kwargs = kwargs
self.acknowledge = acknowledge
self.excludeMe = excludeMe
self.exclude = exclude
self.eligible = eligible
self.discloseMe = discloseMe
@staticmethod
def parse(wmsg):
E_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
acknowledge = None
excludeMe = None
exclude = None
eligible = None
discloseMe = None
if u'acknowledge' in options:
option_acknowledge = options[u'acknowledge']
if type(option_acknowledge) != bool:
raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
acknowledge = option_acknowledge
if u'exclude_me' in options:
option_excludeMe = options[u'exclude_me']
if type(option_excludeMe) != bool:
raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
excludeMe = option_excludeMe
if u'exclude' in options:
option_exclude = options[u'exclude']
if type(option_exclude) != list:
raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
for sessionId in option_exclude:
if type(sessionId) not in six.integer_types:
raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
exclude = option_exclude
if u'eligible' in options:
option_eligible = options[u'eligible']
if type(option_eligible) != list:
raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
for sessionId in option_eligible:
if type(sessionId) not in six.integer_types:
raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
eligible = option_eligible
if u'disclose_me' in options:
option_discloseMe = options[u'disclose_me']
if type(option_discloseMe) != bool:
raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
discloseMe = option_discloseMe
obj = Publish(request,
topic,
args = args,
kwargs = kwargs,
acknowledge = acknowledge,
excludeMe = excludeMe,
exclude = exclude,
eligible = eligible,
discloseMe = discloseMe)
return obj
def marshal(self):
options = {}
if self.acknowledge is not None:
options[u'acknowledge'] = self.acknowledge
if self.excludeMe is not None:
options[u'exclude_me'] = self.excludeMe
if self.exclude is not None:
options[u'exclude'] = self.exclude
if self.eligible is not None:
options[u'eligible'] = self.eligible
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
elif self.args:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
else:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
MESSAGE_TYPE = 17
def __init__(self, request, publication):
assert(type(request) in six.integer_types)
assert(type(publication) in six.integer_types)
Message.__init__(self)
self.request = request
self.publication = publication
@staticmethod
def parse(wmsg):
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
publication = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")
obj = Published(request, publication)
return obj
def marshal(self):
return [Published.MESSAGE_TYPE, self.request, self.publication]
def __str__(self):
return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
MESSAGE_TYPE = 32
MATCH_EXACT = u'exact'
MATCH_PREFIX = u'prefix'
MATCH_WILDCARD = u'wildcard'
def __init__(self, request, topic, match = MATCH_EXACT):
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(match is None or type(match) == six.text_type)
assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
Message.__init__(self)
self.request = request
self.topic = topic
self.match = match
@staticmethod
def parse(wmsg):
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
if len(wmsg) != 4:
raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
match = Subscribe.MATCH_EXACT
if u'match' in options:
option_match = options[u'match']
if type(option_match) != six.text_type:
raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
if option_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
match = option_match
obj = Subscribe(request, topic, match)
return obj
def marshal(self):
options = {}
if self.match and self.match != Subscribe.MATCH_EXACT:
options[u'match'] = self.match
return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
MESSAGE_TYPE = 33
def __init__(self, request, subscription):
assert(type(request) in six.integer_types)
assert(type(subscription) in six.integer_types)
Message.__init__(self)
self.request = request
self.subscription = subscription
@staticmethod
def parse(wmsg):
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
subscription = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")
obj = Subscribed(request, subscription)
return obj
def marshal(self):
return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]
def __str__(self):
return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
MESSAGE_TYPE = 34
def __init__(self, request, subscription):
assert(type(request) in six.integer_types)
assert(type(subscription) in six.integer_types)
Message.__init__(self)
self.request = request
self.subscription = subscription
@staticmethod
def parse(wmsg):
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")
obj = Unsubscribe(request, subscription)
return obj
def marshal(self):
return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]
def __str__(self):
return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
MESSAGE_TYPE = 35
def __init__(self, request):
assert(type(request) in six.integer_types)
Message.__init__(self)
self.request = request
@staticmethod
def parse(wmsg):
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
if len(wmsg) != 2:
raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")
obj = Unsubscribed(request)
return obj
def marshal(self):
return [Unsubscribed.MESSAGE_TYPE, self.request]
def __str__(self):
return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
MESSAGE_TYPE = 36
def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
assert(type(subscription) in six.integer_types)
assert(type(publication) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(publisher is None or type(publisher) in six.integer_types)
Message.__init__(self)
self.subscription = subscription
self.publication = publication
self.args = args
self.kwargs = kwargs
self.publisher = publisher
@staticmethod
def parse(wmsg):
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
publisher = None
if u'publisher' in details:
detail_publisher = details[u'publisher']
if type(detail_publisher) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
publisher = detail_publisher
obj = Event(subscription,
publication,
args = args,
kwargs = kwargs,
publisher = publisher)
return obj
def marshal(self):
details = {}
if self.publisher is not None:
details[u'publisher'] = self.publisher
if self.kwargs:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
elif self.args:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
else:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
def __str__(self):
return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
MESSAGE_TYPE = 48
def __init__(self,
request,
procedure,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
discloseMe = None):
assert(type(request) in six.integer_types)
assert(type(procedure) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(discloseMe is None or type(discloseMe) == bool)
Message.__init__(self)
self.request = request
self.procedure = procedure
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.discloseMe = discloseMe
@staticmethod
def parse(wmsg):
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in CALL")
options = check_or_raise_extra(wmsg[2], "'options' in CALL")
procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
timeout = None
if u'timeout' in options:
option_timeout = options[u'timeout']
if type(option_timeout) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
if option_timeout < 0:
raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
timeout = option_timeout
receive_progress = None
if u'receive_progress' in options:
option_receive_progress = options[u'receive_progress']
if type(option_receive_progress) != bool:
raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
receive_progress = option_receive_progress
discloseMe = None
if u'disclose_me' in options:
option_discloseMe = options[u'disclose_me']
if type(option_discloseMe) != bool:
raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
discloseMe = option_discloseMe
obj = Call(request,
procedure,
args = args,
kwargs = kwargs,
timeout = timeout,
receive_progress = receive_progress,
discloseMe = discloseMe)
return obj
def marshal(self):
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
elif self.args:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
else:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure]
def __str__(self):
return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
MESSAGE_TYPE = 49
SKIP = u'skip'
ABORT = u'abort'
KILL = u'kill'
def __init__(self, request, mode = None):
assert(type(request) in six.integer_types)
assert(mode is None or type(mode) == six.text_type)
assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
Message.__init__(self)
self.request = request
self.mode = mode
@staticmethod
def parse(wmsg):
        ## this should already be verified by WampSerializer.unserialize
        ##
        assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
mode = None
if u'mode' in options:
option_mode = options[u'mode']
if type(option_mode) != six.text_type:
raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
mode = option_mode
obj = Cancel(request, mode = mode)
return obj
def marshal(self):
options = {}
if self.mode is not None:
options[u'mode'] = self.mode
return [Cancel.MESSAGE_TYPE, self.request, options]
def __str__(self):
return "WAMP CANCEL Message (request = {0}, mode = '{1}'')".format(self.request, self.mode)
class Result(Message):
MESSAGE_TYPE = 50
def __init__(self, request, args = None, kwargs = None, progress = None):
assert(type(request) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(progress is None or type(progress) == bool)
Message.__init__(self)
self.request = request
self.args = args
self.kwargs = kwargs
self.progress = progress
@staticmethod
def parse(wmsg):
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
if len(wmsg) not in (3, 4, 5):
raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in RESULT")
details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
args = None
if len(wmsg) > 3:
args = wmsg[3]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
kwargs = None
if len(wmsg) > 4:
kwargs = wmsg[4]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
progress = None
if u'progress' in details:
detail_progress = details[u'progress']
if type(detail_progress) != bool:
raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
progress = detail_progress
obj = Result(request, args = args, kwargs = kwargs, progress = progress)
return obj
def marshal(self):
details = {}
if self.progress is not None:
details[u'progress'] = self.progress
if self.kwargs:
return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
elif self.args:
return [Result.MESSAGE_TYPE, self.request, details, self.args]
else:
return [Result.MESSAGE_TYPE, self.request, details]
def __str__(self):
return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
MESSAGE_TYPE = 64
def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
assert(type(request) in six.integer_types)
assert(type(procedure) == six.text_type)
assert(pkeys is None or type(pkeys) == list)
if pkeys:
for k in pkeys:
assert(type(k) in six.integer_types)
assert(discloseCaller is None or type(discloseCaller) == bool)
Message.__init__(self)
self.request = request
self.procedure = procedure
self.pkeys = pkeys
self.discloseCaller = discloseCaller
@staticmethod
def parse(wmsg):
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
if len(wmsg) != 4:
raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
pkeys = None
discloseCaller = None
if u'pkeys' in options:
option_pkeys = options[u'pkeys']
if type(option_pkeys) != list:
raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
for pk in option_pkeys:
if type(pk) not in six.integer_types:
raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
pkeys = option_pkeys
if u'disclose_caller' in options:
option_discloseCaller = options[u'disclose_caller']
if type(option_discloseCaller) != bool:
raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
discloseCaller = option_discloseCaller
obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
return obj
def marshal(self):
options = {}
if self.pkeys is not None:
options[u'pkeys'] = self.pkeys
if self.discloseCaller is not None:
options[u'disclose_caller'] = self.discloseCaller
return [Register.MESSAGE_TYPE, self.request, options, self.procedure]
def __str__(self):
return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
MESSAGE_TYPE = 65
def __init__(self, request, registration):
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
Message.__init__(self)
self.request = request
self.registration = registration
@staticmethod
def parse(wmsg):
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
registration = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")
obj = Registered(request, registration)
return obj
def marshal(self):
return [Registered.MESSAGE_TYPE, self.request, self.registration]
def __str__(self):
return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
MESSAGE_TYPE = 66
def __init__(self, request, registration):
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
Message.__init__(self)
self.request = request
self.registration = registration
@staticmethod
def parse(wmsg):
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")
obj = Unregister(request, registration)
return obj
def marshal(self):
return [Unregister.MESSAGE_TYPE, self.request, self.registration]
def __str__(self):
return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
MESSAGE_TYPE = 67
def __init__(self, request):
assert(type(request) in six.integer_types)
Message.__init__(self)
self.request = request
@staticmethod
def parse(wmsg):
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
if len(wmsg) != 2:
raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
obj = Unregistered(request)
return obj
def marshal(self):
return [Unregistered.MESSAGE_TYPE, self.request]
def __str__(self):
return "WAMP UNREGISTER Message (request = {0})".format(self.request)
class Invocation(Message):
MESSAGE_TYPE = 68
def __init__(self,
request,
registration,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
caller = None,
authid = None,
authrole = None,
authmethod = None):
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(caller is None or type(caller) in six.integer_types)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
Message.__init__(self)
self.request = request
self.registration = registration
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.caller = caller
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
@staticmethod
def parse(wmsg):
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
timeout = None
if u'timeout' in details:
detail_timeout = details[u'timeout']
if type(detail_timeout) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
if detail_timeout < 0:
raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
timeout = detail_timeout
receive_progress = None
if u'receive_progress' in details:
detail_receive_progress = details[u'receive_progress']
if type(detail_receive_progress) != bool:
raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
receive_progress = detail_receive_progress
caller = None
if u'caller' in details:
detail_caller = details[u'caller']
if type(detail_caller) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
caller = detail_caller
authid = None
if u'authid' in details:
detail_authid = details[u'authid']
if type(detail_authid) != six.text_type:
raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
authid = detail_authid
authrole = None
if u'authrole' in details:
detail_authrole = details[u'authrole']
if type(detail_authrole) != six.text_type:
raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
authrole = detail_authrole
authmethod = None
if u'authmethod' in details:
detail_authmethod = details[u'authmethod']
if type(detail_authmethod) != six.text_type:
raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
authmethod = detail_authmethod
obj = Invocation(request,
registration,
args = args,
kwargs = kwargs,
timeout = timeout,
receive_progress = receive_progress,
caller = caller,
authid = authid,
authrole = authrole,
authmethod = authmethod)
return obj
def marshal(self):
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.caller is not None:
options[u'caller'] = self.caller
if self.authid is not None:
options[u'authid'] = self.authid
if self.authrole is not None:
options[u'authrole'] = self.authrole
if self.authmethod is not None:
options[u'authmethod'] = self.authmethod
if self.kwargs:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
elif self.args:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
else:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
def __str__(self):
return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
MESSAGE_TYPE = 69
ABORT = u'abort'
KILL = u'kill'
def __init__(self, request, mode = None):
assert(type(request) in six.integer_types)
assert(mode is None or type(mode) == six.text_type)
assert(mode is None or mode in [self.ABORT, self.KILL])
Message.__init__(self)
self.request = request
self.mode = mode
@staticmethod
def parse(wmsg):
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
## options
##
mode = None
if u'mode' in options:
option_mode = options[u'mode']
if type(option_mode) != six.text_type:
raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))
if option_mode not in [Interrupt.ABORT, Interrupt.KILL]:
raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))
mode = option_mode
obj = Interrupt(request, mode = mode)
return obj
def marshal(self):
options = {}
if self.mode is not None:
options[u'mode'] = self.mode
return [Interrupt.MESSAGE_TYPE, self.request, options]
def __str__(self):
return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
MESSAGE_TYPE = 70
def __init__(self, request, args = None, kwargs = None, progress = None):
assert(type(request) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(progress is None or type(progress) == bool)
Message.__init__(self)
self.request = request
self.args = args
self.kwargs = kwargs
self.progress = progress
@staticmethod
def parse(wmsg):
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
if len(wmsg) not in (3, 4, 5):
raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in YIELD")
options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
args = None
if len(wmsg) > 3:
args = wmsg[3]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
kwargs = None
if len(wmsg) > 4:
kwargs = wmsg[4]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
progress = None
if u'progress' in options:
option_progress = options[u'progress']
if type(option_progress) != bool:
raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
progress = option_progress
obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
return obj
def marshal(self):
options = {}
if self.progress is not None:
options[u'progress'] = self.progress
if self.kwargs:
return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
elif self.args:
return [Yield.MESSAGE_TYPE, self.request, options, self.args]
else:
return [Yield.MESSAGE_TYPE, self.request, options]
def __str__(self):
return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
| true
| true
|
790895e4939cccce765f2b1f8913f2bb504aaf99
| 456
|
py
|
Python
|
iceAndFire01.py
|
QPThree/python-mycode
|
9823fa89eee3019287200f1af8a01efd181fcc79
|
[
"MIT"
] | 1
|
2022-01-05T16:07:46.000Z
|
2022-01-05T16:07:46.000Z
|
iceAndFire01.py
|
QPThree/python-mycode
|
9823fa89eee3019287200f1af8a01efd181fcc79
|
[
"MIT"
] | null | null | null |
iceAndFire01.py
|
QPThree/python-mycode
|
9823fa89eee3019287200f1af8a01efd181fcc79
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""Alta3 Research - Exploring OpenAPIs with requests"""
# documentation for this API is at
# https://anapioficeandfire.com/Documentation
import requests
AOIF = "https://www.anapioficeandfire.com/api"
def main():
## Send HTTPS GET to the API of ICE and Fire
gotresp = requests.get(AOIF)
## Decode the response
got_dj = gotresp.json()
## print the response
print(got_dj)
if __name__ == "__main__":
main()
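# ---- Editor's sketch (illustrative, not part of the original script): per
# the API documentation linked above, the same base URL also serves
# per-resource endpoints, e.g.:
#
#   book = requests.get(AOIF + "/books/1").json()
#   print(book["name"])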
| 20.727273
| 55
| 0.686404
|
import requests
AOIF = "https://www.anapioficeandfire.com/api"
def main():
    gotresp = requests.get(AOIF)
    got_dj = gotresp.json()
    print(got_dj)
if __name__ == "__main__":
main()
| true
| true
|
7908964aca83bd9faccdda451901c3ffcc1c3467
| 6,705
|
py
|
Python
|
gpkit/constraints/relax.py
|
giserh/gpkit
|
71b953fcac8f67f148b67b54b6e8cd4182dc0b3b
|
[
"MIT"
] | null | null | null |
gpkit/constraints/relax.py
|
giserh/gpkit
|
71b953fcac8f67f148b67b54b6e8cd4182dc0b3b
|
[
"MIT"
] | null | null | null |
gpkit/constraints/relax.py
|
giserh/gpkit
|
71b953fcac8f67f148b67b54b6e8cd4182dc0b3b
|
[
"MIT"
] | null | null | null |
"""Models for assessing primal feasibility"""
from __future__ import unicode_literals
from .set import ConstraintSet
from ..nomials import Variable, VectorVariable, parse_subs, NomialArray
from ..keydict import KeyDict
from .. import NamedVariables, SignomialsEnabled
class ConstraintsRelaxedEqually(ConstraintSet):
"""Relax constraints the same amount, as in Eqn. 10 of [Boyd2007].
Arguments
---------
constraints : iterable
Constraints which will be relaxed (made easier).
Attributes
----------
relaxvar : Variable
The variable controlling the relaxation. A solved value of 1 means no
relaxation. Higher values indicate the amount by which all constraints
have been made easier: e.g., a value of 1.5 means all constraints were
50 percent easier in the final solution than in the original problem.
[Boyd2007] : "A tutorial on geometric programming", Optim Eng 8:67-122
"""
def __init__(self, constraints):
if not isinstance(constraints, ConstraintSet):
constraints = ConstraintSet(constraints)
substitutions = dict(constraints.substitutions)
relconstraints = []
self.origconstrs = []
with NamedVariables("Relax"):
self.relaxvar = Variable("C")
with SignomialsEnabled():
for constraint in constraints.flat():
self.origconstrs.append(constraint)
relconstraints.append(constraint.relaxed(self.relaxvar))
ConstraintSet.__init__(self, {
"relaxed constraints": relconstraints,
"minimum relaxation": self.relaxvar >= 1}, substitutions)
class ConstraintsRelaxed(ConstraintSet):
"""Relax constraints, as in Eqn. 11 of [Boyd2007].
Arguments
---------
constraints : iterable
Constraints which will be relaxed (made easier).
Attributes
----------
relaxvars : Variable
The variables controlling the relaxation. A solved value of 1 means no
relaxation was necessary or optimal for a particular constraint.
Higher values indicate the amount by which that constraint has been
made easier: e.g., a value of 1.5 means it was made 50 percent easier
in the final solution than in the original problem.
[Boyd2007] : "A tutorial on geometric programming", Optim Eng 8:67-122
"""
def __init__(self, constraints):
if not isinstance(constraints, ConstraintSet):
constraints = ConstraintSet(constraints)
substitutions = dict(constraints.substitutions)
relconstraints = []
self.origconstrs = []
with NamedVariables("Relax"):
self.relaxvars = VectorVariable(len(constraints), "C")
with SignomialsEnabled():
for i, constraint in enumerate(constraints.flat()):
self.origconstrs.append(constraint)
relconstraints.append(constraint.relaxed(self.relaxvars[i]))
ConstraintSet.__init__(self, {
"relaxed constraints": relconstraints,
"minimum relaxation": self.relaxvars >= 1}, substitutions)
class ConstantsRelaxed(ConstraintSet):
"""Relax constants in a constraintset.
Arguments
---------
constraints : iterable
Constraints which will be relaxed (made easier).
include_only : set (optional)
variable names must be in this set to be relaxed
exclude : set (optional)
variable names in this set will never be relaxed
Attributes
----------
relaxvars : Variable
The variables controlling the relaxation. A solved value of 1 means no
relaxation was necessary or optimal for a particular constant.
Higher values indicate the amount by which that constant has been
made easier: e.g., a value of 1.5 means it was made 50 percent easier
in the final solution than in the original problem. Of course, this
can also be determined by looking at the constant's new value directly.
"""
# pylint:disable=too-many-locals
def __init__(self, constraints, include_only=None, exclude=None):
if not isinstance(constraints, ConstraintSet):
constraints = ConstraintSet(constraints)
exclude = frozenset(exclude) if exclude else frozenset()
include_only = frozenset(include_only) if include_only else frozenset()
substitutions = KeyDict(constraints.substitutions)
constants, _, linked = parse_subs(constraints.varkeys, substitutions)
constrained_varkeys = constraints.constrained_varkeys()
if linked:
kdc = KeyDict(constants)
constants.update({k: f(kdc) for k, f in linked.items()
if k in constrained_varkeys})
self.constants = constants
relaxvars, self.origvars, relaxation_constraints = [], [], {}
with NamedVariables("Relax") as (self.lineage, _):
pass
self._unrelaxmap = {}
for key, value in constants.items():
if value == 0:
continue
elif include_only and key.name not in include_only:
continue
elif key.name in exclude:
continue
key.descr.pop("gradients", None)
descr = key.descr.copy()
descr.pop("veckey", None)
descr["lineage"] = descr.pop("lineage", ())+(self.lineage[-1],)
relaxvardescr = descr.copy()
relaxvardescr["unitrepr"] = "-"
relaxvar = Variable(**relaxvardescr)
relaxvars.append(relaxvar)
del substitutions[key]
var = Variable(**key.descr)
self.origvars.append(var)
unrelaxeddescr = descr.copy()
unrelaxeddescr["lineage"] += (("OriginalValues", 0),)
unrelaxed = Variable(**unrelaxeddescr)
self._unrelaxmap[unrelaxed.key] = key
substitutions[unrelaxed] = value
relaxation_constraints[str(key)] = [relaxvar >= 1,
unrelaxed/relaxvar <= var,
var <= unrelaxed*relaxvar]
self.relaxvars = NomialArray(relaxvars)
ConstraintSet.__init__(self, {
"original constraints": constraints,
"relaxation constraints": relaxation_constraints})
self.substitutions = substitutions
def process_result(self, result):
ConstraintSet.process_result(self, result)
csenss = result["sensitivities"]["constants"]
for const, origvar in self._unrelaxmap.items():
csenss[origvar] = csenss[const]
del csenss[const]
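# ---- Editor's sketch (illustrative, not part of the original module;
# assumes a gpkit model 'm' whose substitutions fix a constant named
# "x_max", and the same Model API as above):
#
#     relaxed = ConstantsRelaxed(m, include_only={"x_max"})
#     sol = Model(relaxed.relaxvars.prod(), relaxed).solve()
#     sol(relaxed.relaxvars)      # entries > 1 flag constants that had to move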
| 40.391566
| 79
| 0.633557
|
from __future__ import unicode_literals
from .set import ConstraintSet
from ..nomials import Variable, VectorVariable, parse_subs, NomialArray
from ..keydict import KeyDict
from .. import NamedVariables, SignomialsEnabled
class ConstraintsRelaxedEqually(ConstraintSet):
def __init__(self, constraints):
if not isinstance(constraints, ConstraintSet):
constraints = ConstraintSet(constraints)
substitutions = dict(constraints.substitutions)
relconstraints = []
self.origconstrs = []
with NamedVariables("Relax"):
self.relaxvar = Variable("C")
with SignomialsEnabled():
for constraint in constraints.flat():
self.origconstrs.append(constraint)
relconstraints.append(constraint.relaxed(self.relaxvar))
ConstraintSet.__init__(self, {
"relaxed constraints": relconstraints,
"minimum relaxation": self.relaxvar >= 1}, substitutions)
class ConstraintsRelaxed(ConstraintSet):
def __init__(self, constraints):
if not isinstance(constraints, ConstraintSet):
constraints = ConstraintSet(constraints)
substitutions = dict(constraints.substitutions)
relconstraints = []
self.origconstrs = []
with NamedVariables("Relax"):
self.relaxvars = VectorVariable(len(constraints), "C")
with SignomialsEnabled():
for i, constraint in enumerate(constraints.flat()):
self.origconstrs.append(constraint)
relconstraints.append(constraint.relaxed(self.relaxvars[i]))
ConstraintSet.__init__(self, {
"relaxed constraints": relconstraints,
"minimum relaxation": self.relaxvars >= 1}, substitutions)
class ConstantsRelaxed(ConstraintSet):
def __init__(self, constraints, include_only=None, exclude=None):
if not isinstance(constraints, ConstraintSet):
constraints = ConstraintSet(constraints)
exclude = frozenset(exclude) if exclude else frozenset()
include_only = frozenset(include_only) if include_only else frozenset()
substitutions = KeyDict(constraints.substitutions)
constants, _, linked = parse_subs(constraints.varkeys, substitutions)
constrained_varkeys = constraints.constrained_varkeys()
if linked:
kdc = KeyDict(constants)
constants.update({k: f(kdc) for k, f in linked.items()
if k in constrained_varkeys})
self.constants = constants
relaxvars, self.origvars, relaxation_constraints = [], [], {}
with NamedVariables("Relax") as (self.lineage, _):
pass
self._unrelaxmap = {}
for key, value in constants.items():
if value == 0:
continue
elif include_only and key.name not in include_only:
continue
elif key.name in exclude:
continue
key.descr.pop("gradients", None)
descr = key.descr.copy()
descr.pop("veckey", None)
descr["lineage"] = descr.pop("lineage", ())+(self.lineage[-1],)
relaxvardescr = descr.copy()
relaxvardescr["unitrepr"] = "-"
relaxvar = Variable(**relaxvardescr)
relaxvars.append(relaxvar)
del substitutions[key]
var = Variable(**key.descr)
self.origvars.append(var)
unrelaxeddescr = descr.copy()
unrelaxeddescr["lineage"] += (("OriginalValues", 0),)
unrelaxed = Variable(**unrelaxeddescr)
self._unrelaxmap[unrelaxed.key] = key
substitutions[unrelaxed] = value
relaxation_constraints[str(key)] = [relaxvar >= 1,
unrelaxed/relaxvar <= var,
var <= unrelaxed*relaxvar]
self.relaxvars = NomialArray(relaxvars)
ConstraintSet.__init__(self, {
"original constraints": constraints,
"relaxation constraints": relaxation_constraints})
self.substitutions = substitutions
def process_result(self, result):
ConstraintSet.process_result(self, result)
csenss = result["sensitivities"]["constants"]
for const, origvar in self._unrelaxmap.items():
csenss[origvar] = csenss[const]
del csenss[const]
| true
| true
|
79089679a0eafa7ae301e31dcaa235328902ee39
| 10,695
|
py
|
Python
|
examples/rsc_baseline.py
|
ZhaoChuyang/dgreid
|
ee1d7af74b796f2f194307ab023e43ecc3d3d525
|
[
"MIT"
] | null | null | null |
examples/rsc_baseline.py
|
ZhaoChuyang/dgreid
|
ee1d7af74b796f2f194307ab023e43ecc3d3d525
|
[
"MIT"
] | null | null | null |
examples/rsc_baseline.py
|
ZhaoChuyang/dgreid
|
ee1d7af74b796f2f194307ab023e43ecc3d3d525
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import collections
import copy
import time
from datetime import timedelta
from sklearn.cluster import DBSCAN, KMeans
from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
sys.path.append(".")
from reid import datasets
from reid import models
# from reid.models.dsbn import convert_dsbn, convert_bn
# from reid.models.csbn import convert_csbn
# from reid.models.idm_dsbn import convert_dsbn_idm, convert_bn_idm
# from reid.models.xbm import XBM
from reid.trainers import RSCTrainer
from reid.evaluators import Evaluator, extract_features
from reid.utils.data import CommDataset
from reid.utils.data import IterLoader
from reid.utils.data import transforms as T
from reid.utils.data.sampler import RandomMultipleGallerySampler
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.logging import Logger
from reid.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from reid.utils.rerank import compute_jaccard_distance
start_epoch = best_mAP = 0
def get_data(name, data_dir, combineall=False):
# data_dir = '/data/datasets'
root = osp.join(data_dir, name)
dataset = datasets.create(name, root, combineall=combineall)
return dataset
def get_train_loader(args, dataset, height, width, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
# T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
])
train_set = sorted(dataset.train) if trainset is None else sorted(trainset)
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def create_model(args):
model = models.create(args.arch, num_features=args.features, norm=False, dropout=args.dropout,
num_classes=args.nclass)
# use CUDA
model.cuda()
model = nn.DataParallel(model)
return model
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global start_epoch, best_mAP
start_time = time.monotonic()
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create datasets
iters = args.iters if (args.iters>0) else None
print("==> Load source-domain dataset")
train_items = []
for src in args.dataset_source.split(','):
dataset = get_data(src, args.data_dir, args.combine_all)
train_items.extend(dataset.train)
dataset_source = CommDataset(train_items)
print("==> Load target-domain dataset")
dataset_target = get_data(args.dataset_target, args.data_dir)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
train_loader_source = get_train_loader(args, dataset_source, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters)
source_classes = dataset_source.num_train_pids
args.nclass = source_classes
# Create model
model = create_model(args)
print(model)
# Evaluator
evaluator = Evaluator(model)
# Optimizer
params = [{"params": [value]} for _, value in model.named_parameters() if value.requires_grad]
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
# Trainer
trainer = RSCTrainer(model, args.nclass, margin=args.margin)
for epoch in range(args.epochs):
train_loader_source.new_epoch()
# train_loader_target.new_epoch()
trainer.train(epoch, train_loader_source, optimizer, print_freq=args.print_freq, train_iters=args.iters)
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
print('Test on target: ', args.dataset_target)
_, mAP = evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
is_best = (mAP>best_mAP)
best_mAP = max(mAP, best_mAP)
save_checkpoint({
'state_dict': model.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
print('\n * Finished epoch {:3d} model mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP, best_mAP, ' *' if is_best else ''))
lr_scheduler.step()
print ('==> Test with the best model on the target domain:')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model.load_state_dict(checkpoint['state_dict'])
evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
end_time = time.monotonic()
print('Total running time: ', timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Self-paced contrastive learning on UDA re-ID")
# data
parser.add_argument('-ds', '--dataset-source', type=str, default='dukemtmc')
parser.add_argument('-dt', '--dataset-target', type=str, default='market1501')
parser.add_argument('--combine-all', action='store_true',
help="if True: combinall train, query, gallery for training;")
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# cluster
parser.add_argument('--eps', type=float, default=0.6,
help="max neighbor distance for DBSCAN")
parser.add_argument('--k1', type=int, default=30,
help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6,
help="hyperparameter for jaccard distance")
parser.add_argument('--nclass', type=int, default=1000,
help="number of classes (source+target)")
parser.add_argument('--s-class', type=int, default=1000,
help="number of classes (source)")
parser.add_argument('--t-class', type=int, default=1000,
help="number of classes (target)")
# loss
parser.add_argument('--margin', type=float, default=0.3,
help="margin for triplet loss")
parser.add_argument('--mu1', type=float, default=0.5,
help="weight for loss_bridge_pred")
parser.add_argument('--mu2', type=float, default=0.1,
help="weight for loss_bridge_feat")
parser.add_argument('--mu3', type=float, default=1,
help="weight for loss_div")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_idm',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# xbm parameters
parser.add_argument('--memorySize', type=int, default=8192,
                        help='memory bank size')
parser.add_argument('--ratio', type=float, default=1,
help='memorySize=ratio*data_size')
parser.add_argument('--featureSize', type=int, default=2048)
parser.add_argument('--use-xbm', action='store_true', help="if True: strong baseline; if False: naive baseline")
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate")
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--epochs', type=int, default=60)
parser.add_argument('--iters', type=int, default=200)
parser.add_argument('--step-size', type=int, default=30)
# training configs
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--eval-step', type=int, default=10)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, default='/data/datasets')
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
# hbchen
parser.add_argument('--csdn', type=bool, default=False)
main()
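# ---- Editor's note (illustrative invocation, not part of the original
# script; dataset names and directories are assumptions). Multiple source
# domains are comma-separated, matching the split(',') in main_worker above:
#
#   python examples/rsc_baseline.py -ds dukemtmc,msmt17 -dt market1501 \
#       --data-dir /data/datasets --logs-dir logs/rsc_baseline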
| 39.464945
| 120
| 0.651426
|
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import collections
import copy
import time
from datetime import timedelta
from sklearn.cluster import DBSCAN, KMeans
from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
sys.path.append(".")
from reid import datasets
from reid import models
from reid.trainers import RSCTrainer
from reid.evaluators import Evaluator, extract_features
from reid.utils.data import CommDataset
from reid.utils.data import IterLoader
from reid.utils.data import transforms as T
from reid.utils.data.sampler import RandomMultipleGallerySampler
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.logging import Logger
from reid.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from reid.utils.rerank import compute_jaccard_distance
start_epoch = best_mAP = 0
def get_data(name, data_dir, combineall=False):
root = osp.join(data_dir, name)
dataset = datasets.create(name, root, combineall=combineall)
return dataset
def get_train_loader(args, dataset, height, width, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
])
train_set = sorted(dataset.train) if trainset is None else sorted(trainset)
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def create_model(args):
model = models.create(args.arch, num_features=args.features, norm=False, dropout=args.dropout,
num_classes=args.nclass)
model.cuda()
model = nn.DataParallel(model)
return model
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global start_epoch, best_mAP
start_time = time.monotonic()
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
iters = args.iters if (args.iters>0) else None
print("==> Load source-domain dataset")
train_items = []
for src in args.dataset_source.split(','):
dataset = get_data(src, args.data_dir, args.combine_all)
train_items.extend(dataset.train)
dataset_source = CommDataset(train_items)
print("==> Load target-domain dataset")
dataset_target = get_data(args.dataset_target, args.data_dir)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
train_loader_source = get_train_loader(args, dataset_source, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters)
source_classes = dataset_source.num_train_pids
args.nclass = source_classes
model = create_model(args)
print(model)
evaluator = Evaluator(model)
params = [{"params": [value]} for _, value in model.named_parameters() if value.requires_grad]
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
trainer = RSCTrainer(model, args.nclass, margin=args.margin)
for epoch in range(args.epochs):
train_loader_source.new_epoch()
trainer.train(epoch, train_loader_source, optimizer, print_freq=args.print_freq, train_iters=args.iters)
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
print('Test on target: ', args.dataset_target)
_, mAP = evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
is_best = (mAP>best_mAP)
best_mAP = max(mAP, best_mAP)
save_checkpoint({
'state_dict': model.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
print('\n * Finished epoch {:3d} model mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP, best_mAP, ' *' if is_best else ''))
lr_scheduler.step()
print ('==> Test with the best model on the target domain:')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model.load_state_dict(checkpoint['state_dict'])
evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
end_time = time.monotonic()
print('Total running time: ', timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Self-paced contrastive learning on UDA re-ID")
parser.add_argument('-ds', '--dataset-source', type=str, default='dukemtmc')
parser.add_argument('-dt', '--dataset-target', type=str, default='market1501')
parser.add_argument('--combine-all', action='store_true',
help="if True: combinall train, query, gallery for training;")
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
parser.add_argument('--eps', type=float, default=0.6,
help="max neighbor distance for DBSCAN")
parser.add_argument('--k1', type=int, default=30,
help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6,
help="hyperparameter for jaccard distance")
parser.add_argument('--nclass', type=int, default=1000,
help="number of classes (source+target)")
parser.add_argument('--s-class', type=int, default=1000,
help="number of classes (source)")
parser.add_argument('--t-class', type=int, default=1000,
help="number of classes (target)")
parser.add_argument('--margin', type=float, default=0.3,
help="margin for triplet loss")
parser.add_argument('--mu1', type=float, default=0.5,
help="weight for loss_bridge_pred")
parser.add_argument('--mu2', type=float, default=0.1,
help="weight for loss_bridge_feat")
parser.add_argument('--mu3', type=float, default=1,
help="weight for loss_div")
parser.add_argument('-a', '--arch', type=str, default='resnet50_idm',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
parser.add_argument('--memorySize', type=int, default=8192,
                        help='memory bank size')
parser.add_argument('--ratio', type=float, default=1,
help='memorySize=ratio*data_size')
parser.add_argument('--featureSize', type=int, default=2048)
parser.add_argument('--use-xbm', action='store_true', help="if True: strong baseline; if False: naive baseline")
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate")
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--epochs', type=int, default=60)
parser.add_argument('--iters', type=int, default=200)
parser.add_argument('--step-size', type=int, default=30)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--eval-step', type=int, default=10)
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, default='/data/datasets')
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
parser.add_argument('--csdn', type=bool, default=False)
main()
| true
| true
|
7908969220dacc01da99a5a8d394b71fba41cd69
| 50,606
|
py
|
Python
|
env/lib/python3.5/site-packages/numpy/core/multiarray.py
|
Udolf15/recommedMeMovies
|
be5ae74acd98e3f93beaaa5bb55623974fb24247
|
[
"MIT"
] | 366
|
2019-04-07T20:34:48.000Z
|
2022-03-29T07:35:38.000Z
|
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
|
haideraltahan/CropMe
|
75a111b9d3b2c50c6f2a9a36d21432053f02284d
|
[
"MIT"
] | 16
|
2020-03-24T17:30:37.000Z
|
2022-03-11T23:57:41.000Z
|
venv/lib/python3.7/site-packages/numpy/core/multiarray.py
|
haideraltahan/CropMe
|
75a111b9d3b2c50c6f2a9a36d21432053f02284d
|
[
"MIT"
] | 61
|
2019-04-08T00:58:14.000Z
|
2022-03-20T23:04:28.000Z
|
"""
Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
import functools
import warnings
from . import overrides
from . import _multiarray_umath
import numpy as np
from numpy.core._multiarray_umath import *
from numpy.core._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
_ARRAY_API, _monotonicity
)
__all__ = [
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
'_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner',
'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
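# Illustrative consequence of the fixups above (a sketch, not part of the
# module's API): ndarray pickles refer to ``numpy.core.multiarray._reconstruct``
# by name, so keeping these ``__module__`` values lets such pickles load.
#
#   >>> import pickle
#   >>> pickle.loads(pickle.dumps(np.arange(3)))
#   array([0, 1, 2])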
# We can't verify dispatcher signatures because NumPy's C functions don't
# support introspection.
array_function_from_c_func_and_dispatcher = functools.partial(
overrides.array_function_from_dispatcher,
module='numpy', docs_from_dispatcher=True, verify=False)
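# Each function decorated below is only a *dispatcher*: it returns the
# arguments that should be searched for ``__array_function__`` overrides,
# while the docstring and the C implementation are attached by the
# decorator. A minimal sketch of the same pattern with a hypothetical
# pure-Python function (the names below are illustrative, not NumPy API):
#
#   def _pairwise_dispatcher(a, b, out=None):
#       return (a, b, out)              # arrays that may carry overrides
#
#   @overrides.array_function_dispatch(_pairwise_dispatcher)
#   def pairwise_sum(a, b, out=None):
#       return np.add(a, b, out=out)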
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None):
"""
empty_like(prototype, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : array_like
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran
contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``
as closely as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
        type of ``prototype``, otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None):
"""
concatenate((a1, a2, ...), axis=0, out=None)
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
block : Assemble arrays from blocks.
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
>>> np.concatenate((a, b), axis=None)
array([1, 2, 3, 4, 5, 6])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data=[0, --, 2],
mask=[False, True, False],
fill_value=999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data=[0, 1, 2, 2, 3, 4],
mask=False,
fill_value=999999)
>>> np.ma.concatenate([a, b])
masked_array(data=[0, --, 2, 2, 3, 4],
mask=[False, True, False, False, False, False],
fill_value=999999)
"""
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
"""
where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
documentation covers only the case where all three arguments are
provided.
Parameters
----------
condition : array_like, bool
Where True, yield `x`, otherwise yield `y`.
x, y : array_like
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
See Also
--------
choose
nonzero : The function that is called when x and y are omitted
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.where(a < 5, a, 10*a)
array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
This can be used on multidimensional arrays too:
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = np.ogrid[:3, :4]
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]])
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0, 1, 2],
[ 0, 2, -1],
[ 0, 3, -1]])
"""
return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
"""
lexsort(keys, axis=-1)
Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
    for the keys argument, its rows are interpreted as the sorting keys and
    sorting is according to the last row, second-last row, etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print(ind)
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
"""
if isinstance(keys, tuple):
return keys
else:
return (keys,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
"""
can_cast(from_, to, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from_ : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
to : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
    Starting in NumPy 1.9, `can_cast` returns False in 'safe' casting mode
    for an integer/float dtype and a string dtype if the string dtype length
    is not long enough to store the maximum integer/float value converted to
    a string. Previously, `can_cast` in 'safe' mode returned True for an
    integer/float dtype and a string dtype of any length.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, complex)
True
>>> np.can_cast(complex, float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
"""
return (from_,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
"""
return arrays_and_dtypes
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
"""
dot(a, b, out=None)
Dot product of two arrays. Specifically,
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
but using :func:`matmul` or ``a @ b`` is preferred.
- If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
- If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
"""
return (a, b, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
"""
bincount(x, weights=None, minlength=0)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
return (x, weights)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
"""
return multi_index
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None, dims=None):
"""
unravel_index(indices, shape, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``shape``. Before version 1.6.0,
this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling ``indices``.
.. versionchanged:: 1.16.0
Renamed from ``dims`` to ``shape``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
"""
if dims is not None:
warnings.warn("'shape' argument should be used instead of 'dims'",
DeprecationWarning, stacklevel=3)
return (indices,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
"""
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
"""
return (dst, src, where)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, mask, values):
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
"""
return (a, mask, values)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(myarray, axis=None):
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An array of integers or booleans whose elements should be packed to
bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
"""
return (myarray,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(myarray, axis=None):
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
The dimension over which bit-unpacking is done.
``None`` implies unpacking the flattened array.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
"""
return (myarray,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
    >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
"""
return (dates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03','D')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29','D')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19','D')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13','D')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22','D')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23','D')
"""
return (dates, offsets, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
... np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
"""
return (begindates, enddates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(
_multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
"""
datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
Convert an array of datetimes into an array of strings.
Parameters
----------
arr : array_like of datetime64
The array of UTC timestamps to format.
unit : str
One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
timezone : {'naive', 'UTC', 'local'} or tzinfo
Timezone information to use when displaying the datetime. If 'UTC', end
with a Z to indicate UTC time. If 'local', convert to the local timezone
first, and suffix with a +-#### timezone offset. If a tzinfo object,
then do as with 'local', but use the specified timezone.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
Casting to allow when changing between datetime units.
Returns
-------
str_arr : ndarray
An array of strings the same shape as `arr`.
Examples
--------
>>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
>>> d
array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
'2002-10-27T07:30'], dtype='datetime64[m]')
Setting the timezone to UTC shows the same information, but with a Z suffix
>>> np.datetime_as_string(d, timezone='UTC')
array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
'2002-10-27T07:30Z'], dtype='<U35')
Note that we picked datetimes that cross a DST boundary. Passing in a
``pytz`` timezone object will print the appropriate offset
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
Passing in a unit will change the precision
>>> np.datetime_as_string(d, unit='h')
array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
dtype='<U32')
>>> np.datetime_as_string(d, unit='s')
array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
'2002-10-27T07:30:00'], dtype='<U38')
'casting' can be used to specify whether precision can be changed
>>> np.datetime_as_string(d, unit='h', casting='safe')
TypeError: Cannot create a datetime string as units 'h' from a NumPy
datetime with units 'm' according to the rule 'safe'
"""
return (arr,)
| 32.274235
| 128
| 0.619571
|
import functools
import warnings
from . import overrides
from . import _multiarray_umath
import numpy as np
from numpy.core._multiarray_umath import *
from numpy.core._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
_ARRAY_API, _monotonicity
)
__all__ = [
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
'_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner',
'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
# We can't verify dispatcher signatures because NumPy's C functions don't
# support introspection.
array_function_from_c_func_and_dispatcher = functools.partial(
overrides.array_function_from_dispatcher,
module='numpy', docs_from_dispatcher=True, verify=False)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None):
return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None):
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
if isinstance(keys, tuple):
return keys
else:
return (keys,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
return (from_,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
return arrays_and_dtypes
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
return (a, b, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
return (x, weights)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
return multi_index
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None, dims=None):
if dims is not None:
warnings.warn("'shape' argument should be used instead of 'dims'",
DeprecationWarning, stacklevel=3)
return (indices,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
return (dst, src, where)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, mask, values):
return (a, mask, values)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(myarray, axis=None):
return (myarray,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(myarray, axis=None):
return (myarray,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
return (dates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
busdaycal=None, out=None):
return (dates, offsets, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
busdaycal=None, out=None):
return (begindates, enddates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(
_multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
return (arr,)
| true
| true
|
790896f07b7775e6ad5dbd899a45e2d39ea6a7de
| 323
|
py
|
Python
|
EjemploMetodos.py
|
gcardosov/PythonAprendeOrg
|
0cad81f0a584c98389ca729a337d30581780e520
|
[
"MIT"
] | 1
|
2018-03-07T05:26:12.000Z
|
2018-03-07T05:26:12.000Z
|
EjemploMetodos.py
|
gcardosov/PythonAprendeOrg
|
0cad81f0a584c98389ca729a337d30581780e520
|
[
"MIT"
] | null | null | null |
EjemploMetodos.py
|
gcardosov/PythonAprendeOrg
|
0cad81f0a584c98389ca729a337d30581780e520
|
[
"MIT"
] | null | null | null |
class Persona:
    def __init__(self):
        self.edad = 18
        self.nombre = "juan"
        # Python 3 print function (the original file used Python 2
        # print statements, which are a SyntaxError on Python 3)
        print("Se ha creado a", self.nombre, "de", self.edad)
    def hablar(self, palabras="No se que decir"):
        print(self.nombre, ': ', palabras)
juan = Persona()
juan.hablar()
juan.hablar("Hola estoy hablando")
| 24.846154
| 61
| 0.591331
|
class Persona:
    def __init__(self):
        self.edad = 18
        self.nombre = "juan"
        print("Se ha creado a", self.nombre, "de", self.edad)
    def hablar(self, palabras="No se que decir"):
        print(self.nombre, ': ', palabras)
juan = Persona()
juan.hablar()
juan.hablar("Hola estoy hablando")
| false
| true
|
790898575f1f55937fa301c7f45fbe8da8be78a9
| 308
|
py
|
Python
|
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/01_First-Steps-in-Coding/00.Book-Exercise-2.1-11-USD-to-BGN.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/01_First-Steps-in-Coding/00.Book-Exercise-2.1-11-USD-to-BGN.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/01_First-Steps-in-Coding/00.Book-Exercise-2.1-11-USD-to-BGN.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
# console converter - USD to BGN
# Write a program for converting US dollars (USD) into Bulgarian levs (BGN).
# Round the result to 2 digits after the decimal point. Use a fixed exchange rate between the dollar and the lev: 1 USD = 1.79549 BGN.
USD = float(input())
BGN = round(USD * 1.79549, 2)
print(BGN)
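# Worked example (illustrative): an input of 100 gives
# round(100 * 1.79549, 2) = 179.55.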
| 38.5
| 134
| 0.724026
|
USD = float(input())
BGN = round(USD * 1.79549, 2)
print(BGN)
| true
| true
|
79089951aca1c889b3b8d866cd00f096803323ad
| 12,371
|
py
|
Python
|
opytimizer/optimizers/social/qsa.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 528
|
2018-10-01T20:00:09.000Z
|
2022-03-27T11:15:31.000Z
|
opytimizer/optimizers/social/qsa.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 17
|
2019-10-30T00:47:03.000Z
|
2022-03-21T11:39:28.000Z
|
opytimizer/optimizers/social/qsa.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 35
|
2018-10-01T20:03:23.000Z
|
2022-03-20T03:54:15.000Z
|
"""Queuing Search Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
"""A QSA class, inherited from Optimizer.
    This class defines QSA-related variables and methods.
References:
J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm
for solving engineering optimization problems.
Applied Mathematical Modelling (2018).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> QSA.')
# Overrides its parent class with the receiving params
super(QSA, self).__init__()
# Builds the class
self.build(params)
logger.info('Class overrided.')
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
"""Calculates the number of agents that belongs to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
"""
# Checks if potential service time is bigger than `epsilon`
if t_1 > c.EPSILON:
# Calculates the proportion of agents in first, second and third queues
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
# If the potential service time is smaller than `epsilon`
else:
# Each queue will have 1/3 ratio
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
        # Calculates the number of agents that belong to each queue
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
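    # Worked example for `_calculate_queue` (illustrative): with n_agents=99
    # and fitness values t_1=1, t_2=2, t_3=3, the reciprocal weights are
    # 6/11, 3/11 and 2/11, so the queues receive 54, 27 and 18 agents.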
def _business_one(self, agents, function, beta):
"""Performs the first business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
beta (float): Range of fluctuation.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Represents the update patterns by eq. 4 and eq. 5
case = None
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# If it is the first agent in first queue
if i == 0:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# If index is the first agent in second queue
if i == q_1:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# If index is the first agent in third queue
if i == q_1 + q_2:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
alpha = r.generate_uniform_random_number(-1, 1)
# Generates an Erlang distribution
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
# If case is defined as one
if case == 1:
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Calculates the fluctuation (eq. 6)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
# Updates the temporary agent's position (eq. 4)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
# Defines the case as two
case = 2
# If case is defined as two
else:
# Calculates the fluctuation (eq. 7)
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
# Updates the temporary agent's position (eq. 5)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
# Defines the case as one
case = 1
def _business_two(self, agents, function):
"""Performs the second business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Calculates the confusion degree
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# If random number is smaller than confusion degree
if r2 < cv:
# Calculates the fluctuation (eq. 14)
F_1 = e * (A_1.position - A_2.position)
# Update agent's position (eq. 12)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def _business_three(self, agents, function):
"""Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# Iterates through all decision variables
for j in range(agent.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Updates temporary agent's position (eq. 17)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def update(self, space, function, iteration, n_iterations):
"""Wraps Queue Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
n_iterations (int): Maximum number of iterations.
"""
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
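# --- Hedged usage sketch appended for illustration (not part of the original
# module): driving the QSA class above through opytimizer's public API. The
# import paths and signatures below are assumptions and may differ by version.
if __name__ == '__main__':
    from opytimizer import Opytimizer
    from opytimizer.core import Function
    from opytimizer.spaces import SearchSpace
    # Simple sphere objective to be minimized
    sphere = Function(lambda x: np.sum(x ** 2))
    # 20 agents searching the 2-D box [-10, 10]^2
    space = SearchSpace(20, 2, [-10, -10], [10, 10])
    # Wires space, QSA and objective together and runs 100 iterations
    Opytimizer(space, QSA(), sphere).start(n_iterations=100)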
| 35.446991
| 100
| 0.546035
|
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
def __init__(self, params=None):
logger.info('Overriding class: Optimizer -> QSA.')
super(QSA, self).__init__()
self.build(params)
logger.info('Class overrided.')
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
if t_1 > c.EPSILON:
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
else:
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
def _business_one(self, agents, function, beta):
agents.sort(key=lambda x: x.fit)
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
case = None
for i, agent in enumerate(agents):
a = copy.deepcopy(agent)
if i < q_1:
if i == 0:
case = 1
A = copy.deepcopy(A_1)
elif q_1 <= i < q_1 + q_2:
if i == q_1:
case = 1
A = copy.deepcopy(A_2)
else:
if i == q_1 + q_2:
case = 1
A = copy.deepcopy(A_3)
alpha = r.generate_uniform_random_number(-1, 1)
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
if case == 1:
e = r.generate_gamma_random_number(1, 0.5, 1)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
case = 2
else:
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
case = 1
def _business_two(self, agents, function):
agents.sort(key=lambda x: x.fit)
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
for i, agent in enumerate(agents):
a = copy.deepcopy(agent)
if i < q_1:
A = copy.deepcopy(A_1)
elif q_1 <= i < q_1 + q_2:
A = copy.deepcopy(A_2)
else:
A = copy.deepcopy(A_3)
r1 = r.generate_uniform_random_number()
if r1 < pr[i]:
A_1, A_2 = np.random.choice(agents, 2, replace=False)
r2 = r.generate_uniform_random_number()
e = r.generate_gamma_random_number(1, 0.5, 1)
if r2 < cv:
F_1 = e * (A_1.position - A_2.position)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
a.fit = function(a.position)
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def _business_three(self, agents, function):
agents.sort(key=lambda x: x.fit)
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
for i, agent in enumerate(agents):
a = copy.deepcopy(agent)
for j in range(agent.n_variables):
r1 = r.generate_uniform_random_number()
if r1 < pr[i]:
A_1, A_2 = np.random.choice(agents, 2, replace=False)
e = r.generate_gamma_random_number(1, 0.5, 1)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def update(self, space, function, iteration, n_iterations):
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
| true
| true
|
7908996791bbdb38adfbaea47b80c5dbaabad36f
| 18,627
|
py
|
Python
|
edu54book/edu54bookSizeAuto.py
|
Clonexy700/edu54book
|
2a83f178947ddaf72ae6f94b502dfcf390ea9fe3
|
[
"Unlicense"
] | 1
|
2019-12-24T08:44:32.000Z
|
2019-12-24T08:44:32.000Z
|
edu54book/edu54bookSizeAuto.py
|
Clonexy700/edu54book
|
2a83f178947ddaf72ae6f94b502dfcf390ea9fe3
|
[
"Unlicense"
] | null | null | null |
edu54book/edu54bookSizeAuto.py
|
Clonexy700/edu54book
|
2a83f178947ddaf72ae6f94b502dfcf390ea9fe3
|
[
"Unlicense"
] | null | null | null |
from tkinter import *
from tkinter import ttk
import time
window = Tk()
mygreen = "lightblue"
myred = "blue"
style = ttk.Style()
style.theme_create( "dedoff", parent="alt", settings={
"TNotebook": {"configure": {"tabmargins": [2, 5, 2, 0] } },
"TNotebook.Tab": {
"configure": {"padding": [5, 1], "background": mygreen },
"map": {"background": [("selected", myred)],
"expand": [("selected", [1, 1, 1, 0])] } } } )
style.theme_use("dedoff")
window.title("Электронный учебник tkinter")
window.geometry('1920x1080')
tab_control = ttk.Notebook(window)
# panels (notebook tabs)
tab1 = ttk.Frame(tab_control, width=1920, height=1080)
tab2 = ttk.Frame(tab_control, width=1920, height=1080)
tab3 = ttk.Frame(tab_control, width=1080, height=600)
tab4 = ttk.Frame(tab_control, width=1080, height=600)
tab5 = ttk.Frame(tab_control, width=1080, height=600)
tab6 = ttk.Frame(tab_control, width=1080, height=600)
tab7 = ttk.Frame(tab_control, width=1080, height=600)
tab8 = ttk.Frame(tab_control, width=1080, height=600)
tab9 = ttk.Frame(tab_control, width=1080, height=600)
tab10 = ttk.Frame(tab_control, width=1080, height=600)
tab_control.add(tab1, text='Начало')
background_image = PhotoImage(file='background.ppm')
background_label = Label(tab1, image=background_image)
background_label.place(relwidth=1, relheight=1)
lower_frame = Frame(tab1, bg="lightblue", bd=10)
lower_frame.place(relx=0.5, rely=0.10, relwidth=0.75, relheight=0.75, anchor='n')
labeltext1 = Label(lower_frame, text="Tkinter – это кроссплатформенная библиотека для разработки графического интерфейса на "
"языке Python\n (начиная с Python 3.0 переименована в tkinter). Tkinter расшифровывается "
"как Tk interface \nНачиная с версии python-3.0 библиотека переименована в соответствии с "
"PEP 8 в tkinter (с маленькой буквы). \nИмпортируется она как и любая другая библиотека "
"абсолютно весь код в этом учебнике написан для python версии 3.x \nПодключить модуль "
"можно с помощью инструкции import. После ключевого слова import указывается название "
"модуля.\n Одной инструкцией можно подключить несколько модулей. Для подключения всех \n"
"функций модуля используем:\n"
"import tkinter \n"
"или \n"
"from tkinter import * \n"
"Чтобы убедиться, что Tkinter установлен и работает, воспользуемся стандартной "
"функцией Tkinter: test():"
"\n"
"import tkinter \n"
"tkinter._test() \n"
,
font=("Times New Roman", 13), bg="white")
labeltext1.place(relwidth=1, relheight=0.6)
photo = PhotoImage(file='edu54img.pgm')
labelimage = Label(lower_frame,bg='white', image=photo)
labelimage.place(relx=0.5, rely=0.6, relwidth=1, relheight=0.4, anchor='n')
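# Hedged illustrative sketch added after this section: the self-test the label
# above describes, wrapped in an unused helper so it does not run alongside
# the main window.
def _demo_self_test():
    import tkinter
    tkinter._test()  # opens a small window confirming the Tcl/Tk version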
# THE SECOND TAB
tab_control.add(tab2, text='Canvas')
background_image2 = PhotoImage(file='background.ppm')
background_label1 = Label(tab2, image=background_image2)
background_label1.place(relwidth=1, relheight=1)
lower_frame1 = Frame(tab2, bg="lightblue", bd=10)
lower_frame1.place(relx=0.5, rely=0.02, relwidth=0.75, relheight=0.95, anchor='n')
labeltext2 = Label(lower_frame1, text=u"Привет, это второй раздел учебника.\n В tkinter от класса Canvas создаются объекты-холсты, на которых можно рисовать,\n"
"размещая различные фигуры и объекты. Делается это с помощью вызовов соответствующих \n"
"методов. При создании экземпляра Canvas необходимо указать его ширину и высоту. При \n"
"размещении геометрических примитивов и других объектов указываются их координаты на \n "
"холсте. Точкой отсчета является верхний левый угол.", font=("Times New Roman", 12), bg="white")
labeltext2.place(relwidth=1, relheight=0.3)
photo2 = PhotoImage(file='edu54img2.pgm')
labelimage1 = Label(lower_frame1, bg='white', image=photo2)
labelimage1.place(relx=0.5, rely=0.30, relwidth=1, relheight=0.49, anchor='n')
labeltext2 = Label(lower_frame1, text="В программе ниже создается холст.\n"
"from tkinter import *\n"
"window = Tk()\n"
"c = Canvas(root, width=200, height=200, bg='white')\n"
"c.pack()\n"
"window.mainloop()\n"
"в следующей главе мы разберем как рисовать на этом холсте", font=("Times New Roman", 12), bg="white")
labeltext2.place(relx=0.5, rely=0.75, relwidth=1, relheight=0.3, anchor='n')
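# Hedged illustrative sketch: the canvas-creation program the label above
# describes, kept as an unused helper.
def _demo_canvas():
    demo = Tk()
    c = Canvas(demo, width=200, height=200, bg='white')
    c.pack()
    demo.mainloop()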
tab_control.add(tab3, text='Примитивы')
background_image3 = PhotoImage(file='background.ppm')
background_label2 = Label(tab3, image=background_image3)
background_label2.place(relwidth=1, relheight=1)
lower_frame2 = Frame(tab3, bg="lightblue", bd=10)
lower_frame2.place(relx=0.5, rely=0.02, relwidth=0.8, relheight=0.95, anchor='n')
labeltext3 = Label(lower_frame2, text="В tkinter уже есть графические примитивы, для рисования, их нужно всего лишь правильно "
"указать.\n В программе ниже создается холст. На нем с помощью метода create_line() "
"рисуются отрезки. \n Сначала указываются координаты начала (x1, y1), затем – конца (x2, "
"y2) В программе ниже создаётся и рисуется линия на холсте.", font=("Times New Roman", 12), bg="white")
labeltext3.place(relwidth=1, relheight=0.12)
codeimg = PhotoImage(file='code.pgm')
labelimg = Label(lower_frame2, bg='white', image=codeimg)
labelimg.place(relx=0.5, rely=0.11, relwidth=1, relheight=0.5, anchor='n')
labelgotext = Label(lower_frame2, text="Собственно сами примитивы. Указываем координаты примитива всегда следующим образом – \n "
"верхний левый угол(x1, y1), вторые – правый нижний(x2, y2).", font=("Times New "
"Roman", 11),
bg='white')
labelgotext.place(relx=0.5, rely=0.52, relwidth=1, relheight=0.07, anchor='n')
rectangle = PhotoImage(file='rectangle.ppm')
rectanglelabel = Label(lower_frame2, bg='white', image=rectangle)
rectanglelabel.place(relx=0.5, rely=0.60, relwidth=1, relheight=0.45, anchor='n')
labelgotext2 = Label(lower_frame2, text="Далее о других примитивах в следующей вкладке", font=("Times New "
"Roman", 11),
bg='white')
labelgotext2.place(relx=0.5, rely=0.97, relwidth=1, relheight=0.05, anchor='n')
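# Hedged illustrative sketch: create_line() with start (x1, y1) and end
# (x2, y2) coordinates, plus a rectangle given by its top-left and
# bottom-right corners, as described in the labels above.
def _demo_line():
    demo = Tk()
    c = Canvas(demo, width=200, height=200, bg='white')
    c.pack()
    c.create_line(10, 10, 190, 60)                        # from (10, 10) to (190, 60)
    c.create_rectangle(20, 80, 180, 160, outline='blue')  # (x1, y1) / (x2, y2)
    demo.mainloop()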
tab_control.add(tab4, text='Примитивы 2')
background_image4 = PhotoImage(file='background.ppm')
background_label3 = Label(tab4, image=background_image4)
background_label3.place(relwidth=1, relheight=1)
lower_frame3 = Frame(tab4, bg="lightblue", bd=10)
lower_frame3.place(relx=0.5, rely=0, relwidth=0.9, relheight=1, anchor='n')
oval = PhotoImage(file='oval_1.ppm')
ovallabel = Label(lower_frame3,bg='white', image=oval)
ovallabel.place(relx=0.5, rely=0, relwidth=1, relheight=0.55, anchor='n')
elipsoid = PhotoImage(file='ellipssmall.ppm')
elabel = Label(lower_frame3, bg='white', image=elipsoid)
elabel.place(relx=0.5, rely=0.5, relwidth=1, relheight=0.25, anchor='n')
labeltext4 = Label(lower_frame3, text="Метод create_oval(x1, y1, x2, y2) создает эллипсы. При этом задаются координаты гипотетического "
"прямоугольника, описывающего эллипс. \nЕсли нужно получить круг, то соответственно "
"описываемый прямоугольник должен быть квадратом.\n"
"Методом create_polygon(x1, x2...xn, yn) рисуется произвольный многоугольник путем задания координат каждой его точки\n"
"Создание прямоугольников методом create_rectangle(x1, y1, x2, y2)\n"
"Опции: \nwidth=число - ширина обводки, fill='color' - цвет заливки,\n outline='color' - цвет "
"обводки,\n activefill определяет цвет при наведении на него курсора мыши.\n"
"activeoutline определяет цвет обводки при наведении курсор", font=("Times New Roman", 11),
bg="white")
labeltext4.place(relx=0.5, rely=0.74, relwidth=1, relheight=0.26, anchor='n')
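# Hedged illustrative sketch: the primitives and options listed in the label
# above (bounding box for the ellipse, vertex list for the polygon).
def _demo_oval_polygon():
    demo = Tk()
    c = Canvas(demo, width=220, height=220, bg='white')
    c.pack()
    c.create_oval(20, 20, 200, 100, fill='yellow', outline='red', width=2,
                  activefill='orange')                           # ellipse in its box
    c.create_polygon(30, 210, 110, 130, 190, 210, fill='green')  # x1, y1, ..., xn, yn
    demo.mainloop()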
tab_control.add(tab5, text='Примитивы 3')
background_image5 = PhotoImage(file='background.ppm')
background_label4 = Label(tab5, image=background_image5)
background_label4.place(relwidth=1, relheight=1)
lower_frame4 = Frame(tab5, bg="lightblue", bd=10)
lower_frame4.place(relx=0.5, rely=0.05, relwidth=0.75, relheight=0.9, anchor='n')
labeltext5 = Label(lower_frame4, text="Более сложные для понимания фигуры получаются при использовании метода create_arc(). В \n"
"зависимости от значения опции style можно получить сектор (по умолчанию), \n"
"сегмент (CHORD) или дугу (ARC). Также как в случае create_oval() координаты задают \n"
"прямоугольник, в который вписана окружность (или эллипс), из которой вырезают сектор, \n"
"сегмент или дугу. Опции start присваивается градус начала фигуры, extent определяет "
"угол поворота.",
font=("Times New Roman", 11), bg="white")
labeltext5.place(relwidth=1, relheight=0.2)
arc = PhotoImage(file='arc.ppm')
arclabel = Label(lower_frame4,bg='white', image=arc)
arclabel.place(relx=0.5, rely=0.15, relwidth=1, relheight=0.4, anchor='n')
arc2 = PhotoImage(file='arc2.ppm')
arclabel2 = Label(lower_frame4,bg='white', image=arc2)
arclabel2.place(relx=0.5, rely=0.55, relwidth=1, relheight=0.5, anchor='n')
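# Hedged illustrative sketch: the three create_arc() styles described above,
# all cut from the same 120-degree span.
def _demo_arc():
    demo = Tk()
    c = Canvas(demo, width=320, height=120, bg='white')
    c.pack()
    c.create_arc(10, 10, 100, 100, start=0, extent=120, style=PIESLICE)  # sector (default)
    c.create_arc(110, 10, 200, 100, start=0, extent=120, style=CHORD)    # segment
    c.create_arc(210, 10, 300, 100, start=0, extent=120, style=ARC)      # plain arc
    demo.mainloop()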
tab_control.add(tab6, text='Полезное')
background_image6 = PhotoImage(file='background.ppm')
background_label6 = Label(tab6, image=background_image6)
background_label6.place(relwidth=1, relheight=1)
table = PhotoImage(file='colortable.ppm')
tablelabel = Label(tab6,bg='lightblue', image=table)
tablelabel.place(relx=0.5, rely=0, relwidth=0.82, relheight=1, anchor='n')
tab_control.add(tab7, text='Практикум')
background_image7 = PhotoImage(file='background.ppm')
background_label7 = Label(tab7, bg='white', image=background_image7)
background_label7.place(relwidth=1, relheight=1)
lower_frame7 = Frame(tab7, bg="lightblue", bd=10)
lower_frame7.place(relx=0.5, rely=0.001, relwidth=0.65, relheight=1, anchor='n')
labelTASK1 = Label(lower_frame7, text="1) Пропеллер"
":Нарисуйте пропеллер, как это показано ниже\n"
"'Кто мечтает быть пилотом, очень смелый видно тот. От-от-от вин-та!'", font=("Georgia", 12,), bg='white')
labelTASK1.place(relx=0.5, rely=0, relwidth=1, relheight=0.06, anchor='n')
propeller = PhotoImage(file='propellersmall.ppm')
propelabel = Label(lower_frame7, bg='white', image=propeller)
propelabel.place(relx=0.5, rely=0.06, relwidth=1, relheight=0.55, anchor='n')
labelTASK2 = Label(lower_frame7, text="2) Торт"
":Нарисуйте торт для учителя информатики.\n'Треугольник' должен пропадать при наведении курсора.'\n"
"'Кто сьел мой двумерный массив?!'", font=("Georgia", 12, ), bg='white')
labelTASK2.place(relx=0.5, rely=0.6, relwidth=1, relheight=0.1, anchor='n')
tort = PhotoImage(file='tortsmall.ppm')
tortlabel = Label(lower_frame7, bg='white', image=tort)
tortlabel.place(relx=0.5, rely=0.69, relwidth=1, relheight=0.35, anchor='n')
tab_control.add(tab8, text='Анимации')
background_image8 = PhotoImage(file='background.ppm')
background_label8 = Label(tab8, image=background_image8)
background_label8.place(relwidth=1, relheight=1)
lower_frame8 = Frame(tab8, bg="lightblue", bd=10)
lower_frame8.place(relx=0.5, rely=0.5, relwidth=0.59, relheight=0.5, anchor='n')
labelanimation = Label(lower_frame8, text='Методы, создающие фигуры на холсте, возвращают численные идентификаторы \n'
'этих объектов, которые можно присвоить переменным,\n через которые позднее '
'обращаться к созданным фигурам. \n Основной шаблон для анимации с Tkinter – написать функцию, которая рисует один кадр. \n Затем используйте что-то подобное, чтобы называть его через регулярные интервалы: \n'
" def animate(self): self.draw_one_frame() self.after(100, self.animate) \n"
"Как только вы вызываете эту функцию один раз,\n она будет продолжать "
'рисовать кадры со скоростью десять в секунду – один раз каждые 100 '
"миллисекунд.\n В следующей вкладке разберём это подробно", font=("Times New Roman", 11),
bg="white")
labelanimation.place(relwidth=1, relheight=1)
WIDTH = 350
HEIGHT = 300
SIZE = 50
canvas = Canvas(tab8, width=WIDTH, height=HEIGHT, bg="blue")
canvas.pack()
color = '#6098cd'
class Ball:
def __init__(self, tag):
self.shape = canvas.create_oval(0, 0, SIZE, SIZE, fill=color, tags=tag)
self.speedx = 10
self.speedy = 15
self.active = True
def ball_update(self):
canvas.move(self.shape, self.speedx, self.speedy)
pos = canvas.coords(self.shape)
if pos[2] >= WIDTH or pos[0] <= 0:
self.speedx *= -1
if pos[3] >= HEIGHT or pos[1] <= 0:
self.speedy *= -1
global switcher
switcher = True
def cycle():
global switcher
canvas.tag_raise("bg")
if switcher:
ball2.ball_update()
ball2.ball_update()
canvas.tag_raise("ball")
else:
ball.ball_update()
ball.ball_update()
canvas.tag_raise("ball2")
tab8.update_idletasks()
switcher = not switcher
tab8.after(40, cycle)
bg = canvas.create_rectangle(0, 0, WIDTH+1, HEIGHT+1, fill="white", tags="bg")
ball = Ball("ball")
ball.ball_update()
ball2 = Ball("ball2")
tab8.after(0, cycle)
tab_control.add(tab9, text='Анимации 2')
background_image9 = PhotoImage(file='background.ppm')
background_label9 = Label(tab9, image=background_image9)
background_label9.place(relwidth=1, relheight=1)
lower_frame9 = Frame(tab9, bg="lightblue", bd=10)
lower_frame9.place(relx=0.5, rely=0.10, relwidth=0.75, relheight=0.75, anchor='n')
labelanimation2 = Label(lower_frame9, text='Рассмотрим следующий код, отвечающий за создание анимации и после этого попрактикуемся. Собственно сам код: \n', font=("Times New Roman", 11),
bg="white")
labelanimation2.place(relx=0.5, rely=0, relwidth=1, relheight=0.06, anchor='n')
code_image8 = PhotoImage(file='sharcode.ppm')
code_label8 = Label(lower_frame9, bg='white', image=code_image8)
code_label8.place(relx=0.5, rely=0.06, relwidth=1, relheight=0.6, anchor='n')
labelanimation3 = Label(lower_frame9, text='В данном коде создаётся шар, который двигается. Вначале происходит '
'создание холста Canvas и его "упаковка"\n, а также объекта ball, '
'с помощью примитива круг. После всего этого создаётся функция, которая '
'анимирует данный объект, рассмотрим её очень подробно \n '
'def motion (): - создание функции с названием motion \n'
'c.move(ball, 1, 0) - движение объекта на c. В самом начале при создании \n '
'холста мы назвали его c, следовательно при указании движения на нём мы \n'
'пишем c. move - декоратор, который указывает, что делать. В нашем случае \n'
'двигаться. Но чему? В скобках указываем объект движения и его координаты \n'
'движения x, y. if c.coords(ball)[2] < 300, отвечает за то, чтобы шар \n'
'двигался по координате X меньше 300. root.after(10, motion) - задержка до следующего обновления в миллисекундах. \n'
'После чего с помощью motion(), запускаем нашу функцию и само окно tkinter.', font=("Times New Roman", 10),
bg="white")
labelanimation3.place(relx=0.5, rely=0.65, relwidth=1, relheight=0.35, anchor='n')
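# Hedged illustrative sketch: a standalone version of the ball animation the
# label above walks through (c.move, c.coords and after()).
def _demo_motion():
    demo = Tk()
    cnv = Canvas(demo, width=300, height=100, bg='white')
    cnv.pack()
    ball = cnv.create_oval(0, 40, 20, 60, fill='red')
    def motion():
        cnv.move(ball, 1, 0)             # shift the ball 1 px to the right
        if cnv.coords(ball)[2] < 300:    # right edge still inside the canvas
            demo.after(10, motion)       # schedule the next frame in 10 ms
    motion()
    demo.mainloop()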
tab_control.add(tab10, text='Практикум 2')
background_image10 = PhotoImage(file='background.ppm')
background_label10 = Label(tab10, image=background_image10)
background_label10.place(relwidth=1, relheight=1)
# Practicum 2: train
c = Canvas(tab10, width=300, height=200, bg="white")
c.place(relx=0.5, rely=0.65, relwidth=0.15, relheight=0.2, anchor='n')
vagon1 = c.create_rectangle(0, 50, 60, 90, fill='blue')
line = c.create_line(60, 70, 70, 70, fill='brown', width=6)
vagon2 = c.create_rectangle(70, 50, 130, 90, fill='blue')
relsa = c.create_line(0, 90, 300, 90, fill='gray', width=3)
def motion():
c.move(vagon1, 1, 0)
c.move(vagon2, 1, 0)
c.move(line, 1, 0)
if c.coords(vagon1)[0] < 50:
tab10.after(20, motion)
motion()
tab_control.pack(expand=10, fill='both', padx=5, pady=5)
lower_frame9 = Frame(tab10, bg="lightblue", bd=10)
lower_frame9.place(relx=0.5, rely=0.35, relwidth=0.45, relheight=0.25, anchor='n')
labelpractic2 = Label(lower_frame9, text="Анимируйте данный скетч поезда! Исходный код создания самого скетча без холста: \n vagon1 = c.create_rectangle(0, 50, 60, 90, fill='blue')\n"
"line = c.create_line(60, 70, 70, 70, fill='brown', width=6) \n"
"vagon2 = c.create_rectangle(70, 50, 130, 90, fill='blue') \n"
"relsa = c.create_line(0, 90, 300, 90, fill='gray', width=3) \n", bg='white', font=("Times New Roman", 11))
labelpractic2.place(relwidth=1, relheight=1)
Button(window, text='© Dedov Georgiy 2019').pack(fill='x')
window.resizable(True, True)
window.mainloop()
| 51.31405
| 251
| 0.628872
|
from tkinter import *
from tkinter import ttk
import time
window = Tk()
mygreen = "lightblue"
myred = "blue"
style = ttk.Style()
style.theme_create( "dedoff", parent="alt", settings={
"TNotebook": {"configure": {"tabmargins": [2, 5, 2, 0] } },
"TNotebook.Tab": {
"configure": {"padding": [5, 1], "background": mygreen },
"map": {"background": [("selected", myred)],
"expand": [("selected", [1, 1, 1, 0])] } } } )
style.theme_use("dedoff")
window.title("Электронный учебник tkinter")
window.geometry('1920x1080')
tab_control = ttk.Notebook(window)
tab1 = ttk.Frame(tab_control, width=1920, height=1080)
tab2 = ttk.Frame(tab_control, width=1920, height=1080)
tab3 = ttk.Frame(tab_control, width=1080, height=600)
tab4 = ttk.Frame(tab_control, width=1080, height=600)
tab5 = ttk.Frame(tab_control, width=1080, height=600)
tab6 = ttk.Frame(tab_control, width=1080, height=600)
tab7 = ttk.Frame(tab_control, width=1080, height=600)
tab8 = ttk.Frame(tab_control, width=1080, height=600)
tab9 = ttk.Frame(tab_control, width=1080, height=600)
tab10 = ttk.Frame(tab_control, width=1080, height=600)
tab_control.add(tab1, text='Начало')
background_image = PhotoImage(file='background.ppm')
background_label = Label(tab1, image=background_image)
background_label.place(relwidth=1, relheight=1)
lower_frame = Frame(tab1, bg="lightblue", bd=10)
lower_frame.place(relx=0.5, rely=0.10, relwidth=0.75, relheight=0.75, anchor='n')
labeltext1 = Label(lower_frame, text="Tkinter – это кроссплатформенная библиотека для разработки графического интерфейса на "
"языке Python\n (начиная с Python 3.0 переименована в tkinter). Tkinter расшифровывается "
"как Tk interface \nНачиная с версии python-3.0 библиотека переименована в соответствии с "
"PEP 8 в tkinter (с маленькой буквы). \nИмпортируется она как и любая другая библиотека "
"абсолютно весь код в этом учебнике написан для python версии 3.x \nПодключить модуль "
"можно с помощью инструкции import. После ключевого слова import указывается название "
"модуля.\n Одной инструкцией можно подключить несколько модулей. Для подключения всех \n"
"функций модуля используем:\n"
"import tkinter \n"
"или \n"
"from tkinter import * \n"
"Чтобы убедиться, что Tkinter установлен и работает, воспользуемся стандартной "
"функцией Tkinter: test():"
"\n"
"import tkinter \n"
"tkinter._test() \n"
,
font=("Times New Roman", 13), bg="white")
labeltext1.place(relwidth=1, relheight=0.6)
photo = PhotoImage(file='edu54img.pgm')
labelimage = Label(lower_frame,bg='white', image=photo)
labelimage.place(relx=0.5, rely=0.6, relwidth=1, relheight=0.4, anchor='n')
tab_control.add(tab2, text='Canvas')
background_image2 = PhotoImage(file='background.ppm')
background_label1 = Label(tab2, image=background_image2)
background_label1.place(relwidth=1, relheight=1)
lower_frame1 = Frame(tab2, bg="lightblue", bd=10)
lower_frame1.place(relx=0.5, rely=0.02, relwidth=0.75, relheight=0.95, anchor='n')
labeltext2 = Label(lower_frame1, text=u"Привет, это второй раздел учебника.\n В tkinter от класса Canvas создаются объекты-холсты, на которых можно рисовать,\n"
"размещая различные фигуры и объекты. Делается это с помощью вызовов соответствующих \n"
"методов. При создании экземпляра Canvas необходимо указать его ширину и высоту. При \n"
"размещении геометрических примитивов и других объектов указываются их координаты на \n "
"холсте. Точкой отсчета является верхний левый угол.", font=("Times New Roman", 12), bg="white")
labeltext2.place(relwidth=1, relheight=0.3)
photo2 = PhotoImage(file='edu54img2.pgm')
labelimage1 = Label(lower_frame1, bg='white', image=photo2)
labelimage1.place(relx=0.5, rely=0.30, relwidth=1, relheight=0.49, anchor='n')
labeltext2 = Label(lower_frame1, text="В программе ниже создается холст.\n"
"from tkinter import *\n"
"window = Tk()\n"
"c = Canvas(root, width=200, height=200, bg='white')\n"
"c.pack()\n"
"window.mainloop()\n"
"в следующей главе мы разберем как рисовать на этом холсте", font=("Times New Roman", 12), bg="white")
labeltext2.place(relx=0.5, rely=0.75, relwidth=1, relheight=0.3, anchor='n')
tab_control.add(tab3, text='Примитивы')
background_image3 = PhotoImage(file='background.ppm')
background_label2 = Label(tab3, image=background_image3)
background_label2.place(relwidth=1, relheight=1)
lower_frame2 = Frame(tab3, bg="lightblue", bd=10)
lower_frame2.place(relx=0.5, rely=0.02, relwidth=0.8, relheight=0.95, anchor='n')
labeltext3 = Label(lower_frame2, text="В tkinter уже есть графические примитивы, для рисования, их нужно всего лишь правильно "
"указать.\n В программе ниже создается холст. На нем с помощью метода create_line() "
"рисуются отрезки. \n Сначала указываются координаты начала (x1, y1), затем – конца (x2, "
"y2) В программе ниже создаётся и рисуется линия на холсте.", font=("Times New Roman", 12), bg="white")
labeltext3.place(relwidth=1, relheight=0.12)
codeimg = PhotoImage(file='code.pgm')
labelimg = Label(lower_frame2, bg='white', image=codeimg)
labelimg.place(relx=0.5, rely=0.11, relwidth=1, relheight=0.5, anchor='n')
labelgotext = Label(lower_frame2, text="Собственно сами примитивы. Указываем координаты примитива всегда следующим образом – \n "
"верхний левый угол(x1, y1), вторые – правый нижний(x2, y2).", font=("Times New "
"Roman", 11),
bg='white')
labelgotext.place(relx=0.5, rely=0.52, relwidth=1, relheight=0.07, anchor='n')
rectangle = PhotoImage(file='rectangle.ppm')
rectanglelabel = Label(lower_frame2, bg='white', image=rectangle)
rectanglelabel.place(relx=0.5, rely=0.60, relwidth=1, relheight=0.45, anchor='n')
labelgotext2 = Label(lower_frame2, text="Далее о других примитивах в следующей вкладке", font=("Times New "
"Roman", 11),
bg='white')
labelgotext2.place(relx=0.5, rely=0.97, relwidth=1, relheight=0.05, anchor='n')
tab_control.add(tab4, text='Примитивы 2')
background_image4 = PhotoImage(file='background.ppm')
background_label3 = Label(tab4, image=background_image4)
background_label3.place(relwidth=1, relheight=1)
lower_frame3 = Frame(tab4, bg="lightblue", bd=10)
lower_frame3.place(relx=0.5, rely=0, relwidth=0.9, relheight=1, anchor='n')
oval = PhotoImage(file='oval_1.ppm')
ovallabel = Label(lower_frame3,bg='white', image=oval)
ovallabel.place(relx=0.5, rely=0, relwidth=1, relheight=0.55, anchor='n')
elipsoid = PhotoImage(file='ellipssmall.ppm')
elabel = Label(lower_frame3, bg='white', image=elipsoid)
elabel.place(relx=0.5, rely=0.5, relwidth=1, relheight=0.25, anchor='n')
labeltext4 = Label(lower_frame3, text="Метод create_oval(x1, y1, x2, y2) создает эллипсы. При этом задаются координаты гипотетического "
"прямоугольника, описывающего эллипс. \nЕсли нужно получить круг, то соответственно "
"описываемый прямоугольник должен быть квадратом.\n"
"Методом create_polygon(x1, x2...xn, yn) рисуется произвольный многоугольник путем задания координат каждой его точки\n"
"Создание прямоугольников методом create_rectangle(x1, y1, x2, y2)\n"
"Опции: \nwidth=число - ширина обводки, fill='color' - цвет заливки,\n outline='color' - цвет "
"обводки,\n activefill определяет цвет при наведении на него курсора мыши.\n"
"activeoutline определяет цвет обводки при наведении курсор", font=("Times New Roman", 11),
bg="white")
labeltext4.place(relx=0.5, rely=0.74, relwidth=1, relheight=0.26, anchor='n')
tab_control.add(tab5, text='Примитивы 3')
background_image5 = PhotoImage(file='background.ppm')
background_label4 = Label(tab5, image=background_image5)
background_label4.place(relwidth=1, relheight=1)
lower_frame4 = Frame(tab5, bg="lightblue", bd=10)
lower_frame4.place(relx=0.5, rely=0.05, relwidth=0.75, relheight=0.9, anchor='n')
labeltext5 = Label(lower_frame4, text="Более сложные для понимания фигуры получаются при использовании метода create_arc(). В \n"
"зависимости от значения опции style можно получить сектор (по умолчанию), \n"
"сегмент (CHORD) или дугу (ARC). Также как в случае create_oval() координаты задают \n"
"прямоугольник, в который вписана окружность (или эллипс), из которой вырезают сектор, \n"
"сегмент или дугу. Опции start присваивается градус начала фигуры, extent определяет "
"угол поворота.",
font=("Times New Roman", 11), bg="white")
labeltext5.place(relwidth=1, relheight=0.2)
arc = PhotoImage(file='arc.ppm')
arclabel = Label(lower_frame4,bg='white', image=arc)
arclabel.place(relx=0.5, rely=0.15, relwidth=1, relheight=0.4, anchor='n')
arc2 = PhotoImage(file='arc2.ppm')
arclabel2 = Label(lower_frame4,bg='white', image=arc2)
arclabel2.place(relx=0.5, rely=0.55, relwidth=1, relheight=0.5, anchor='n')
tab_control.add(tab6, text='Полезное')
background_image6 = PhotoImage(file='background.ppm')
background_label6 = Label(tab6, image=background_image6)
background_label6.place(relwidth=1, relheight=1)
table = PhotoImage(file='colortable.ppm')
tablelabel = Label(tab6,bg='lightblue', image=table)
tablelabel.place(relx=0.5, rely=0, relwidth=0.82, relheight=1, anchor='n')
tab_control.add(tab7, text='Практикум')
background_image7 = PhotoImage(file='background.ppm')
background_label7 = Label(tab7, bg='white', image=background_image7)
background_label7.place(relwidth=1, relheight=1)
lower_frame7 = Frame(tab7, bg="lightblue", bd=10)
lower_frame7.place(relx=0.5, rely=0.001, relwidth=0.65, relheight=1, anchor='n')
labelTASK1 = Label(lower_frame7, text="1) Пропеллер"
":Нарисуйте пропеллер, как это показано ниже\n"
"'Кто мечтает быть пилотом, очень смелый видно тот. От-от-от вин-та!'", font=("Georgia", 12,), bg='white')
labelTASK1.place(relx=0.5, rely=0, relwidth=1, relheight=0.06, anchor='n')
propeller = PhotoImage(file='propellersmall.ppm')
propelabel = Label(lower_frame7, bg='white', image=propeller)
propelabel.place(relx=0.5, rely=0.06, relwidth=1, relheight=0.55, anchor='n')
labelTASK2 = Label(lower_frame7, text="2) Торт"
":Нарисуйте торт для учителя информатики.\n'Треугольник' должен пропадать при наведении курсора.'\n"
"'Кто сьел мой двумерный массив?!'", font=("Georgia", 12, ), bg='white')
labelTASK2.place(relx=0.5, rely=0.6, relwidth=1, relheight=0.1, anchor='n')
tort = PhotoImage(file='tortsmall.ppm')
tortlabel = Label(lower_frame7, bg='white', image=tort)
tortlabel.place(relx=0.5, rely=0.69, relwidth=1, relheight=0.35, anchor='n')
tab_control.add(tab8, text='Анимации')
background_image8 = PhotoImage(file='background.ppm')
background_label8 = Label(tab8, image=background_image8)
background_label8.place(relwidth=1, relheight=1)
lower_frame8 = Frame(tab8, bg="lightblue", bd=10)
lower_frame8.place(relx=0.5, rely=0.5, relwidth=0.59, relheight=0.5, anchor='n')
labelanimation = Label(lower_frame8, text='Методы, создающие фигуры на холсте, возвращают численные идентификаторы \n'
'этих объектов, которые можно присвоить переменным,\n через которые позднее '
'обращаться к созданным фигурам. \n Основной шаблон для анимации с Tkinter – написать функцию, которая рисует один кадр. \n Затем используйте что-то подобное, чтобы называть его через регулярные интервалы: \n'
" def animate(self): self.draw_one_frame() self.after(100, self.animate) \n"
"Как только вы вызываете эту функцию один раз,\n она будет продолжать "
'рисовать кадры со скоростью десять в секунду – один раз каждые 100 '
"миллисекунд.\n В следующей вкладке разберём это подробно", font=("Times New Roman", 11),
bg="white")
labelanimation.place(relwidth=1, relheight=1)
WIDTH = 350
HEIGHT = 300
SIZE = 50
canvas = Canvas(tab8, width=WIDTH, height=HEIGHT, bg="blue")
canvas.pack()
color = '#6098cd'
class Ball:
def __init__(self, tag):
self.shape = canvas.create_oval(0, 0, SIZE, SIZE, fill=color, tags=tag)
self.speedx = 10
self.speedy = 15
self.active = True
def ball_update(self):
canvas.move(self.shape, self.speedx, self.speedy)
pos = canvas.coords(self.shape)
if pos[2] >= WIDTH or pos[0] <= 0:
self.speedx *= -1
if pos[3] >= HEIGHT or pos[1] <= 0:
self.speedy *= -1
global switcher
switcher = True
def cycle():
global switcher
canvas.tag_raise("bg")
if switcher:
ball2.ball_update()
ball2.ball_update()
canvas.tag_raise("ball")
else:
ball.ball_update()
ball.ball_update()
canvas.tag_raise("ball2")
tab8.update_idletasks()
switcher = not switcher
tab8.after(40, cycle)
bg = canvas.create_rectangle(0, 0, WIDTH+1, HEIGHT+1, fill="white", tags="bg")
ball = Ball("ball")
ball.ball_update()
ball2 = Ball("ball2")
tab8.after(0, cycle)
tab_control.add(tab9, text='Анимации 2')
background_image9 = PhotoImage(file='background.ppm')
background_label9 = Label(tab9, image=background_image9)
background_label9.place(relwidth=1, relheight=1)
lower_frame9 = Frame(tab9, bg="lightblue", bd=10)
lower_frame9.place(relx=0.5, rely=0.10, relwidth=0.75, relheight=0.75, anchor='n')
labelanimation2 = Label(lower_frame9, text='Рассмотрим следующий код, отвечающий за создание анимации и после этого попрактикуемся. Собственно сам код: \n', font=("Times New Roman", 11),
bg="white")
labelanimation2.place(relx=0.5, rely=0, relwidth=1, relheight=0.06, anchor='n')
code_image8 = PhotoImage(file='sharcode.ppm')
code_label8 = Label(lower_frame9, bg='white', image=code_image8)
code_label8.place(relx=0.5, rely=0.06, relwidth=1, relheight=0.6, anchor='n')
labelanimation3 = Label(lower_frame9, text='В данном коде создаётся шар, который двигается. Вначале происходит '
'создание холста Canvas и его "упаковка"\n, а также объекта ball, '
'с помощью примитива круг. После всего этого создаётся функция, которая '
'анимирует данный объект, рассмотрим её очень подробно \n '
'def motion (): - создание функции с названием motion \n'
'c.move(ball, 1, 0) - движение объекта на c. В самом начале при создании \n '
'холста мы назвали его c, следовательно при указании движения на нём мы \n'
'пишем c. move - декоратор, который указывает, что делать. В нашем случае \n'
'двигаться. Но чему? В скобках указываем объект движения и его координаты \n'
'движения x, y. if c.coords(ball)[2] < 300, отвечает за то, чтобы шар \n'
'двигался по координате X меньше 300. root.after(10, motion) - задержка до следующего обновления в миллисекундах. \n'
'После чего с помощью motion(), запускаем нашу функцию и само окно tkinter.', font=("Times New Roman", 10),
bg="white")
labelanimation3.place(relx=0.5, rely=0.65, relwidth=1, relheight=0.35, anchor='n')
tab_control.add(tab10, text='Практикум 2')
background_image10 = PhotoImage(file='background.ppm')
background_label10 = Label(tab10, image=background_image10)
background_label10.place(relwidth=1, relheight=1)
# Practicum 2: train
c = Canvas(tab10, width=300, height=200, bg="white")
c.place(relx=0.5, rely=0.65, relwidth=0.15, relheight=0.2, anchor='n')
vagon1 = c.create_rectangle(0, 50, 60, 90, fill='blue')
line = c.create_line(60, 70, 70, 70, fill='brown', width=6)
vagon2 = c.create_rectangle(70, 50, 130, 90, fill='blue')
relsa = c.create_line(0, 90, 300, 90, fill='gray', width=3)
def motion():
c.move(vagon1, 1, 0)
c.move(vagon2, 1, 0)
c.move(line, 1, 0)
if c.coords(vagon1)[0] < 50:
tab10.after(20, motion)
motion()
tab_control.pack(expand=10, fill='both', padx=5, pady=5)
lower_frame9 = Frame(tab10, bg="lightblue", bd=10)
lower_frame9.place(relx=0.5, rely=0.35, relwidth=0.45, relheight=0.25, anchor='n')
labelpractic2 = Label(lower_frame9, text="Анимируйте данный скетч поезда! Исходный код создания самого скетча без холста: \n vagon1 = c.create_rectangle(0, 50, 60, 90, fill='blue')\n"
"line = c.create_line(60, 70, 70, 70, fill='brown', width=6) \n"
"vagon2 = c.create_rectangle(70, 50, 130, 90, fill='blue') \n"
"relsa = c.create_line(0, 90, 300, 90, fill='gray', width=3) \n", bg='white', font=("Times New Roman", 11))
labelpractic2.place(relwidth=1, relheight=1)
Button(window, text='© Dedov Georgiy 2019').pack(fill='x')
window.resizable(True, True)
window.mainloop()
| true
| true
|
790899861c6261cce46a4e7489c43a8740801311
| 1,994
|
py
|
Python
|
draft/truefx/truefx_tick.py
|
movermeyer/pandas_datareaders_unofficial
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 18
|
2015-02-05T01:42:51.000Z
|
2020-12-27T19:24:25.000Z
|
draft/truefx/truefx_tick.py
|
movermeyer/pandas_datareaders_unofficial
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 1
|
2016-04-05T04:10:40.000Z
|
2016-04-05T04:13:40.000Z
|
draft/truefx/truefx_tick.py
|
femtotrader/pandas_datareaders
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 13
|
2015-09-10T19:39:51.000Z
|
2022-01-06T17:08:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests_cache
import datetime
import pandas as pd
from datetime import timedelta
from pandas.io.common import ZipFile
from pandas.compat import BytesIO, StringIO, PY2
def main():
expire_after = timedelta(days=1)
if PY2:
filename = 'cache_py2'
else:
filename = 'cache'
session = requests_cache.CachedSession(cache_name=filename, expire_after=expire_after)
dt = pd.to_datetime("2014-01-01")
symbol = "AUD/USD"
symbol = symbol.replace("/", "").upper()
year = dt.year
month = dt.month
month_name = datetime.datetime(year=1970, month=month, day=1).strftime('%B').upper()
#url = "http://www.truefx.com/dev/data/2014/JANUARY-2014/AUDUSD-2014-01.zip"
url = "http://www.truefx.com/dev/data/{year:04d}/{month_name}-{year:04d}/{symbol}-{year:04d}-{month:02d}.zip".format(year=year, month=month, symbol=symbol, month_name=month_name)
response = session.get(url)
zip_data = BytesIO(response.content)
filename = "{symbol}-{year:04d}-{month:02d}.csv".format(year=year, month=month, symbol=symbol)
with ZipFile(zip_data, 'r') as zf:
#filename = zf.namelist()[0]
zfile = zf.open(filename)
#print(zfile)
#(symb, dt, ask, bid) = zfile.read().split(',')
#print(zfile.__dict__)
data = zfile.readlines()
#df = pd.read_csv(zfile._fileobj) # ToFix: can't make it work correctly
#return
df = pd.DataFrame(data)
#df = df[:100] # just for test
df[0] = df[0].str.decode('utf8')
df[0] = df[0].str.replace('\n', '')
df[0] = df[0].map(lambda s: s.split(','))
df['Symbol'] = df[0].map(lambda t: t[0])
df['Date'] = df[0].map(lambda t: pd.to_datetime(t[1]))
df['Bid'] = df[0].map(lambda t: t[2]).astype(float)
df['Ask'] = df[0].map(lambda t: t[3]).astype(float)
del df[0]
df = df.set_index('Date')
print(df)
if __name__ == "__main__":
main()
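# --- Hedged alternative sketch (not part of the original script) ---
# The "ToFix" note above says pd.read_csv could not be made to work on the
# zip member; wrapping the member bytes in BytesIO usually suffices. The
# column names follow the TrueFX tick layout and are an assumption here.
def read_truefx_zip(zip_data, filename):
    with ZipFile(zip_data, 'r') as zf:
        raw = zf.read(filename)
    return pd.read_csv(BytesIO(raw), header=None,
                       names=['Symbol', 'Date', 'Bid', 'Ask'],
                       parse_dates=['Date'], index_col='Date')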
| 34.37931
| 182
| 0.622869
|
import requests_cache
import datetime
import pandas as pd
from datetime import timedelta
from pandas.io.common import ZipFile
from pandas.compat import BytesIO, StringIO, PY2
def main():
expire_after = timedelta(days=1)
if PY2:
filename = 'cache_py2'
else:
filename = 'cache'
session = requests_cache.CachedSession(cache_name=filename, expire_after=expire_after)
dt = pd.to_datetime("2014-01-01")
symbol = "AUD/USD"
symbol = symbol.replace("/", "").upper()
year = dt.year
month = dt.month
month_name = datetime.datetime(year=1970, month=month, day=1).strftime('%B').upper()
url = "http://www.truefx.com/dev/data/{year:04d}/{month_name}-{year:04d}/{symbol}-{year:04d}-{month:02d}.zip".format(year=year, month=month, symbol=symbol, month_name=month_name)
response = session.get(url)
zip_data = BytesIO(response.content)
filename = "{symbol}-{year:04d}-{month:02d}.csv".format(year=year, month=month, symbol=symbol)
with ZipFile(zip_data, 'r') as zf:
zfile = zf.open(filename)
data = zfile.readlines()
df = pd.DataFrame(data)
#df = df[:100] # just for test
df[0] = df[0].str.decode('utf8')
df[0] = df[0].str.replace('\n', '')
df[0] = df[0].map(lambda s: s.split(','))
df['Symbol'] = df[0].map(lambda t: t[0])
df['Date'] = df[0].map(lambda t: pd.to_datetime(t[1]))
df['Bid'] = df[0].map(lambda t: t[2]).astype(float)
df['Ask'] = df[0].map(lambda t: t[3]).astype(float)
del df[0]
df = df.set_index('Date')
print(df)
if __name__ == "__main__":
main()
| true
| true
|
790899bab73f32e8386e72703c630a3a89c361df
| 3,230
|
py
|
Python
|
interactive-deep-colorization/ui/gui_gamut.py
|
arthw/colorization
|
e7f85ec307c9d27a16a87276beaaf2dee5492292
|
[
"BSD-2-Clause"
] | 2
|
2018-08-10T13:15:11.000Z
|
2022-01-15T02:04:18.000Z
|
interactive-deep-colorization/ui/gui_gamut.py
|
arthw/colorization
|
e7f85ec307c9d27a16a87276beaaf2dee5492292
|
[
"BSD-2-Clause"
] | null | null | null |
interactive-deep-colorization/ui/gui_gamut.py
|
arthw/colorization
|
e7f85ec307c9d27a16a87276beaaf2dee5492292
|
[
"BSD-2-Clause"
] | 1
|
2022-02-06T16:00:10.000Z
|
2022-02-06T16:00:10.000Z
|
import cv2
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from data import lab_gamut
import numpy as np
class GUIGamut(QWidget):
def __init__(self, gamut_size=110):
QWidget.__init__(self)
self.gamut_size = gamut_size
self.win_size = gamut_size * 2  # display window is twice the gamut grid size
self.setFixedSize(self.win_size, self.win_size)
self.ab_grid = lab_gamut.abGrid(gamut_size=gamut_size, D=1)
self.reset()
def set_gamut(self, l_in=50):
self.l_in = l_in
self.ab_map, self.mask = self.ab_grid.update_gamut(l_in=l_in)
self.update()
def set_ab(self, color):
self.color = color
self.lab = lab_gamut.rgb2lab_1d(self.color)
x, y = self.ab_grid.ab2xy(self.lab[1], self.lab[2])
self.pos = QPointF(x, y)
self.update()
def is_valid_point(self, pos):
if pos is None:
return False
else:
x = pos.x()
y = pos.y()
if x >= 0 and y >= 0 and x < self.win_size and y < self.win_size:
return self.mask[y, x]
else:
return False
def update_ui(self, pos):
self.pos = pos
a, b = self.ab_grid.xy2ab(pos.x(), pos.y())
# get color we need L
L = self.l_in
lab = np.array([L, a, b])
color = lab_gamut.lab2rgb_1d(lab, clip=True, dtype='uint8')
self.emit(SIGNAL('update_color'), color)
self.update()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), Qt.white)
if self.ab_map is not None:
ab_map = cv2.resize(self.ab_map, (self.win_size, self.win_size))
qImg = QImage(ab_map.tostring(), self.win_size, self.win_size, QImage.Format_RGB888)
painter.drawImage(0, 0, qImg)
painter.setPen(QPen(Qt.gray, 3, Qt.DotLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
painter.drawLine(self.win_size/2, 0, self.win_size/2, self.win_size)
painter.drawLine(0, self.win_size/2, self.win_size, self.win_size/2)
if self.pos is not None:
painter.setPen(QPen(Qt.black, 2, Qt.SolidLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
w = 5
x = self.pos.x()
y = self.pos.y()
painter.drawLine(x - w, y, x + w, y)
painter.drawLine(x, y - w, x, y + w)
painter.end()
def mousePressEvent(self, event):
pos = event.pos()
if event.button() == Qt.LeftButton and self.is_valid_point(pos): # click the point
self.update_ui(pos)
self.mouseClicked = True
def mouseMoveEvent(self, event):
pos = event.pos()
if self.is_valid_point(pos):
if self.mouseClicked:
self.update_ui(pos)
def mouseReleaseEvent(self, event):
self.mouseClicked = False
def sizeHint(self):
return QSize(self.win_size, self.win_size)
def reset(self):
self.ab_map = None
self.mask = None
self.color = None
self.lab = None
self.pos = None
self.mouseClicked = False
self.update()
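# --- Hedged usage sketch (not part of the original widget) ---
# Embedding the gamut widget and listening for the old-style PyQt4 signal
# emitted from update_ui(); everything below is illustrative only.
#
#   app = QApplication([])
#   gamut = GUIGamut(gamut_size=110)
#   def on_color(c):
#       print(c)
#   QObject.connect(gamut, SIGNAL('update_color'), on_color)
#   gamut.set_gamut(l_in=50)
#   gamut.show()
#   app.exec_()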
| 32.626263
| 96
| 0.581734
|
import cv2
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from data import lab_gamut
import numpy as np
class GUIGamut(QWidget):
def __init__(self, gamut_size=110):
QWidget.__init__(self)
self.gamut_size = gamut_size
self.win_size = gamut_size * 2
self.setFixedSize(self.win_size, self.win_size)
self.ab_grid = lab_gamut.abGrid(gamut_size=gamut_size, D=1)
self.reset()
def set_gamut(self, l_in=50):
self.l_in = l_in
self.ab_map, self.mask = self.ab_grid.update_gamut(l_in=l_in)
self.update()
def set_ab(self, color):
self.color = color
self.lab = lab_gamut.rgb2lab_1d(self.color)
x, y = self.ab_grid.ab2xy(self.lab[1], self.lab[2])
self.pos = QPointF(x, y)
self.update()
def is_valid_point(self, pos):
if pos is None:
return False
else:
x = pos.x()
y = pos.y()
if x >= 0 and y >= 0 and x < self.win_size and y < self.win_size:
return self.mask[y, x]
else:
return False
def update_ui(self, pos):
self.pos = pos
a, b = self.ab_grid.xy2ab(pos.x(), pos.y())
L = self.l_in
lab = np.array([L, a, b])
color = lab_gamut.lab2rgb_1d(lab, clip=True, dtype='uint8')
self.emit(SIGNAL('update_color'), color)
self.update()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), Qt.white)
if self.ab_map is not None:
ab_map = cv2.resize(self.ab_map, (self.win_size, self.win_size))
qImg = QImage(ab_map.tostring(), self.win_size, self.win_size, QImage.Format_RGB888)
painter.drawImage(0, 0, qImg)
painter.setPen(QPen(Qt.gray, 3, Qt.DotLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
painter.drawLine(self.win_size/2, 0, self.win_size/2, self.win_size)
painter.drawLine(0, self.win_size/2, self.win_size, self.win_size/2)
if self.pos is not None:
painter.setPen(QPen(Qt.black, 2, Qt.SolidLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
w = 5
x = self.pos.x()
y = self.pos.y()
painter.drawLine(x - w, y, x + w, y)
painter.drawLine(x, y - w, x, y + w)
painter.end()
def mousePressEvent(self, event):
pos = event.pos()
if event.button() == Qt.LeftButton and self.is_valid_point(pos):
self.update_ui(pos)
self.mouseClicked = True
def mouseMoveEvent(self, event):
pos = event.pos()
if self.is_valid_point(pos):
if self.mouseClicked:
self.update_ui(pos)
def mouseReleaseEvent(self, event):
self.mouseClicked = False
def sizeHint(self):
return QSize(self.win_size, self.win_size)
def reset(self):
self.ab_map = None
self.mask = None
self.color = None
self.lab = None
self.pos = None
self.mouseClicked = False
self.update()
| true
| true
|
79089b500417b5aa682b8baa543fb69d9e51b953
| 5,092
|
py
|
Python
|
metadata_service/api/task.py
|
ferras/metaflow-service-clone
|
cc9b4fb83a7e886cd16535f73b9e24dbd21bef0c
|
[
"Apache-2.0"
] | null | null | null |
metadata_service/api/task.py
|
ferras/metaflow-service-clone
|
cc9b4fb83a7e886cd16535f73b9e24dbd21bef0c
|
[
"Apache-2.0"
] | null | null | null |
metadata_service/api/task.py
|
ferras/metaflow-service-clone
|
cc9b4fb83a7e886cd16535f73b9e24dbd21bef0c
|
[
"Apache-2.0"
] | null | null | null |
from ..data.models import TaskRow
from ..data.postgres_async_db import AsyncPostgresDB
from .utils import read_body, format_response, handle_exceptions
import asyncio
class TaskApi(object):
_task_table = None
lock = asyncio.Lock()
def __init__(self, app):
app.router.add_route(
"GET",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "tasks",
self.get_tasks,
)
app.router.add_route(
"GET",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "tasks/{task_id}",
self.get_task,
)
app.router.add_route(
"POST",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "task",
self.create_task,
)
self._async_table = AsyncPostgresDB.get_instance().task_table_postgres
@format_response
@handle_exceptions
async def get_tasks(self, request):
"""
---
description: get all tasks associated with the specified step.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
produces:
- text/plain
responses:
"200":
description: successful operation. Return tasks
"405":
description: invalid HTTP Method
"""
flow_name = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
return await self._async_table.get_tasks(flow_name, run_number, step_name)
@format_response
@handle_exceptions
async def get_task(self, request):
"""
---
description: get the specified task.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
- name: "task_id"
in: "path"
description: "task_id"
required: true
type: "integer"
produces:
- text/plain
responses:
"200":
description: successful operation. Return task
"405":
description: invalid HTTP Method
"""
flow_name = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
task_id = request.match_info.get("task_id")
return await self._async_table.get_task(
flow_name, run_number, step_name, task_id
)
@format_response
@handle_exceptions
async def create_task(self, request):
"""
---
description: register a new task for the specified step.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
- name: "body"
in: "body"
description: "body"
required: true
schema:
type: object
properties:
user_name:
type: string
tags:
type: object
system_tags:
type: object
produces:
- 'text/plain'
responses:
"202":
description: successful operation. Return newly registered task
"405":
description: invalid HTTP Method
"""
flow_id = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
body = await read_body(request.content)
user = body.get("user_name")
tags = body.get("tags")
system_tags = body.get("system_tags")
task = TaskRow(
flow_id=flow_id,
run_number=run_number,
step_name=step_name,
user_name=user,
tags=tags,
system_tags=system_tags,
)
return await self._async_table.add_task(task)
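# --- Hedged usage sketch (not part of the service) ---
# Registering a task through the POST route above; the host, port and flow
# names are assumptions for illustration.
#
#   import requests
#   requests.post(
#       "http://localhost:8080/flows/MyFlow/runs/1/steps/start/task",
#       json={"user_name": "demo", "tags": {}, "system_tags": {}},
#   )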
| 29.264368
| 86
| 0.530244
|
from ..data.models import TaskRow
from ..data.postgres_async_db import AsyncPostgresDB
from .utils import read_body, format_response, handle_exceptions
import asyncio
class TaskApi(object):
_task_table = None
lock = asyncio.Lock()
def __init__(self, app):
app.router.add_route(
"GET",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "tasks",
self.get_tasks,
)
app.router.add_route(
"GET",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "tasks/{task_id}",
self.get_task,
)
app.router.add_route(
"POST",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "task",
self.create_task,
)
self._async_table = AsyncPostgresDB.get_instance().task_table_postgres
@format_response
@handle_exceptions
async def get_tasks(self, request):
flow_name = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
return await self._async_table.get_tasks(flow_name, run_number, step_name)
@format_response
@handle_exceptions
async def get_task(self, request):
flow_name = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
task_id = request.match_info.get("task_id")
return await self._async_table.get_task(
flow_name, run_number, step_name, task_id
)
@format_response
@handle_exceptions
async def create_task(self, request):
flow_id = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
body = await read_body(request.content)
user = body.get("user_name")
tags = body.get("tags")
system_tags = body.get("system_tags")
task = TaskRow(
flow_id=flow_id,
run_number=run_number,
step_name=step_name,
user_name=user,
tags=tags,
system_tags=system_tags,
)
return await self._async_table.add_task(task)
| true
| true
|
79089b7cb89d9cfa6870b713e23141a000877f3c
| 15,864
|
py
|
Python
|
release/scripts/startup/bl_ui/properties_physics_cloth.py
|
gunslingster/CSC581-assignement1
|
39012146e142bf400c7140d90ecfd27c45b589ca
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3
|
2020-08-07T11:35:09.000Z
|
2021-07-21T01:55:42.000Z
|
release/scripts/startup/bl_ui/properties_physics_cloth.py
|
mmtt1998819/blender
|
c9c3bf983321990a6960c422e002a372c35a6f76
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/startup/bl_ui/properties_physics_cloth.py
|
mmtt1998819/blender
|
c9c3bf983321990a6960c422e002a372c35a6f76
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5
|
2020-08-03T13:03:29.000Z
|
2021-08-07T22:10:26.000Z
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from bpy.types import (
Panel,
)
from bl_ui.utils import PresetPanel
from bl_ui.properties_physics_common import (
point_cache_ui,
effector_weights_ui,
)
def cloth_panel_enabled(md):
return md.point_cache.is_baked is False
class CLOTH_PT_presets(PresetPanel, Panel):
bl_label = "Cloth Presets"
preset_subdir = "cloth"
preset_operator = "script.execute_preset"
preset_add_operator = "cloth.preset_add"
class PhysicButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "physics"
@classmethod
def poll(cls, context):
ob = context.object
return (ob and ob.type == 'MESH') and (context.engine in cls.COMPAT_ENGINES) and (context.cloth)
class PHYSICS_PT_cloth(PhysicButtonsPanel, Panel):
bl_label = "Cloth"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header_preset(self, _context):
CLOTH_PT_presets.draw_panel_header(self.layout)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "quality", text="Quality Steps")
col = flow.column()
col.prop(cloth, "time_scale", text="Speed Multiplier")
class PHYSICS_PT_cloth_physical_properties(PhysicButtonsPanel, Panel):
bl_label = "Physical Properties"
bl_parent_id = 'PHYSICS_PT_cloth'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "mass", text="Vertex Mass")
col = flow.column()
col.prop(cloth, "air_damping", text="Air Viscosity")
col = flow.column()
col.prop(cloth, "bending_model")
class PHYSICS_PT_cloth_stiffness(PhysicButtonsPanel, Panel):
bl_label = "Stiffness"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
if cloth.bending_model == 'ANGULAR':
col.prop(cloth, "tension_stiffness", text="Tension")
col = flow.column()
col.prop(cloth, "compression_stiffness", text="Compression")
else:
col.prop(cloth, "tension_stiffness", text="Structural")
col = flow.column()
col.prop(cloth, "shear_stiffness", text="Shear")
col = flow.column()
col.prop(cloth, "bending_stiffness", text="Bending")
class PHYSICS_PT_cloth_damping(PhysicButtonsPanel, Panel):
bl_label = "Damping"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
if cloth.bending_model == 'ANGULAR':
col.prop(cloth, "tension_damping", text="Tension")
col = flow.column()
col.prop(cloth, "compression_damping", text="Compression")
else:
col.prop(cloth, "tension_damping", text="Structural")
col = flow.column()
col.prop(cloth, "shear_damping", text="Shear")
col = flow.column()
col.prop(cloth, "bending_damping", text="Bending")
class PHYSICS_PT_cloth_internal_springs(PhysicButtonsPanel, Panel):
bl_label = "Internal Springs"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_internal_springs", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.settings
md = context.cloth
ob = context.object
layout.active = cloth.use_internal_springs and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "internal_spring_max_length", text="Max Spring Creation Length")
col = flow.column()
col.prop(cloth, "internal_spring_max_diversion", text="Max Creation Diversion")
col = flow.column()
col.prop(cloth, "internal_spring_normal_check", text="Check Surface Normals")
col = flow.column()
col.prop(cloth, "internal_tension_stiffness", text="Tension")
col = flow.column()
col.prop(cloth, "internal_compression_stiffness", text="Compression")
col = flow.column()
col.prop_search(cloth, "vertex_group_intern", ob, "vertex_groups", text="Vertex Group")
col = flow.column()
col.prop(cloth, "internal_tension_stiffness_max", text="Max Tension")
col = flow.column()
col.prop(cloth, "internal_compression_stiffness_max", text="Max Compression")
class PHYSICS_PT_cloth_pressure(PhysicButtonsPanel, Panel):
bl_label = "Pressure"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_pressure", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.settings
md = context.cloth
ob = context.object
layout.active = cloth.use_pressure and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "uniform_pressure_force")
col = flow.column()
col.prop(cloth, "use_pressure_volume", text="Custom Volume")
col = flow.column()
col.active = cloth.use_pressure_volume
col.prop(cloth, "target_volume")
col = flow.column()
col.prop(cloth, "pressure_factor")
col = flow.column()
col.prop(cloth, "fluid_density")
col = flow.column()
col.prop_search(cloth, "vertex_group_pressure", ob, "vertex_groups", text="Vertex Group")
class PHYSICS_PT_cloth_cache(PhysicButtonsPanel, Panel):
bl_label = "Cache"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
md = context.cloth
point_cache_ui(self, md.point_cache, cloth_panel_enabled(md), 'CLOTH')
class PHYSICS_PT_cloth_shape(PhysicButtonsPanel, Panel):
bl_label = "Shape"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
ob = context.object
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column(align=True)
col.prop_search(cloth, "vertex_group_mass", ob, "vertex_groups", text="Pin Group")
sub = col.column(align=True)
sub.active = cloth.vertex_group_mass != ""
sub.prop(cloth, "pin_stiffness", text="Stiffness")
col.separator()
col = flow.column(align=True)
col.prop(cloth, "use_sewing_springs", text="Sewing")
sub = col.column(align=True)
sub.active = cloth.use_sewing_springs
sub.prop(cloth, "sewing_force_max", text="Max Sewing Force")
col.separator()
col = flow.column()
col.prop(cloth, "shrink_min", text="Shrinking Factor")
col = flow.column()
col.prop(cloth, "use_dynamic_mesh", text="Dynamic Mesh")
key = ob.data.shape_keys
if key:
col = flow.column()
col.active = not cloth.use_dynamic_mesh
col.prop_search(cloth, "rest_shape_key", key, "key_blocks", text="Rest Shape Key")
class PHYSICS_PT_cloth_collision(PhysicButtonsPanel, Panel):
bl_label = "Collisions"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
layout.active = (cloth.use_collision or cloth.use_self_collision) and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "collision_quality", text="Quality")
class PHYSICS_PT_cloth_object_collision(PhysicButtonsPanel, Panel):
bl_label = "Object Collisions"
bl_parent_id = 'PHYSICS_PT_cloth_collision'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.collision_settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_collision", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
layout.active = cloth.use_collision and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "distance_min", slider=True, text="Distance")
col = flow.column()
col.prop(cloth, "impulse_clamp")
col = flow.column()
col.prop(cloth, "collection")
class PHYSICS_PT_cloth_self_collision(PhysicButtonsPanel, Panel):
bl_label = "Self Collisions"
bl_parent_id = 'PHYSICS_PT_cloth_collision'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.collision_settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_self_collision", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
ob = context.object
layout.active = cloth.use_self_collision and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "self_friction", text="Friction")
col = flow.column()
col.prop(cloth, "self_distance_min", slider=True, text="Distance")
col = flow.column()
col.prop(cloth, "self_impulse_clamp")
col = flow.column()
col.prop_search(cloth, "vertex_group_self_collisions", ob, "vertex_groups", text="Vertex Group")
class PHYSICS_PT_cloth_property_weights(PhysicButtonsPanel, Panel):
bl_label = "Property Weights"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
ob = context.object
cloth = context.cloth.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop_search(
cloth, "vertex_group_structural_stiffness", ob, "vertex_groups",
text="Structural Group",
)
col.prop(cloth, "tension_stiffness_max", text="Max Tension")
col.prop(cloth, "compression_stiffness_max", text="Max Compression")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_shear_stiffness", ob, "vertex_groups",
text="Shear Group",
)
col.prop(cloth, "shear_stiffness_max", text="Max Shearing")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_bending", ob, "vertex_groups",
text="Bending Group"
)
col.prop(cloth, "bending_stiffness_max", text="Max Bending")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_shrink", ob, "vertex_groups",
text="Shrinking Group"
)
col.prop(cloth, "shrink_max", text="Max Shrinking")
class PHYSICS_PT_cloth_field_weights(PhysicButtonsPanel, Panel):
bl_label = "Field Weights"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
cloth = context.cloth.settings
effector_weights_ui(self, cloth.effector_weights, 'CLOTH')
classes = (
CLOTH_PT_presets,
PHYSICS_PT_cloth,
PHYSICS_PT_cloth_physical_properties,
PHYSICS_PT_cloth_stiffness,
PHYSICS_PT_cloth_damping,
PHYSICS_PT_cloth_internal_springs,
PHYSICS_PT_cloth_pressure,
PHYSICS_PT_cloth_cache,
PHYSICS_PT_cloth_shape,
PHYSICS_PT_cloth_collision,
PHYSICS_PT_cloth_object_collision,
PHYSICS_PT_cloth_self_collision,
PHYSICS_PT_cloth_property_weights,
PHYSICS_PT_cloth_field_weights,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
| 32.709278
| 107
| 0.67026
|
subdir = "cloth"
preset_operator = "script.execute_preset"
preset_add_operator = "cloth.preset_add"
class PhysicButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "physics"
@classmethod
def poll(cls, context):
ob = context.object
return (ob and ob.type == 'MESH') and (context.engine in cls.COMPAT_ENGINES) and (context.cloth)
class PHYSICS_PT_cloth(PhysicButtonsPanel, Panel):
bl_label = "Cloth"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header_preset(self, _context):
CLOTH_PT_presets.draw_panel_header(self.layout)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "quality", text="Quality Steps")
col = flow.column()
col.prop(cloth, "time_scale", text="Speed Multiplier")
class PHYSICS_PT_cloth_physical_properties(PhysicButtonsPanel, Panel):
bl_label = "Physical Properties"
bl_parent_id = 'PHYSICS_PT_cloth'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "mass", text="Vertex Mass")
col = flow.column()
col.prop(cloth, "air_damping", text="Air Viscosity")
col = flow.column()
col.prop(cloth, "bending_model")
class PHYSICS_PT_cloth_stiffness(PhysicButtonsPanel, Panel):
bl_label = "Stiffness"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
if cloth.bending_model == 'ANGULAR':
col.prop(cloth, "tension_stiffness", text="Tension")
col = flow.column()
col.prop(cloth, "compression_stiffness", text="Compression")
else:
col.prop(cloth, "tension_stiffness", text="Structural")
col = flow.column()
col.prop(cloth, "shear_stiffness", text="Shear")
col = flow.column()
col.prop(cloth, "bending_stiffness", text="Bending")
class PHYSICS_PT_cloth_damping(PhysicButtonsPanel, Panel):
bl_label = "Damping"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
if cloth.bending_model == 'ANGULAR':
col.prop(cloth, "tension_damping", text="Tension")
col = flow.column()
col.prop(cloth, "compression_damping", text="Compression")
else:
col.prop(cloth, "tension_damping", text="Structural")
col = flow.column()
col.prop(cloth, "shear_damping", text="Shear")
col = flow.column()
col.prop(cloth, "bending_damping", text="Bending")
class PHYSICS_PT_cloth_internal_springs(PhysicButtonsPanel, Panel):
bl_label = "Internal Springs"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_internal_springs", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.settings
md = context.cloth
ob = context.object
layout.active = cloth.use_internal_springs and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "internal_spring_max_length", text="Max Spring Creation Length")
col = flow.column()
col.prop(cloth, "internal_spring_max_diversion", text="Max Creation Diversion")
col = flow.column()
col.prop(cloth, "internal_spring_normal_check", text="Check Surface Normals")
col = flow.column()
col.prop(cloth, "internal_tension_stiffness", text="Tension")
col = flow.column()
col.prop(cloth, "internal_compression_stiffness", text="Compression")
col = flow.column()
col.prop_search(cloth, "vertex_group_intern", ob, "vertex_groups", text="Vertex Group")
col = flow.column()
col.prop(cloth, "internal_tension_stiffness_max", text="Max Tension")
col = flow.column()
col.prop(cloth, "internal_compression_stiffness_max", text="Max Compression")
class PHYSICS_PT_cloth_pressure(PhysicButtonsPanel, Panel):
bl_label = "Pressure"
bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_pressure", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.settings
md = context.cloth
ob = context.object
layout.active = cloth.use_pressure and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "uniform_pressure_force")
col = flow.column()
col.prop(cloth, "use_pressure_volume", text="Custom Volume")
col = flow.column()
col.active = cloth.use_pressure_volume
col.prop(cloth, "target_volume")
col = flow.column()
col.prop(cloth, "pressure_factor")
col = flow.column()
col.prop(cloth, "fluid_density")
col = flow.column()
col.prop_search(cloth, "vertex_group_pressure", ob, "vertex_groups", text="Vertex Group")
class PHYSICS_PT_cloth_cache(PhysicButtonsPanel, Panel):
bl_label = "Cache"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
md = context.cloth
point_cache_ui(self, md.point_cache, cloth_panel_enabled(md), 'CLOTH')
class PHYSICS_PT_cloth_shape(PhysicButtonsPanel, Panel):
bl_label = "Shape"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
ob = context.object
cloth = md.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column(align=True)
col.prop_search(cloth, "vertex_group_mass", ob, "vertex_groups", text="Pin Group")
sub = col.column(align=True)
sub.active = cloth.vertex_group_mass != ""
sub.prop(cloth, "pin_stiffness", text="Stiffness")
col.separator()
col = flow.column(align=True)
col.prop(cloth, "use_sewing_springs", text="Sewing")
sub = col.column(align=True)
sub.active = cloth.use_sewing_springs
sub.prop(cloth, "sewing_force_max", text="Max Sewing Force")
col.separator()
col = flow.column()
col.prop(cloth, "shrink_min", text="Shrinking Factor")
col = flow.column()
col.prop(cloth, "use_dynamic_mesh", text="Dynamic Mesh")
key = ob.data.shape_keys
if key:
col = flow.column()
col.active = not cloth.use_dynamic_mesh
col.prop_search(cloth, "rest_shape_key", key, "key_blocks", text="Rest Shape Key")
class PHYSICS_PT_cloth_collision(PhysicButtonsPanel, Panel):
bl_label = "Collisions"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
layout.active = (cloth.use_collision or cloth.use_self_collision) and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "collision_quality", text="Quality")
class PHYSICS_PT_cloth_object_collision(PhysicButtonsPanel, Panel):
bl_label = "Object Collisions"
bl_parent_id = 'PHYSICS_PT_cloth_collision'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.collision_settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_collision", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
layout.active = cloth.use_collision and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "distance_min", slider=True, text="Distance")
col = flow.column()
col.prop(cloth, "impulse_clamp")
col = flow.column()
col.prop(cloth, "collection")
class PHYSICS_PT_cloth_self_collision(PhysicButtonsPanel, Panel):
bl_label = "Self Collisions"
bl_parent_id = 'PHYSICS_PT_cloth_collision'
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw_header(self, context):
cloth = context.cloth.collision_settings
self.layout.active = cloth_panel_enabled(context.cloth)
self.layout.prop(cloth, "use_self_collision", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
cloth = context.cloth.collision_settings
md = context.cloth
ob = context.object
layout.active = cloth.use_self_collision and cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(cloth, "self_friction", text="Friction")
col = flow.column()
col.prop(cloth, "self_distance_min", slider=True, text="Distance")
col = flow.column()
col.prop(cloth, "self_impulse_clamp")
col = flow.column()
col.prop_search(cloth, "vertex_group_self_collisions", ob, "vertex_groups", text="Vertex Group")
class PHYSICS_PT_cloth_property_weights(PhysicButtonsPanel, Panel):
bl_label = "Property Weights"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
md = context.cloth
ob = context.object
cloth = context.cloth.settings
layout.active = cloth_panel_enabled(md)
flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop_search(
cloth, "vertex_group_structural_stiffness", ob, "vertex_groups",
text="Structural Group",
)
col.prop(cloth, "tension_stiffness_max", text="Max Tension")
col.prop(cloth, "compression_stiffness_max", text="Max Compression")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_shear_stiffness", ob, "vertex_groups",
text="Shear Group",
)
col.prop(cloth, "shear_stiffness_max", text="Max Shearing")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_bending", ob, "vertex_groups",
text="Bending Group"
)
col.prop(cloth, "bending_stiffness_max", text="Max Bending")
col.separator()
col = flow.column()
col.prop_search(
cloth, "vertex_group_shrink", ob, "vertex_groups",
text="Shrinking Group"
)
col.prop(cloth, "shrink_max", text="Max Shrinking")
class PHYSICS_PT_cloth_field_weights(PhysicButtonsPanel, Panel):
bl_label = "Field Weights"
bl_parent_id = 'PHYSICS_PT_cloth'
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
def draw(self, context):
cloth = context.cloth.settings
effector_weights_ui(self, cloth.effector_weights, 'CLOTH')
classes = (
CLOTH_PT_presets,
PHYSICS_PT_cloth,
PHYSICS_PT_cloth_physical_properties,
PHYSICS_PT_cloth_stiffness,
PHYSICS_PT_cloth_damping,
PHYSICS_PT_cloth_internal_springs,
PHYSICS_PT_cloth_pressure,
PHYSICS_PT_cloth_cache,
PHYSICS_PT_cloth_shape,
PHYSICS_PT_cloth_collision,
PHYSICS_PT_cloth_object_collision,
PHYSICS_PT_cloth_self_collision,
PHYSICS_PT_cloth_property_weights,
PHYSICS_PT_cloth_field_weights,
)
if __name__ == "__main__":
from bpy.utils import register_class
for cls in classes:
register_class(cls)
| true
| true
|
79089c07c18527cb1cb83c62b7ba01481c8aeb49
| 2,224
|
py
|
Python
|
samples/cli/accelbyte_py_sdk_cli/platform/_download.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
samples/cli/accelbyte_py_sdk_cli/platform/_download.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
samples/cli/accelbyte_py_sdk_cli/platform/_download.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import download as download_internal
@click.command()
@click.argument("campaign_id", type=str)
@click.option("--batch_no", "batch_no", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def download(
campaign_id: str,
batch_no: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(download_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {
"Authorization": login_with_auth
}
else:
login_as_internal(login_as)
result, error = download_internal(
campaign_id=campaign_id,
batch_no=batch_no,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"download failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
download.operation_id = "download"
download.is_deprecated = False
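# Illustrative only: a minimal sketch of driving this command in-process with
# click's test runner; the campaign id below is a made-up value.
if __name__ == "__main__":
from click.testing import CliRunner
_runner = CliRunner()
_result = _runner.invoke(download, ["my-campaign-id", "--doc", "true"])
print(_result.output)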
| 30.054054
| 88
| 0.718525
|
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import download as download_internal
@click.command()
@click.argument("campaign_id", type=str)
@click.option("--batch_no", "batch_no", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def download(
campaign_id: str,
batch_no: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(download_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {
"Authorization": login_with_auth
}
else:
login_as_internal(login_as)
result, error = download_internal(
campaign_id=campaign_id,
batch_no=batch_no,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"download failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
download.operation_id = "download"
download.is_deprecated = False
| true
| true
|
79089c89ffe201243369a9272a47ebeb11af1757
| 11,334
|
py
|
Python
|
pushservice/src/PushServiceBase.py
|
TwolDE2/enigma2-plugins
|
06685a5ce6a65a8724d3b32c8f7906714650ca2c
|
[
"OLDAP-2.3"
] | 30
|
2015-05-08T22:10:00.000Z
|
2022-03-13T22:09:31.000Z
|
pushservice/src/PushServiceBase.py
|
TwolDE2/enigma2-plugins
|
06685a5ce6a65a8724d3b32c8f7906714650ca2c
|
[
"OLDAP-2.3"
] | 124
|
2015-04-27T21:30:48.000Z
|
2022-03-29T10:21:39.000Z
|
pushservice/src/PushServiceBase.py
|
TwolDE2/enigma2-plugins
|
06685a5ce6a65a8724d3b32c8f7906714650ca2c
|
[
"OLDAP-2.3"
] | 193
|
2015-01-10T09:21:26.000Z
|
2022-03-21T08:19:33.000Z
|
from __future__ import print_function
from __future__ import absolute_import
#######################################################################
#
# Push Service for Enigma-2
# Coded by betonme (c) 2012 <glaserfrank(at)gmail.com>
# Support: http://www.i-have-a-dreambox.com/wbb2/thread.php?threadid=167779
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#######################################################################
import os
import sys
import traceback
from time import localtime, strftime
# Config
from Components.config import config
# XML
from xml.etree.cElementTree import Element, SubElement, Comment
from Tools.XMLTools import stringToXML
# Tools
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from Tools.BoundFunction import boundFunction
# Plugin internal
from . import _
from .Modules import Modules
from .ConfigFile import ConfigFile
from .ServiceBase import ServiceBase
from .ControllerBase import ControllerBase
import six
# Constants
SERVICE = "Service"
CONTROLLER = "Controller"
OPTION = "Option"
SERVICE_PATH = os.path.join(resolveFilename(SCOPE_PLUGINS), "Extensions/PushService/Services/")
CONTROLLER_PATH = os.path.join(resolveFilename(SCOPE_PLUGINS), "Extensions/PushService/Controller/")
class PushServiceBase(Modules, ConfigFile):
def __init__(self, path=""):
Modules.__init__(self)
ConfigFile.__init__(self)
self.services = []
self.controllers = []
self.pushcallbacks = {}
self.pusherrbacks = {}
# Read module files from subfolders
self.servicemodules = self.loadModules(SERVICE_PATH, ServiceBase)
self.controllermodules = self.loadModules(CONTROLLER_PATH, ControllerBase)
######################################
# Setter / Getter
def getServices(self):
return self.services or []
def getService(self, idx):
if idx < len(self.services):
return self.services[idx]
else:
return None
def getAvlServices(self):
slist = []
if self.servicemodules:
serviceclasses = [service.getClass() for service in self.services] if self.services else []
for name, module in six.iteritems(self.servicemodules):
if module.forceSingle():
# We have to check if there is already a plugin instance
if name in serviceclasses:
# A service instance already exists
continue
slist.append((name, module))
slist.sort()
return slist
def getServiceInstances(self):
return [(service.getNameId(), service) for service in self.getServices()]
def addService(self, module):
id = None
service = module and self.instantiateModule(module)
if service:
service.setEnable(True)
self.services.append(service)
self.services.sort(key=lambda x: (x.getUniqueID()))
id = service.getUniqueID()
return id
def removeService(self, service):
if service in self.services:
self.services.remove(service)
def getControllers(self):
return self.controllers or []
def getController(self, idx):
if idx < len(self.controllers):
return self.controllers[idx]
else:
return None
def getAvlControllers(self):
plist = []
if self.controllermodules:
controllerclasses = [controller.getClass() for controller in self.controllers] if self.controllers else []
for name, module in six.iteritems(self.controllermodules):
if module.forceSingle():
# We have to check if there is already a controller instance
if name in controllerclasses:
# A controller instance already exists
continue
plist.append((name, module))
plist.sort()
return plist
def getControllerInstances(self):
return [(controller.getNameId(), controller) for controller in self.getControllers()]
def addController(self, module):
id = None
controller = module and self.instantiateModule(module)
if controller:
controller.setEnable(True)
self.controllers.append(controller)
self.controllers.sort(key=lambda x: (x.getUniqueID()))
id = controller.getUniqueID()
return id
def removeController(self, controller):
if controller in self.controllers:
self.controllers.remove(controller)
######################################
# Config
def copyto(self, destination):
destination.services = self.services
destination.controllers = self.controllers
destination.servicemodules = self.servicemodules
destination.controllermodules = self.controllermodules
def copyfrom(self, source):
self.services = source.services
self.controllers = source.controllers
self.servicemodules = source.servicemodules
self.controllermodules = source.controllermodules
def load(self):
# Read xml config file
root = self.readXML()
if root:
services = []
controllers = []
# Reset the unique id counters
ServiceBase.resetUniqueID()
ControllerBase.resetUniqueID()
# Parse Config
def parse(root, typ, modules):
instances = []
if root:
for element in root.findall(typ):
name = element.get("name", "")
enable = element.get("enable", "True")
if name:
module = modules.get(name, None)
instance = self.instantiateModule(module)
if instance:
instance.setEnable(enable == "True")  # avoid eval() on config-sourced strings
# Set instance options
options = []
for option in element.findall(OPTION):
key = option.get("key", "")
value = option.text
if key and value:
options.append((key, value))
if options:
instance.setOptions(options)
# Append to active controller list
instances.append(instance)
return instances
services = parse(root, SERVICE, self.servicemodules)
controllers = parse(root, CONTROLLER, self.controllermodules)
self.services = services
self.controllers = controllers
else:
self.services = []
self.controllers = []
def save(self):
# Generate List in RAM
root = None
services = self.services
controllers = self.controllers
# Build Header
from .plugin import NAME, VERSION
root = Element(NAME)
root.set('version', VERSION)
root.append(Comment(_("Don't edit this manually unless you really know what you are doing")))
# Build Body
def build(root, instances, typ):
for instance in instances:
# Add module
element = SubElement(root, typ, name=stringToXML(instance.getName()), enable=stringToXML(instance.getStringEnable()))
# Add options
options = instance.getStringOptions()
if options:
for key, value, description in options:
SubElement(element, OPTION, key=stringToXML(key)).text = stringToXML(value)
return root
if services:
root = build(root, services, SERVICE)
if controllers:
root = build(root, controllers, CONTROLLER)
self.writeXML(root)
######################################
# Controller handling
def begin(self):
# Loop over all Services
for service in self.getServices():
if service.getEnable():
service.begin()
# Loop over all Controllers
for controller in self.getControllers():
if controller.getEnable():
controller.begin()
def end(self):
# Loop over all Services
for service in self.getServices():
if service.getEnable():
service.end()
# Loop over all Controllers
for controller in self.getControllers():
if controller.getEnable():
controller.end()
def run(self):
print(_("PushService started: ") + strftime(_("%d.%m.%Y %H:%M"), localtime()))
controllers = self.controllers
self.pushcallbacks = {}
self.pusherrbacks = {}
# Loop over all Controllers
if controllers:
for controller in controllers:
if controller.getEnable():
print(_("PushService running: ") + str(controller.getName()))
try:
# Run controller
ret = controller.run(
boundFunction(self.runcallback, controller),
boundFunction(self.runerrback, controller))
except Exception as e:
print(_("PushService controller run() exception"))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
def runcallback(self, controller, *args):
services = self.services
subject, body, attachments = "", "", []
# Parse return value(s)
if args:
if len(args) == 3:
subject, body, attachments = args
elif len(args) == 2:
# No attachments given
subject, body = args
else:
# Only header returned
subject = args[0]
if subject:
# Push notification
self.push(controller, subject, body, attachments)
def runerrback(self, controller, *args):
print(_("controller %s returned error(s)") % controller.getName())
for arg in args:
if isinstance(arg, Exception):
print(str(type(arg)), str(arg))
elif arg:
print(str(arg))
def push(self, controller, subject, text="", attachments=[]):
print("push")
services = self.services
if not services:
# Fallback to PopUp
module = self.servicemodules.get("PopUp", None)
popup = self.instantiateModule(module)
# Missing but not necessary: popup.begin() -> popup.push(...) -> popup.end()
services = [popup]
if services:
for service in services:
if service and service.getEnable():
try:
service.push(
boundFunction(self.pushcallback, service, controller),
boundFunction(self.pusherrback, service, controller),
controller.getName(),
subject, text, attachments)
except Exception as e:
print(_("PushService Service push() exception"))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
def pushcallback(self, service, controller, *args):
print("pushcallback")
key = (service, controller)
if key not in self.pushcallbacks:
self.pushcallbacks[key] = list(args)
else:
self.pushcallbacks[key].extend(list(args))
self.pushcheckbacks(key)
def pusherrback(self, service, controller, *args):
print("pusherrback")
print(_("Service %s returned error(s)") % service.getName())
for arg in args:
if isinstance(arg, Exception):
print(str(type(arg)), str(arg))
elif arg:
print(str(arg))
key = (service, controller)
if key not in self.pusherrbacks:
self.pusherrbacks[key] = list(args)
else:
self.pusherrbacks[key].extend(list(args))
self.pushcheckbacks(key)
def pushcheckbacks(self, key):
print("pushcheckbacks")
callparam = self.pushcallbacks.get(key, [])
cntcall = len(callparam)
errparam = self.pusherrbacks.get(key, [])
cnterr = len(errparam)
cntservices = len([service for service in self.services if service.getEnable()])
# Check if all services already called and returned
if (cntservices == (cntcall + cnterr)):
service, controller = key
if controller:
# Check if no error is logged
if (cnterr == 0):
print("controller.callback()")
controller.callback()
else:
controller.errback()
print("controller.errback()")
| 29.21134
| 121
| 0.689342
|
from __future__ import print_function
from __future__ import absolute_import
controllers = self.controllers
self.pushcallbacks = {}
self.pusherrbacks = {}
# Loop over all Controllers
if controllers:
for controller in controllers:
if controller.getEnable():
print(_("PushService running: ") + str(controller.getName()))
try:
# Run controller
ret = controller.run(
boundFunction(self.runcallback, controller),
boundFunction(self.runerrback, controller))
except Exception as e:
print(_("PushService controller run() exception"))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
def runcallback(self, controller, *args):
services = self.services
subject, body, attachments = "", "", []
# Parse return value(s)
if args:
if len(args) == 3:
subject, body, attachments = args
elif len(args) == 2:
# No attachments given
subject, body = args
else:
# Only header returned
subject = args[0]
if subject:
# Push notification
self.push(controller, subject, body, attachments)
def runerrback(self, controller, *args):
print(_("controller %s returned error(s)") % controller.getName())
for arg in args:
if isinstance(arg, Exception):
print(str(type(arg)), str(arg))
elif arg:
print(str(arg))
def push(self, controller, subject, text="", attachments=[]):
print("push")
services = self.services
if not services:
# Fallback to PopUp
module = self.servicemodules.get("PopUp", None)
popup = self.instantiateModule(module)
# Missing but not necessary: popup.begin() -> popup.push(...) -> popup.end()
services = [popup]
if services:
for service in services:
if service and service.getEnable():
try:
service.push(
boundFunction(self.pushcallback, service, controller),
boundFunction(self.pusherrback, service, controller),
controller.getName(),
subject, text, attachments)
except Exception as e:
print(_("PushService Service push() exception"))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
def pushcallback(self, service, controller, *args):
print("pushcallback")
key = (service, controller)
if key not in self.pushcallbacks:
self.pushcallbacks[key] = list(args)
else:
self.pushcallbacks[key].extend(list(args))
self.pushcheckbacks(key)
def pusherrback(self, service, controller, *args):
print("pusherrback")
print(_("Service %s returned error(s)") % service.getName())
for arg in args:
if isinstance(arg, Exception):
print(str(type(arg)), str(arg))
elif arg:
print(str(arg))
key = (service, controller)
if key not in self.pusherrbacks:
self.pusherrbacks[key] = list(args)
else:
self.pusherrbacks[key].extend(list(args))
self.pushcheckbacks(key)
def pushcheckbacks(self, key):
print("pushcheckbacks")
callparam = self.pushcallbacks.get(key, [])
cntcall = len(callparam)
errparam = self.pusherrbacks.get(key, [])
cnterr = len(errparam)
cntservices = len([service for service in self.services if service.getEnable()])
# Check if all services already called and returned
if (cntservices == (cntcall + cnterr)):
service, controller = key
if controller:
# Check if no error is logged
if (cnterr == 0):
print("controller.callback()")
controller.callback()
else:
controller.errback()
print("controller.errback()")
| true
| true
|
79089ec0e4b0cce14e46bce0073b27b819f6e4ee
| 315
|
py
|
Python
|
soccer_trajectories/setup.py
|
sadmanca/soccerbot
|
5e60eacb51ff1b063ae8c1caf7eb01053add43eb
|
[
"BSD-3-Clause"
] | 56
|
2016-12-25T22:29:00.000Z
|
2022-01-06T04:42:00.000Z
|
soccer_trajectories/setup.py
|
utra-robosoccer/soccerbot
|
f5e95b00356e42cdd143ab26f67f23c9cd8afd5a
|
[
"BSD-3-Clause"
] | 244
|
2021-04-05T03:22:25.000Z
|
2022-03-31T16:47:36.000Z
|
soccer_trajectories/setup.py
|
sadmanca/soccerbot
|
5e60eacb51ff1b063ae8c1caf7eb01053add43eb
|
[
"BSD-3-Clause"
] | 7
|
2017-01-24T23:38:07.000Z
|
2022-01-19T16:58:08.000Z
|
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['soccer_trajectories'],
package_dir={'': 'src'},
)
setup(**setup_args)
| 24.230769
| 61
| 0.768254
|
from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages=['soccer_trajectories'],
package_dir={'': 'src'},
)
setup(**setup_args)
| true
| true
|
79089ee1b2be195532da5e3548198c73c7cd9335
| 3,503
|
py
|
Python
|
RFACA/foldx/foldx_scan.py
|
JinyuanSun/my_bio_script
|
ceb84e2e32c38b0889956f12c380354d23b28dc1
|
[
"MIT"
] | null | null | null |
RFACA/foldx/foldx_scan.py
|
JinyuanSun/my_bio_script
|
ceb84e2e32c38b0889956f12c380354d23b28dc1
|
[
"MIT"
] | null | null | null |
RFACA/foldx/foldx_scan.py
|
JinyuanSun/my_bio_script
|
ceb84e2e32c38b0889956f12c380354d23b28dc1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# By Sun Jinyuan and Cui Yinglu, 2021
import argparse
import os
foldx_exe = "/user/sunjinyuan/soft/foldx"
def getparser():
parser = argparse.ArgumentParser(description=
'To run Foldx PositionScan with multiple threads, make sure' +
' that you have the foldx and your pdb in the same folder')
parser.add_argument("-s", '--pdbfile', help="The pdb file, the repaired one")
parser.add_argument("-nt", '--number_threads', help="How many threads to run the Foldx")
parser.add_argument("-c", '--chain_id', help="Chain ID")
args = parser.parse_args()
return args
def SOfile2mutlist(pdbname, chain_id, foldx_exe):
AA_list = ["Q", "W", "E", "R", "T", "Y", "I", "P", "A", "S", "D", "F", "G", "H", "K", "L", "V", "N", "M"]
try:
SO_file = open("SO_" + pdbname.replace("pdb", "fxout"), "r")
except FileNotFoundError:
os.system(foldx_exe + " --command=SequenceOnly --pdb=" + pdbname)
#os.system("/data/home/jsun/mhetase/FoldX/foldx5 --command=SequenceOnly --pdb=" + pdbname)
SO_file = open("SO_" + pdbname.replace("pdb", "fxout"), "r")
mut_lst = []
for line in SO_file:
lst = line.replace("\n", "").split("\t")
if len(lst) > 3:
if lst[1] == chain_id:
wild_AA = lst[3][0]
for AA in AA_list:
if AA != wild_AA:
mut_lst.append(lst[3] + AA + ";")
return mut_lst
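# e.g. if FoldX reports wild-type "KA12" (Lys, chain A, position 12), the
# generated entries look like "KA12Q;", "KA12W;", ... one per substitute AA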
def multi_threads(mut_lst, threads, pdbname, foldx_exe):
t = max(1, len(mut_lst) // max(1, int(threads) - 1))  # guard against a zero or negative chunk size
n = 0
for i in range(0, len(mut_lst), t):
submutlst = mut_lst[i:i + t]
n = n + 1
# indi_lst_name = "individual_list_"+str(n)+"_.txt"
sub_dir_name = "Subdirectory" + str(n)
indi_lst_name = sub_dir_name + "/individual_list.txt"
os.mkdir(sub_dir_name)
os.system("cp " + pdbname + " " + sub_dir_name)
with open(indi_lst_name, "w+") as ind_lst:
for mut in submutlst:
ind_lst.write(mut + "\n")
ind_lst.close()
readablefilename = sub_dir_name + "/List_Mutations_readable.txt"
with open(readablefilename, "a+") as readablefile:
# readable format, e.g. mutation "KA12G;" becomes the line "1 K 12 G"
x = 1
for mut in submutlst:
readablefile.write(str(x)+" "+mut[0]+" "+mut[2:-2]+" "+mut[-2]+"\n")
#readablefile.write(str(x) + " " + mut[0] + " " + mut[2:-1] + " " + mut[-1] + "\n")
x += 1
readablefile.close()
cfg = "command=BuildModel\npdb=" + pdbname + "\nmutant-file=individual_list.txt\nnumberOfRuns=5"
cfg_name = sub_dir_name + "/BM_" + str(n) + ".cfg"
with open(cfg_name, "w+") as cfg_file:
cfg_file.write(cfg)
cfg_file.close()
with open("todo_list.sh", "a+") as todo_file:
todo_file.write("cd " + sub_dir_name + "\n")
todo_file.write("nohup "+foldx_exe+" -f " + "BM_" + str(n) + ".cfg" + " &\n")
todo_file.write("cd ..\n")
todo_file.close()
if __name__ == "__main__":
import os
import argparse
args = getparser()
pdbname = args.pdbfile
threads = args.number_threads
chain_id = args.chain_id
#print(foldx_exe)
with open("todo_list.sh", "w+") as todo_file:
todo_file.close()
mut_lst = SOfile2mutlist(pdbname, chain_id, foldx_exe)
multi_threads(mut_lst, threads, pdbname, foldx_exe)
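# Illustrative invocation (file name and thread count are made up):
#   python foldx_scan.py -s 1abc_Repair.pdb -nt 8 -c A
# then run the generated script to launch the BuildModel jobs:
#   bash todo_list.sh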
| 36.489583
| 109
| 0.549814
|
foldx_exe = "/user/sunjinyuan/soft/foldx"
def getparser():
parser = argparse.ArgumentParser(description=
'To run Foldx PositionScan with multiple threads, make sure' +
' that you have the foldx and your pdb in the same folder')
parser.add_argument("-s", '--pdbfile', help="The pdb file, the repaired one")
parser.add_argument("-nt", '--number_threads', help="How many threads to run the Foldx")
parser.add_argument("-c", '--chain_id', help="Chain ID")
args = parser.parse_args()
return args
def SOfile2mutlist(pdbname, chain_id, foldx_exe):
AA_list = ["Q", "W", "E", "R", "T", "Y", "I", "P", "A", "S", "D", "F", "G", "H", "K", "L", "V", "N", "M"]
try:
SO_file = open("SO_" + pdbname.replace("pdb", "fxout"), "r")
except FileNotFoundError:
os.system(foldx_exe + " --command=SequenceOnly --pdb=" + pdbname)
SO_file = open("SO_" + pdbname.replace("pdb", "fxout"), "r")
mut_lst = []
for line in SO_file:
lst = line.replace("\n", "").split("\t")
if len(lst) > 3:
if lst[1] == chain_id:
wild_AA = lst[3][0]
for AA in AA_list:
if AA != wild_AA:
mut_lst.append(lst[3] + AA + ";")
return mut_lst
def multi_threads(mut_lst, threads, pdbname, foldx_exe):
t = max(1, len(mut_lst) // max(1, int(threads) - 1))  # guard against a zero or negative chunk size
n = 0
for i in range(0, len(mut_lst), t):
submutlst = mut_lst[i:i + t]
n = n + 1
sub_dir_name = "Subdirectory" + str(n)
indi_lst_name = sub_dir_name + "/individual_list.txt"
os.mkdir(sub_dir_name)
os.system("cp " + pdbname + " " + sub_dir_name)
with open(indi_lst_name, "w+") as ind_lst:
for mut in submutlst:
ind_lst.write(mut + "\n")
ind_lst.close()
readablefilename = sub_dir_name + "/List_Mutations_readable.txt"
with open(readablefilename, "a+") as readablefile:
x = 1
for mut in submutlst:
readablefile.write(str(x)+" "+mut[0]+" "+mut[2:-2]+" "+mut[-2]+"\n")
x += 1
readablefile.close()
cfg = "command=BuildModel\npdb=" + pdbname + "\nmutant-file=individual_list.txt\nnumberOfRuns=5"
cfg_name = sub_dir_name + "/BM_" + str(n) + ".cfg"
with open(cfg_name, "w+") as cfg_file:
cfg_file.write(cfg)
cfg_file.close()
with open("todo_list.sh", "a+") as todo_file:
todo_file.write("cd " + sub_dir_name + "\n")
todo_file.write("nohup "+foldx_exe+" -f " + "BM_" + str(n) + ".cfg" + " &\n")
todo_file.write("cd ..\n")
todo_file.close()
if __name__ == "__main__":
import os
import argparse
args = getparser()
pdbname = args.pdbfile
threads = args.number_threads
chain_id = args.chain_id
with open("todo_list.sh", "w+") as todo_file:
todo_file.close()
mut_lst = SOfile2mutlist(pdbname, chain_id, foldx_exe)
multi_threads(mut_lst, threads, pdbname, foldx_exe)
| false
| true
|
7908a28e2527cddfde07bba72841ff0141914230
| 342
|
py
|
Python
|
hyperflex_recommend/enpity/User.py
|
serviceoutsource/ML-AI
|
7b86f185b637a31026dba5502069ec8e42618ddb
|
[
"MIT"
] | null | null | null |
hyperflex_recommend/enpity/User.py
|
serviceoutsource/ML-AI
|
7b86f185b637a31026dba5502069ec8e42618ddb
|
[
"MIT"
] | null | null | null |
hyperflex_recommend/enpity/User.py
|
serviceoutsource/ML-AI
|
7b86f185b637a31026dba5502069ec8e42618ddb
|
[
"MIT"
] | null | null | null |
class User(object):
"""
"""
def __init__(self, user_id, user_name, user_cereal, user_midday, user_dinner):
self.user_id = user_id
self.user_name = user_name
self.user_cereal = user_cereal
self.user_midday = user_midday
self.user_dinner = user_dinner
if __name__ == '__main__':
pass
| 21.375
| 82
| 0.640351
|
class User(object):
def __init__(self, user_id, user_name, user_cereal, user_midday, user_dinner):
self.user_id = user_id
self.user_name = user_name
self.user_cereal = user_cereal
self.user_midday = user_midday
self.user_dinner = user_dinner
if __name__ == '__main__':
pass
| true
| true
|
7908a299d956bca84b66ca1a5bbe40f2f6eb6d4e
| 577
|
py
|
Python
|
apple/wallet/settings/settings_objects.py
|
grayfallstown/apple-blockchain
|
018041f158ac375f92c67b99f7ff163273407b6c
|
[
"Apache-2.0"
] | 15
|
2021-07-20T15:22:07.000Z
|
2022-02-09T04:28:46.000Z
|
apple/wallet/settings/settings_objects.py
|
grayfallstown/apple-blockchain
|
018041f158ac375f92c67b99f7ff163273407b6c
|
[
"Apache-2.0"
] | 17
|
2021-07-20T13:58:30.000Z
|
2021-10-10T04:24:29.000Z
|
apple/wallet/settings/settings_objects.py
|
grayfallstown/apple-blockchain
|
018041f158ac375f92c67b99f7ff163273407b6c
|
[
"Apache-2.0"
] | 4
|
2021-08-18T16:22:11.000Z
|
2022-03-15T08:24:01.000Z
|
from dataclasses import dataclass
from apple.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BackupInitialized(Streamable):
"""
Stores user decision regarding import of backup info
"""
user_initialized: bool # Stores if user made a selection in UI. (Skip vs Import backup)
user_skipped: bool # Stores if user decided to skip import of backup info
backup_info_imported: bool # Stores if backup info has been imported
new_wallet: bool # Stores if this wallet is newly created / not restored from backup
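# Illustrative construction (values are made up): a freshly created wallet
# where the user skipped the backup-import dialog might be stored as
# BackupInitialized(user_initialized=True, user_skipped=True,
# backup_info_imported=False, new_wallet=True)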
| 33.941176
| 92
| 0.755633
|
from dataclasses import dataclass
from apple.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BackupInitialized(Streamable):
user_initialized: bool
user_skipped: bool
backup_info_imported: bool
new_wallet: bool
| true
| true
|
7908a43481e0d18c695c99a4c7c7c1ebc9306c70
| 17,715
|
py
|
Python
|
plaster/main.py
|
erisyon/plaster
|
20af32aed2365c6351fe3c26293308960099152b
|
[
"MIT"
] | null | null | null |
plaster/main.py
|
erisyon/plaster
|
20af32aed2365c6351fe3c26293308960099152b
|
[
"MIT"
] | 22
|
2020-06-22T19:27:50.000Z
|
2021-09-30T20:02:31.000Z
|
plaster/main.py
|
erisyon/plaster
|
20af32aed2365c6351fe3c26293308960099152b
|
[
"MIT"
] | 2
|
2020-06-16T17:38:46.000Z
|
2021-08-06T09:37:22.000Z
|
#!/usr/bin/env python -u
"""
All commands that can be run in this project are available through this unified interface.
This should be run with the ./plaster.sh helper to get into the correct context.
"""
import tempfile
import numpy as np
import time
import os
import sys
import pandas as pd
import json
from pathlib import Path
from munch import Munch
from plumbum import colors
from plumbum import FG, TF, cli, local
from plaster.tools.zlog.zlog import important
from plaster.run.sigproc_v2 import synth
from plaster.tools.zlog.profile import prof, profile_from_file, profile_dump
from plaster.tools.utils.tmp import tmp_file
from plaster.tools.assets import assets
from plaster.tools.test_tools.test_tools import run_p
from plaster.run.run import RunResult
from plaster.tools.zlog import zlog
from plaster.tools.zlog.zlog import tell, h_line, spy
from plaster.tools.utils import tmp
from plaster.tools.utils import utils
import logging
log = logging.getLogger(__name__)
class CommandError(Exception):
def __init__(self, retcode=None):
self.retcode = retcode
def assert_env():
must_exist = ("ERISYON_ROOT", "JOBS_FOLDER")
found = 0
for e in must_exist:
if e in local.env:
found += 1
else:
print(f'Environment variable "{e}" not found.')
if found != len(must_exist):
raise CommandError(f"Environment variable(s) not found.")
class DoFuncs:
def is_dev(self):
return local.env.get("ERISYON_DEV") == "1"
def folder_user(self):
return local.env["FOLDER_USER"]
def run_user(self):
return local.env["RUN_USER"]
def clear(self):
local["clear"] & FG
def _print_job_folders(self, file_list, show_plaster_json=True):
"""
file_list is a list of munches [Munch(folder="folder", name="foo.txt", size=123, mtime=123456789)]
"""
if len(file_list) == 0:
print("No files found")
return
folders = {
file.folder: Munch(folder=file.folder, size_gb=0, file_count=0,)
for file in file_list
}
gb = 1024 ** 3
total_gb = 0
for file in file_list:
folder = file.folder
total_gb += file.size / gb
folders[folder].size_gb += file.size / gb
folders[folder].file_count += 1
df = pd.DataFrame.from_dict(folders, orient="index")
formatters = dict(
size_gb="{:10.2f}".format,
folder="{:<40.40s}".format,
file_count="{:.0f}".format,
)
columns = ["folder", "size_gb", "file_count"]
df = df.append(dict(folder="TOTAL", size_gb=total_gb), ignore_index=True)
print(df.to_string(columns=columns, formatters=formatters))
def print_local_job_folders(self):
important("Local job folders:")
root = local.path("./jobs_folder")
self._print_job_folders(
[
Munch(
folder=(p - root)[0],
name=p.name,
size=int(p.stat().st_size),
mtime=int(p.stat().st_mtime),
)
for p in root.walk()
]
)
def validate_job_folder(self, job_folder, allow_run_folders=False):
return assets.validate_job_folder(
job_folder, allow_run_folders=allow_run_folders
)
def run_zests_v2(self, cli_args, debug_mode):
tell(f"Running zests v2...")
# os.environ is evaluated when it is first imported, so
# we can't use any of the more graceful ways to set the environment
with local.env(RUN_ENV="test", ZAP_DEBUG_MODE=debug_mode):
zest_version = None
try:
from zest.version import __version__ as zest_version
except ImportError:
pass
assert zlog.config_dict is not None
assert zest_version.startswith("1.1.")
with tmp.tmp_file() as tmp_path:
with open(tmp_path, "w") as f:
f.write(json.dumps(zlog.config_dict))
# cli_args += ["--logger_config_json", tmp_path]
local["python"]["-u", "-m", "zest.zest_cli"].bound_command(
*cli_args
) & FG(retcode=None)
def run_nbstripout(self):
"""Strip all notebooks of output to save space in commits"""
important("Stripping Notebooks...")
result = (
local["find"][
".",
"-type",
"f",
"-not",
"-path",
"*/\.*",
"-name",
"*.ipynb",
"-print",
]
| local["xargs"]["nbstripout"]
) & TF(FG=True)
if not result:
raise CommandError
def run_docker_build(self, docker_tag, quiet=False):
important(f"Building docker tag {docker_tag}")
with local.env(LANG="en_US.UTF-8"):
args = [
"build",
"-t",
f"erisyon:{docker_tag}",
"-f",
"./scripts/main_env.docker",
]
if quiet:
args += ["--quiet"]
args += "."
local["docker"][args] & FG
class DoCommand(cli.Application, DoFuncs):
def main(self):
return
@DoCommand.subcommand("run_notebook")
class RunNotebookCommand(cli.Application, DoFuncs):
"""
Run a notebook rendered to HTML
"""
def main(self, notebook_path, output_path: Path = None):
args = [
"nbconvert",
"--to",
"html",
"--execute",
notebook_path,
"--ExecutePreprocessor.timeout=1800",
]
if output_path is not None:
args += ["--output", output_path]
local["jupyter"].bound_command(*args) & FG
@DoCommand.subcommand("profile")
class ProfileCommand(cli.Application, DoFuncs):
gb = 1024 ** 3
skip_hardware = cli.Flag("--skip_hardware", help="Do not include hardware profile")
skip_sigproc = cli.Flag("--skip_sigproc", help="Do not include sigproc profile")
def fileio_test(self, jobs_folder):
job_name = f"_profile/_{int(time.time()):08x}"
large_random = np.random.uniform(
size=1024 ** 3 // 8
) # 8 because floats are 8 bytes
def write_to(write_path):
# import shutil
# total, used, free = shutil.disk_usage(write_path.dirname)
# print(f"Free disk at {write_path}: {free / gb:2.2f}GB ({free / total:2.1f}%)")
write_path.dirname.mkdir()
with open(write_path, "wb") as f:
f.write(large_random)
# PROFILE write to jobs_folder
job_folder_write_path = jobs_folder / job_name
try:
with prof(
"fileio_to_jobs_folder", gbs=large_random.nbytes / self.gb, _tell=True,
):
write_to(job_folder_write_path)
finally:
job_folder_write_path.delete()
# PROFILE write to plaster_tmp
with tmp_file() as plaster_tmp_folder_write_path:
with prof(
"fileio_to_plaster_tmp", gbs=large_random.nbytes / self.gb, _tell=True,
):
write_to(plaster_tmp_folder_write_path)
# PROFILE write to /tmp
tmp_folder_write_path = local.path(tempfile.mkstemp())
try:
with prof("fileio_to_tmp", gbs=large_random.nbytes / self.gb, _tell=True):
write_to(tmp_folder_write_path)
finally:
tmp_folder_write_path.delete()
def cpu_test(self):
mat = np.random.uniform(size=(5000, 5000))
with prof(
"cpu_tests_matrix_invert",
mega_elems=(mat.shape[0] * mat.shape[1]) / 1e6,
_tell=True,
):
np.linalg.inv(mat)
def mem_test(self):
gb = 1024 ** 3
rnd = np.random.uniform(size=(1_000, 500_000))
with prof("mem_tests_copy", gbs=rnd.nbytes / gb, _tell=True):
rnd.copy()
def sigproc_test(self, jobs_folder):
"""
This is adapted from zest_sigproc_v2_integration
"""
profile_folder = jobs_folder / "_profile"
profile_folder.delete()
job_folder = profile_folder / "sigproc_test"
source_folder = profile_folder / "_synth_field"
job_folder.mkdir()
source_folder.mkdir()
# GENERATE some fake data
dim = (1024, 1024)
n_channels = 1
n_cycles = 10
n_peaks = 500
psf_width = 1.5
bg_mean = 100.0
bg_std = 30.0
gain = 5000.0
def _synth_field(fl_i):
with synth.Synth(n_channels=n_channels, n_cycles=n_cycles, dim=dim) as s:
peaks = (
synth.PeaksModelGaussianCircular(n_peaks=n_peaks)
.locs_randomize()
.widths_uniform(psf_width)
.amps_constant(gain)
)
synth.CameraModel(bg_mean=bg_mean, bg_std=bg_std)
synth.HaloModel()
synth.IlluminationQuadraticFalloffModel()
chcy_ims = s.render_chcy(0)
for ch_i in range(chcy_ims.shape[0]):
for cy_i in range(chcy_ims.shape[1]):
np.save(
str(
source_folder
/ f"area_{fl_i:03d}_cell_000_{ch_i:03d}nm_{cy_i:03d}.npy"
),
chcy_ims[ch_i, cy_i],
)
n_fields = 2
for fl_i in range(n_fields):
_synth_field(fl_i)
run_p(
[
f"gen",
f"sigproc_v2",
f"--job={job_folder}",
f"--sigproc_source={source_folder}",
f"--force",
f"--self_calib",
]
)
log_file = local.path(local.env["PLASTER_ROOT"]) / "plaster.log"
log_file.delete()
run_p(["run", job_folder, "--no_progress", "--skip_reports"])
profile_lines = profile_from_file(log_file)
with colors.fg.DeepSkyBlue3:
print()
print(h_line("--"))
print("PROFILE RESULTS")
print(h_line("--"))
profile_dump(profile_lines)
def main(self, jobs_folder):
assert_env()
jobs_folder = local.path(jobs_folder)
if not self.skip_hardware:
tell(colors.cyan | "Profiling file_io")
self.fileio_test(jobs_folder)
tell(colors.cyan | "Profiling cpu")
self.cpu_test()
tell(colors.cyan | "Profiling mem")
self.mem_test()
if not self.skip_sigproc:
tell(colors.cyan | "Profiling sigproc")
self.sigproc_test(jobs_folder)
@DoCommand.subcommand("profile_dump")
class ProfileDumpCommand(cli.Application, DoFuncs):
def main(self, log_path):
assert_env()
log_file = local.path(log_path)
profile_lines = profile_from_file(log_file)
profile_dump(profile_lines)
@DoCommand.subcommand("test")
class TestCommand(cli.Application, DoFuncs):
"""
Run tests
"""
no_clear = cli.Flag("--no_clear", help="Do not clear screen")
integration = cli.Flag("--integration", help="Run integration tests")
debug_mode = cli.Flag("--debug_mode", help="Put zap into debug_mode")
cli_mode = cli.Flag("--cli_mode", help="Run without ui")
def main(self, *args):
if not self.no_clear:
self.clear()
cli_args = list(args)
root = local.env["PLASTER_ROOT"]
cli_args += [f"--root={root}"]
folders = (
"./plaster",
"./plaster/scripts",
)
include_dirs = ":".join(folders)
cli_args += [f"--include_dirs={include_dirs}"]
with local.cwd(root):
cli_args += [f"--hook_start=./scripts/testing_start.py:test_setup_logs"]
if not self.debug_mode:
if not self.cli_mode:
cli_args += [f"--ui"]
cli_args += [f"--n_workers", "8"]
if self.integration:
cli_args += [f"--groups=integration"]
else:
cli_args += [f"--exclude_groups=integration"]
return self.run_zests_v2(cli_args, self.debug_mode)
@DoCommand.subcommand("jupyter")
class JupyterCommand(cli.Application, DoFuncs):
ip = cli.SwitchAttr("--ip", str, default="0.0.0.0", help="ip to bind to")
    port = cli.SwitchAttr("--port", int, default=8080, help="port to bind to")
def main(self, *args):
assert_env()
os.execlp(
"jupyter",
"jupyter",
"notebook",
f"--ip={self.ip}",
f"--port={self.port}",
"--allow-root",
*args,
)
@DoCommand.subcommand("pluck")
class PluckCommand(cli.Application, DoFuncs):
"""
Pluck a field from a result pickle
"""
save_npy = cli.SwitchAttr("--save_npy", str, default=None, help="save as npy file")
save_csv = cli.SwitchAttr(
"--save_csv", str, default=None, help="save as csv file (dataframe only)"
)
save_pkl = cli.SwitchAttr(
"--save_pkl", str, default=None, help="save as pkl file (dataframe only)"
)
def main(self, run_path, symbol):
"""
run_path: path to the run folder
symbol: Eg: "sigproc_v2.sig"
"""
run = RunResult(run_path)
parts = symbol.split(".")
result = run[parts[0]]
sym = getattr(result, parts[1])
if callable(sym):
val = sym()
else:
val = sym
if self.save_npy is not None:
assert isinstance(val, np.ndarray)
np.save(self.save_npy, val)
if self.save_csv is not None:
assert isinstance(val, pd.DataFrame)
val.to_csv(self.save_csv)
if self.save_pkl is not None:
assert isinstance(val, pd.DataFrame)
val.to_pickle(self.save_pkl)
@DoCommand.subcommand("export_sigproc_v2")
class ExportSigprocV2Command(cli.Application, DoFuncs):
"""
    Export sigproc_v2 and raw data in easy-to-use formats.
"""
def main(self, run_path):
"""
run_path: path to the run folder (don't forget this is a subfolder of job)
"""
run = RunResult(run_path)
name = run.run_folder.parent.name
prefix = f"{name}__"
tell(f"Prefixing saved files with {prefix}")
tell("Saving sig.npy")
np.save(f"{prefix}sig.npy", run.sigproc_v2.sig())
tell("Saving noi.npy")
np.save(f"{prefix}noi.npy", run.sigproc_v2.noi())
tell("Saving df.csv")
run.sigproc_v2.fields__n_peaks__peaks__radmat().to_csv(f"{prefix}df.csv")
ims = []
for fl_i in range(run.sigproc_v2.n_fields):
tell(f"Loading align field {fl_i} of {run.sigproc_v2.n_fields}")
ims += [run.sigproc_v2.aln_unfilt_chcy_ims(fl_i)]
tell("Saving aln_ims.npy")
np.save(f"{prefix}aln_ims.npy", np.stack(ims))
tell("Saving example.py")
utils.save(
f"{prefix}example.py",
f"import numpy as np\n"
+ f"import pandas as pd\n\n"
+ f'prefix = "{prefix}"'
+ utils.smart_wrap(
"""
sig = np.load(f"{prefix}sig.npy")
noi = np.load(f"{prefix}noi.npy")
df = pd.read_csv(f"{prefix}df.csv")
ims = np.load(f"{prefix}aln_ims.npy", mmap_mode="r")
n_peaks = sig.shape[0]
n_fields, n_channels, n_cycles, im_mea, _ = ims.shape
# Examine some peak
peak_i = 123 # 0 <= peak_i < n_peaks
ch_i = 0 # 0 <= ch_i < n_channels
cy_i = 0 # 0 <= cy_i < n_cycles
y, x, fl_i = df[df.peak_i == peak_i][["aln_y", "aln_x", "field_i"]].drop_duplicates().values.flatten().astype(int)
peak_radius = 10
peak_im = ims[fl_i, ch_i, cy_i, y-peak_radius:y+peak_radius, x-peak_radius:x+peak_radius]
# Now peak_im is a centered sub-image of that peak with shape=(peak_radius, peak_radius)
""",
width=200,
assert_if_exceeds_width=True,
),
)
tell("\n\nThe following commands may be useful:")
# tell(f" tar czf {prefix}data.tar.gz {prefix}sig.npy {prefix}noi.npy {prefix}df.csv")
# tell(f" tar czf {prefix}ims.tar.gz {prefix}aln_ims.npy")
# tell("")
# tell(f" aws s3 cp {prefix}data.tar.gz s3://erisyon-public")
# tell(f" aws s3 cp {prefix}ims.tar.gz s3://erisyon-public")
tell(f" aws s3 cp {prefix}sig.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}noi.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}df.csv s3://erisyon-public")
tell(f" aws s3 cp {prefix}aln_ims.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}example.py s3://erisyon-public")
if __name__ == "__main__":
try:
DoCommand.subcommand("gen", "plaster.gen.gen_main.GenApp")
DoCommand.subcommand("run", "plaster.run.run_main.RunApp")
DoCommand.run()
except (KeyboardInterrupt):
        print()  # Add an extra line because various things terminate with \r
sys.exit(1)
except Exception as e:
log.exception(e)
sys.exit(1)
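The _print_job_folders helper above is a small fold: it buckets a flat file list
by folder, accumulates per-folder size_gb and file_count, then renders a
DataFrame with a TOTAL row. A self-contained sketch of the same aggregation with
made-up values (note that DataFrame.append, used above, was removed in pandas
2.0, so the sketch uses pd.concat instead):

import pandas as pd

gb = 1024 ** 3
# (folder, size-in-bytes) pairs standing in for the walked file list
files = [("jobA", 2 * gb), ("jobA", gb), ("jobB", 512 * 1024 ** 2)]
agg = {}
for folder, size in files:
    row = agg.setdefault(folder, dict(folder=folder, size_gb=0.0, file_count=0))
    row["size_gb"] += size / gb
    row["file_count"] += 1
df = pd.DataFrame.from_dict(agg, orient="index")
total = pd.DataFrame([dict(folder="TOTAL", size_gb=df["size_gb"].sum())])
print(pd.concat([df, total], ignore_index=True).to_string())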
| 31.690519
| 130
| 0.556816
|
import tempfile
import numpy as np
import time
import os
import sys
import pandas as pd
import json
from pathlib import Path
from munch import Munch
from plumbum import colors
from plumbum import FG, TF, cli, local
from plaster.tools.zlog.zlog import important
from plaster.run.sigproc_v2 import synth
from plaster.tools.zlog.profile import prof, profile_from_file, profile_dump
from plaster.tools.utils.tmp import tmp_file
from plaster.tools.assets import assets
from plaster.tools.test_tools.test_tools import run_p
from plaster.run.run import RunResult
from plaster.tools.zlog import zlog
from plaster.tools.zlog.zlog import tell, h_line, spy
from plaster.tools.utils import tmp
from plaster.tools.utils import utils
import logging
log = logging.getLogger(__name__)
class CommandError(Exception):
def __init__(self, retcode=None):
self.retcode = retcode
def assert_env():
must_exist = ("ERISYON_ROOT", "JOBS_FOLDER")
found = 0
for e in must_exist:
if e in local.env:
found += 1
else:
print(f'Environment variable "{e}" not found.')
if found != len(must_exist):
raise CommandError(f"Environment variable(s) not found.")
class DoFuncs:
def is_dev(self):
return local.env.get("ERISYON_DEV") == "1"
def folder_user(self):
return local.env["FOLDER_USER"]
def run_user(self):
return local.env["RUN_USER"]
def clear(self):
local["clear"] & FG
def _print_job_folders(self, file_list, show_plaster_json=True):
if len(file_list) == 0:
print("No files found")
return
folders = {
file.folder: Munch(folder=file.folder, size_gb=0, file_count=0,)
for file in file_list
}
gb = 1024 ** 3
total_gb = 0
for file in file_list:
folder = file.folder
total_gb += file.size / gb
folders[folder].size_gb += file.size / gb
folders[folder].file_count += 1
df = pd.DataFrame.from_dict(folders, orient="index")
formatters = dict(
size_gb="{:10.2f}".format,
folder="{:<40.40s}".format,
file_count="{:.0f}".format,
)
columns = ["folder", "size_gb", "file_count"]
df = df.append(dict(folder="TOTAL", size_gb=total_gb), ignore_index=True)
print(df.to_string(columns=columns, formatters=formatters))
def print_local_job_folders(self):
important("Local job folders:")
root = local.path("./jobs_folder")
self._print_job_folders(
[
Munch(
folder=(p - root)[0],
name=p.name,
size=int(p.stat().st_size),
mtime=int(p.stat().st_mtime),
)
for p in root.walk()
]
)
def validate_job_folder(self, job_folder, allow_run_folders=False):
return assets.validate_job_folder(
job_folder, allow_run_folders=allow_run_folders
)
def run_zests_v2(self, cli_args, debug_mode):
tell(f"Running zests v2...")
with local.env(RUN_ENV="test", ZAP_DEBUG_MODE=debug_mode):
zest_version = None
try:
from zest.version import __version__ as zest_version
except ImportError:
pass
assert zlog.config_dict is not None
            assert zest_version is not None, "zest is not installed"
            assert zest_version.startswith("1.1.")
with tmp.tmp_file() as tmp_path:
with open(tmp_path, "w") as f:
f.write(json.dumps(zlog.config_dict))
# cli_args += ["--logger_config_json", tmp_path]
local["python"]["-u", "-m", "zest.zest_cli"].bound_command(
*cli_args
) & FG(retcode=None)
def run_nbstripout(self):
important("Stripping Notebooks...")
result = (
local["find"][
".",
"-type",
"f",
"-not",
"-path",
"*/\.*",
"-name",
"*.ipynb",
"-print",
]
| local["xargs"]["nbstripout"]
) & TF(FG=True)
if not result:
raise CommandError
def run_docker_build(self, docker_tag, quiet=False):
important(f"Building docker tag {docker_tag}")
with local.env(LANG="en_US.UTF-8"):
args = [
"build",
"-t",
f"erisyon:{docker_tag}",
"-f",
"./scripts/main_env.docker",
]
if quiet:
args += ["--quiet"]
args += "."
local["docker"][args] & FG
class DoCommand(cli.Application, DoFuncs):
def main(self):
return
@DoCommand.subcommand("run_notebook")
class RunNotebookCommand(cli.Application, DoFuncs):
def main(self, notebook_path, output_path: Path = None):
args = [
"nbconvert",
"--to",
"html",
"--execute",
notebook_path,
"--ExecutePreprocessor.timeout=1800",
]
if output_path is not None:
args += ["--output", output_path]
local["jupyter"].bound_command(*args) & FG
@DoCommand.subcommand("profile")
class ProfileCommand(cli.Application, DoFuncs):
gb = 1024 ** 3
skip_hardware = cli.Flag("--skip_hardware", help="Do not include hardware profile")
skip_sigproc = cli.Flag("--skip_sigproc", help="Do not include sigproc profile")
def fileio_test(self, jobs_folder):
job_name = f"_profile/_{int(time.time()):08x}"
large_random = np.random.uniform(
size=1024 ** 3 // 8
) # 8 because floats are 8 bytes
def write_to(write_path):
# import shutil
# total, used, free = shutil.disk_usage(write_path.dirname)
# print(f"Free disk at {write_path}: {free / gb:2.2f}GB ({free / total:2.1f}%)")
write_path.dirname.mkdir()
with open(write_path, "wb") as f:
f.write(large_random)
# PROFILE write to jobs_folder
job_folder_write_path = jobs_folder / job_name
try:
with prof(
"fileio_to_jobs_folder", gbs=large_random.nbytes / self.gb, _tell=True,
):
write_to(job_folder_write_path)
finally:
job_folder_write_path.delete()
# PROFILE write to plaster_tmp
with tmp_file() as plaster_tmp_folder_write_path:
with prof(
"fileio_to_plaster_tmp", gbs=large_random.nbytes / self.gb, _tell=True,
):
write_to(plaster_tmp_folder_write_path)
# PROFILE write to /tmp
tmp_folder_write_path = local.path(tempfile.mkstemp())
try:
with prof("fileio_to_tmp", gbs=large_random.nbytes / self.gb, _tell=True):
write_to(tmp_folder_write_path)
finally:
tmp_folder_write_path.delete()
def cpu_test(self):
mat = np.random.uniform(size=(5000, 5000))
with prof(
"cpu_tests_matrix_invert",
mega_elems=(mat.shape[0] * mat.shape[1]) / 1e6,
_tell=True,
):
np.linalg.inv(mat)
def mem_test(self):
gb = 1024 ** 3
rnd = np.random.uniform(size=(1_000, 500_000))
with prof("mem_tests_copy", gbs=rnd.nbytes / gb, _tell=True):
rnd.copy()
def sigproc_test(self, jobs_folder):
profile_folder = jobs_folder / "_profile"
profile_folder.delete()
job_folder = profile_folder / "sigproc_test"
source_folder = profile_folder / "_synth_field"
job_folder.mkdir()
source_folder.mkdir()
# GENERATE some fake data
dim = (1024, 1024)
n_channels = 1
n_cycles = 10
n_peaks = 500
psf_width = 1.5
bg_mean = 100.0
bg_std = 30.0
gain = 5000.0
def _synth_field(fl_i):
with synth.Synth(n_channels=n_channels, n_cycles=n_cycles, dim=dim) as s:
peaks = (
synth.PeaksModelGaussianCircular(n_peaks=n_peaks)
.locs_randomize()
.widths_uniform(psf_width)
.amps_constant(gain)
)
synth.CameraModel(bg_mean=bg_mean, bg_std=bg_std)
synth.HaloModel()
synth.IlluminationQuadraticFalloffModel()
chcy_ims = s.render_chcy(0)
for ch_i in range(chcy_ims.shape[0]):
for cy_i in range(chcy_ims.shape[1]):
np.save(
str(
source_folder
/ f"area_{fl_i:03d}_cell_000_{ch_i:03d}nm_{cy_i:03d}.npy"
),
chcy_ims[ch_i, cy_i],
)
n_fields = 2
for fl_i in range(n_fields):
_synth_field(fl_i)
run_p(
[
f"gen",
f"sigproc_v2",
f"--job={job_folder}",
f"--sigproc_source={source_folder}",
f"--force",
f"--self_calib",
]
)
log_file = local.path(local.env["PLASTER_ROOT"]) / "plaster.log"
log_file.delete()
run_p(["run", job_folder, "--no_progress", "--skip_reports"])
profile_lines = profile_from_file(log_file)
with colors.fg.DeepSkyBlue3:
print()
print(h_line("--"))
print("PROFILE RESULTS")
print(h_line("--"))
profile_dump(profile_lines)
def main(self, jobs_folder):
assert_env()
jobs_folder = local.path(jobs_folder)
if not self.skip_hardware:
tell(colors.cyan | "Profiling file_io")
self.fileio_test(jobs_folder)
tell(colors.cyan | "Profiling cpu")
self.cpu_test()
tell(colors.cyan | "Profiling mem")
self.mem_test()
if not self.skip_sigproc:
tell(colors.cyan | "Profiling sigproc")
self.sigproc_test(jobs_folder)
@DoCommand.subcommand("profile_dump")
class ProfileDumpCommand(cli.Application, DoFuncs):
def main(self, log_path):
assert_env()
log_file = local.path(log_path)
profile_lines = profile_from_file(log_file)
profile_dump(profile_lines)
@DoCommand.subcommand("test")
class TestCommand(cli.Application, DoFuncs):
no_clear = cli.Flag("--no_clear", help="Do not clear screen")
integration = cli.Flag("--integration", help="Run integration tests")
debug_mode = cli.Flag("--debug_mode", help="Put zap into debug_mode")
cli_mode = cli.Flag("--cli_mode", help="Run without ui")
def main(self, *args):
if not self.no_clear:
self.clear()
cli_args = list(args)
root = local.env["PLASTER_ROOT"]
cli_args += [f"--root={root}"]
folders = (
"./plaster",
"./plaster/scripts",
)
include_dirs = ":".join(folders)
cli_args += [f"--include_dirs={include_dirs}"]
with local.cwd(root):
cli_args += [f"--hook_start=./scripts/testing_start.py:test_setup_logs"]
if not self.debug_mode:
if not self.cli_mode:
cli_args += [f"--ui"]
cli_args += [f"--n_workers", "8"]
if self.integration:
cli_args += [f"--groups=integration"]
else:
cli_args += [f"--exclude_groups=integration"]
return self.run_zests_v2(cli_args, self.debug_mode)
@DoCommand.subcommand("jupyter")
class JupyterCommand(cli.Application, DoFuncs):
ip = cli.SwitchAttr("--ip", str, default="0.0.0.0", help="ip to bind to")
    port = cli.SwitchAttr("--port", int, default=8080, help="port to bind to")
def main(self, *args):
assert_env()
os.execlp(
"jupyter",
"jupyter",
"notebook",
f"--ip={self.ip}",
f"--port={self.port}",
"--allow-root",
*args,
)
@DoCommand.subcommand("pluck")
class PluckCommand(cli.Application, DoFuncs):
save_npy = cli.SwitchAttr("--save_npy", str, default=None, help="save as npy file")
save_csv = cli.SwitchAttr(
"--save_csv", str, default=None, help="save as csv file (dataframe only)"
)
save_pkl = cli.SwitchAttr(
"--save_pkl", str, default=None, help="save as pkl file (dataframe only)"
)
def main(self, run_path, symbol):
run = RunResult(run_path)
parts = symbol.split(".")
result = run[parts[0]]
sym = getattr(result, parts[1])
if callable(sym):
val = sym()
else:
val = sym
if self.save_npy is not None:
assert isinstance(val, np.ndarray)
np.save(self.save_npy, val)
if self.save_csv is not None:
assert isinstance(val, pd.DataFrame)
val.to_csv(self.save_csv)
if self.save_pkl is not None:
assert isinstance(val, pd.DataFrame)
val.to_pickle(self.save_pkl)
@DoCommand.subcommand("export_sigproc_v2")
class ExportSigprocV2Command(cli.Application, DoFuncs):
def main(self, run_path):
run = RunResult(run_path)
name = run.run_folder.parent.name
prefix = f"{name}__"
tell(f"Prefixing saved files with {prefix}")
tell("Saving sig.npy")
np.save(f"{prefix}sig.npy", run.sigproc_v2.sig())
tell("Saving noi.npy")
np.save(f"{prefix}noi.npy", run.sigproc_v2.noi())
tell("Saving df.csv")
run.sigproc_v2.fields__n_peaks__peaks__radmat().to_csv(f"{prefix}df.csv")
ims = []
for fl_i in range(run.sigproc_v2.n_fields):
tell(f"Loading align field {fl_i} of {run.sigproc_v2.n_fields}")
ims += [run.sigproc_v2.aln_unfilt_chcy_ims(fl_i)]
tell("Saving aln_ims.npy")
np.save(f"{prefix}aln_ims.npy", np.stack(ims))
tell("Saving example.py")
utils.save(
f"{prefix}example.py",
f"import numpy as np\n"
+ f"import pandas as pd\n\n"
+ f'prefix = "{prefix}"'
+ utils.smart_wrap(
"""
sig = np.load(f"{prefix}sig.npy")
noi = np.load(f"{prefix}noi.npy")
df = pd.read_csv(f"{prefix}df.csv")
ims = np.load(f"{prefix}aln_ims.npy", mmap_mode="r")
n_peaks = sig.shape[0]
n_fields, n_channels, n_cycles, im_mea, _ = ims.shape
# Examine some peak
peak_i = 123 # 0 <= peak_i < n_peaks
ch_i = 0 # 0 <= ch_i < n_channels
cy_i = 0 # 0 <= cy_i < n_cycles
y, x, fl_i = df[df.peak_i == peak_i][["aln_y", "aln_x", "field_i"]].drop_duplicates().values.flatten().astype(int)
peak_radius = 10
peak_im = ims[fl_i, ch_i, cy_i, y-peak_radius:y+peak_radius, x-peak_radius:x+peak_radius]
# Now peak_im is a centered sub-image of that peak with shape=(peak_radius, peak_radius)
""",
width=200,
assert_if_exceeds_width=True,
),
)
tell("\n\nThe following commands may be useful:")
# tell(f" tar czf {prefix}data.tar.gz {prefix}sig.npy {prefix}noi.npy {prefix}df.csv")
# tell(f" tar czf {prefix}ims.tar.gz {prefix}aln_ims.npy")
# tell("")
# tell(f" aws s3 cp {prefix}data.tar.gz s3://erisyon-public")
# tell(f" aws s3 cp {prefix}ims.tar.gz s3://erisyon-public")
tell(f" aws s3 cp {prefix}sig.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}noi.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}df.csv s3://erisyon-public")
tell(f" aws s3 cp {prefix}aln_ims.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}example.py s3://erisyon-public")
if __name__ == "__main__":
try:
DoCommand.subcommand("gen", "plaster.gen.gen_main.GenApp")
DoCommand.subcommand("run", "plaster.run.run_main.RunApp")
DoCommand.run()
except (KeyboardInterrupt):
        print()  # Add an extra line because various things terminate with \r
sys.exit(1)
except Exception as e:
log.exception(e)
sys.exit(1)
| true
| true
|
7908a4da11350dcc729f2f370f826d6e172bbe48
| 780
|
py
|
Python
|
RandomWords.py
|
Makemeproud/BitcoinGenerator
|
10e2864a2254635153c757beece028c85a31e1ca
|
[
"Apache-2.0"
] | null | null | null |
RandomWords.py
|
Makemeproud/BitcoinGenerator
|
10e2864a2254635153c757beece028c85a31e1ca
|
[
"Apache-2.0"
] | null | null | null |
RandomWords.py
|
Makemeproud/BitcoinGenerator
|
10e2864a2254635153c757beece028c85a31e1ca
|
[
"Apache-2.0"
] | 1
|
2022-02-27T14:57:19.000Z
|
2022-02-27T14:57:19.000Z
|
#!/usr/bin/env python
'''
Pull random words from http://world.std.com/~reinhold/diceware.wordlist.asc
Written 2013 Hal Canary.
Dedicated to the public domain.
'''
import random,math,sys,os
useDevRandom = True
dicewareWordlist = '~/Downloads/diceware.wordlist.asc'
with open(os.path.expanduser(dicewareWordlist)) as f:
WordList = [line.split()[1]
for nu,line in enumerate(f) if 2 <= nu < 7778]
def GetRandom():
if useDevRandom:
with open('/dev/random', 'rb') as f:
random.seed(f.read(16))
return random
else:
return random.SystemRandom()
required_entropy = 128
numwords = int(math.ceil(required_entropy / math.log(len(WordList),2)))
s = ' '.join(GetRandom().choice(WordList) for i in range(numwords))
sys.stdout.write(s)
sys.stdout.flush()
sys.stderr.write('\n')
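The word count above falls out of the entropy target: assuming the standard
7776-word Diceware list, each word contributes log2(7776) ~ 12.9 bits, so 128
bits needs ceil(128 / 12.9) = 10 words. A quick check of that arithmetic:

import math
bits_per_word = math.log(7776, 2)       # ~12.925 bits of entropy per word
print(math.ceil(128 / bits_per_word))   # -> 10 words for 128-bit strength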
| 28.888889
| 75
| 0.723077
|
import random,math,sys,os
useDevRandom = True
dicewareWordlist = '~/Downloads/diceware.wordlist.asc'
with open(os.path.expanduser(dicewareWordlist)) as f:
WordList = [line.split()[1]
for nu,line in enumerate(f) if 2 <= nu < 7778]
def GetRandom():
if useDevRandom:
with open('/dev/random', 'rb') as f:
random.seed(f.read(16))
return random
else:
return random.SystemRandom()
required_entropy = 128
numwords = int(math.ceil(required_entropy / math.log(len(WordList),2)))
s = ' '.join(GetRandom().choice(WordList) for i in range(numwords))
sys.stdout.write(s)
sys.stdout.flush()
sys.stderr.write('\n')
| true
| true
|
7908a58e9deb3412d473d4b3179c30a4123c16cc
| 1,236
|
py
|
Python
|
Python_OO/Exercicio.py
|
Madara701/Python_OO
|
8d67569a8c4771dd82f5259c2ed5e782cd4e4036
|
[
"Apache-2.0"
] | null | null | null |
Python_OO/Exercicio.py
|
Madara701/Python_OO
|
8d67569a8c4771dd82f5259c2ed5e782cd4e4036
|
[
"Apache-2.0"
] | null | null | null |
Python_OO/Exercicio.py
|
Madara701/Python_OO
|
8d67569a8c4771dd82f5259c2ed5e782cd4e4036
|
[
"Apache-2.0"
] | null | null | null |
class Pessoa:
def __init__(self,nome,idade,cpf,salario):
self.nome = nome
self.idade = idade
self.cpf = cpf
self.salario = salario
def Aumento(self):
return self.salario *0.05
class Gerente(Pessoa):
def __init__(self,nome,idade,cpf,salario,senha):
super().__init__(nome,idade,cpf,salario)
self.senha = senha
def Aumento(self):
return self.salario * 0.01 + 1000
p = Gerente('Fabio',25,41075570816,21000,456578)
print(p.nome)
print(p.idade)
print(p.cpf)
print(p.senha)
print(p.salario)
print(p.Aumento())
print('='*30)
class Animal:
def __init__(self,nome,raca,cor,peso,comportamento = True):
self.nome = nome
self.raca = raca
self.cor = cor
self.peso = peso
self.comportamento = comportamento
def Comportamento(self):
if(self.comportamento == False):
return self.peso + 500
print('Ta Gordo por sem ruim')
class Pitbull(Animal):
pass
#def Comportamento(self):
#return False
dog = Pitbull('Luci','Pitbull','Preta',53,False)
print(dog.nome)
print(dog.raca)
print(dog.cor)
print(dog.peso)
print(dog.Comportamento())
| 20.949153
| 63
| 0.61165
|
class Pessoa:
def __init__(self,nome,idade,cpf,salario):
self.nome = nome
self.idade = idade
self.cpf = cpf
self.salario = salario
def Aumento(self):
return self.salario *0.05
class Gerente(Pessoa):
def __init__(self,nome,idade,cpf,salario,senha):
super().__init__(nome,idade,cpf,salario)
self.senha = senha
def Aumento(self):
return self.salario * 0.01 + 1000
p = Gerente('Fabio',25,41075570816,21000,456578)
print(p.nome)
print(p.idade)
print(p.cpf)
print(p.senha)
print(p.salario)
print(p.Aumento())
print('='*30)
class Animal:
def __init__(self,nome,raca,cor,peso,comportamento = True):
self.nome = nome
self.raca = raca
self.cor = cor
self.peso = peso
self.comportamento = comportamento
def Comportamento(self):
if(self.comportamento == False):
return self.peso + 500
print('Ta Gordo por sem ruim')
class Pitbull(Animal):
pass
dog = Pitbull('Luci','Pitbull','Preta',53,False)
print(dog.nome)
print(dog.raca)
print(dog.cor)
print(dog.peso)
print(dog.Comportamento())
| true
| true
|
7908a5d98b2e78810c7b93a99ad02c8535a66efd
| 400
|
py
|
Python
|
polrev/areas/widgets/congressional_district_widgets.py
|
polrev-github/polrev-django
|
99108ace1a5307b14c3eccb424a9f9616e8c02ae
|
[
"MIT"
] | 1
|
2021-12-10T05:54:16.000Z
|
2021-12-10T05:54:16.000Z
|
polrev/areas/widgets/congressional_district_widgets.py
|
polrev-github/polrev-django
|
99108ace1a5307b14c3eccb424a9f9616e8c02ae
|
[
"MIT"
] | null | null | null |
polrev/areas/widgets/congressional_district_widgets.py
|
polrev-github/polrev-django
|
99108ace1a5307b14c3eccb424a9f9616e8c02ae
|
[
"MIT"
] | null | null | null |
from django.utils.translation import gettext_lazy as _
from generic_chooser.widgets import AdminChooser, LinkedFieldMixin
from areas.models import CongressionalDistrict
class CongressionalDistrictChooser(LinkedFieldMixin, AdminChooser):
#icon = 'user'
model = CongressionalDistrict
page_title = _("Choose a district")
choose_modal_url_name = 'congressional_district_chooser:choose'
| 36.363636
| 67
| 0.8175
|
from django.utils.translation import gettext_lazy as _
from generic_chooser.widgets import AdminChooser, LinkedFieldMixin
from areas.models import CongressionalDistrict
class CongressionalDistrictChooser(LinkedFieldMixin, AdminChooser):
model = CongressionalDistrict
page_title = _("Choose a district")
choose_modal_url_name = 'congressional_district_chooser:choose'
| true
| true
|
7908a6550b0c0adbf7a047f819a435542bde8f9f
| 6,642
|
py
|
Python
|
acore/classifier_power_multid_truth.py
|
zhao-david/ACORE-LFI
|
91de88b77f0be110e42ed91bbb7a50b7ca83319a
|
[
"MIT"
] | 9
|
2020-03-17T10:38:28.000Z
|
2022-03-10T20:05:11.000Z
|
acore/classifier_power_multid_truth.py
|
zhao-david/ACORE-LFI
|
91de88b77f0be110e42ed91bbb7a50b7ca83319a
|
[
"MIT"
] | null | null | null |
acore/classifier_power_multid_truth.py
|
zhao-david/ACORE-LFI
|
91de88b77f0be110e42ed91bbb7a50b7ca83319a
|
[
"MIT"
] | 1
|
2020-10-15T19:44:12.000Z
|
2020-10-15T19:44:12.000Z
|
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import argparse
import pandas as pd
from tqdm.auto import tqdm
from datetime import datetime
import seaborn as sns
import matplotlib.pyplot as plt
from utils.functions import compute_exact_tau, compute_exact_tau_distr
from models.toy_gmm_multid import ToyGMMMultiDLoader
model_dict = {
'gmm': ToyGMMMultiDLoader
}
def main(d_obs, run, rep, alpha, sample_size_obs, n_sampled_true_tau, debug=False, seed=7, verbose=False,
marginal=False, size_marginal=1000, size_check=10000):
# Changing values if debugging
rep = rep if not debug else 2
n_sampled_true_tau = n_sampled_true_tau if not debug else 10
model_obj = model_dict[run](d_obs=d_obs, marginal=marginal, size_marginal=size_marginal)
# Get the correct functions
grid_param = model_obj.grid
gen_obs_func = model_obj.sample_sim
gen_sample_func = model_obj.generate_sample
or_func = model_obj.compute_exact_or
t0_grid = model_obj.pred_grid
tp_func = model_obj.compute_exact_prob
t0_val = model_obj.true_param
# Loop over repetitions and classifiers
# Each time we train the different classifiers, we build the intervals and we record
# whether the point is in or not.
np.random.seed(seed)
out_val = []
out_cols = ['d_obs', 'run', 'rep', 'classifier', 'sample_size_obs', 't0_true_val', 'theta_0_current', 'on_true_t0',
'in_true_interval', 'size_true_int', 'true_entropy']
pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s' % sample_size_obs)
for jj in range(rep):
# Creating sample to check entropy about
sample_check = gen_sample_func(sample_size=size_check, marginal=False)
theta_vec = sample_check[:, :model_obj.d]
x_vec = sample_check[:, (model_obj.d + 1):]
bern_vec = sample_check[:, model_obj.d]
true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)
entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1
else np.log(1 - true_prob_vec[kk])
for kk, el in enumerate(bern_vec)])
# TRUE CONFIDENCE INTERVAL
# print('------ Calculate true Confidence Interval')
# Generates samples for each t0 values, so to be able to check both coverage and power
x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)
# # Calculate the true LRT value
tau_obs = np.array([compute_exact_tau(
or_func=or_func, x_obs=x_obs, t0_val=theta_0, t1_linspace=grid_param) for theta_0 in t0_grid])
tau_distr = np.apply_along_axis(arr=t0_grid.reshape(-1, model_obj.d), axis=1,
func1d=lambda t0: compute_exact_tau_distr(
gen_obs_func=gen_obs_func, or_func=or_func, t0_val=t0,
t1_linspace=grid_param, n_sampled=n_sampled_true_tau,
sample_size_obs=sample_size_obs, d_obs=model_obj.d_obs))
assert tau_distr.shape == (t0_grid.shape[0], n_sampled_true_tau)
quantile_pred_tau = np.quantile(a=tau_distr, q=alpha, axis=1)
true_interval = (tau_obs > quantile_pred_tau).astype(int)
true_interval_size = (np.sum(true_interval) / true_interval.shape[0])
# At this point all it's left is to record
for kk, theta_0_current in enumerate(t0_grid):
out_val.append([
d_obs, run, jj, 'Exact', sample_size_obs,
t0_val, theta_0_current, int(t0_val == theta_0_current),
true_interval[kk], true_interval_size, entropy_est
])
pbar.update(1)
# Saving the results
out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)
out_dir = 'sims/classifier_power_multid/'
out_filename = 'truth_classifier_power_multid%s_%s_%srep_alpha%s_sampleobs%s_t0val%s_%ssampletau_%s.csv' % (
d_obs, run, rep, str(alpha).replace('.', '-'), sample_size_obs,
str(t0_val).replace('.', '-'), n_sampled_true_tau,
datetime.strftime(datetime.today(), '%Y-%m-%d')
)
out_df.to_csv(out_dir + out_filename)
# Print results
cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'in_true_interval', 'true_entropy', 'size_true_int']]
print(cov_df.groupby(['classifier']).agg({'in_true_interval': [np.average],
'size_true_int': [np.average, np.std],
'true_entropy': [np.average, np.std]}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', action="store", type=int, default=7,
help='Random State')
parser.add_argument('--d_obs', action="store", type=int, default=2,
help='Dimensionality of the observed data (feature space)')
parser.add_argument('--rep', action="store", type=int, default=10,
help='Number of Repetitions for calculating the Pinball loss')
parser.add_argument('--alpha', action="store", type=float, default=0.1,
help='Statistical confidence level')
parser.add_argument('--run', action="store", type=str, default='gmm',
help='Problem to run')
parser.add_argument('--debug', action='store_true', default=False,
help='If true, a very small value for the sample sizes is fit to make sure the'
'file can run quickly for debugging purposes')
parser.add_argument('--verbose', action='store_true', default=False,
help='If true, logs are printed to the terminal')
parser.add_argument('--sample_size_obs', action="store", type=int, default=10,
help='Sample size of the actual observed data.')
parser.add_argument('--n_sampled_true_tau', action="store", type=int, default=100,
help='Number of Monte Carlo samples for calculating distribution of tau sample.')
argument_parsed = parser.parse_args()
main(
d_obs=argument_parsed.d_obs,
run=argument_parsed.run,
rep=argument_parsed.rep,
alpha=argument_parsed.alpha,
debug=argument_parsed.debug,
sample_size_obs=argument_parsed.sample_size_obs,
seed=argument_parsed.seed,
verbose=argument_parsed.verbose,
n_sampled_true_tau=argument_parsed.n_sampled_true_tau
)
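The interval construction in main() is a Monte Carlo acceptance region: for each
grid value theta_0, the test statistic tau is simulated under theta_0, its
alpha-quantile becomes the rejection cutoff, and theta_0 stays in the confidence
set whenever the observed tau exceeds that cutoff. A self-contained toy sketch
of that step (illustrative shapes and names, not this module's API):

import numpy as np

rng = np.random.default_rng(0)
alpha, n_grid, n_mc = 0.1, 5, 1000
tau_obs = rng.normal(size=n_grid)               # observed statistic at each theta_0
tau_distr = rng.normal(size=(n_grid, n_mc))     # simulated statistic under each theta_0
cutoff = np.quantile(tau_distr, alpha, axis=1)  # per-theta_0 rejection threshold
in_interval = (tau_obs > cutoff).astype(int)    # 1 = theta_0 kept in the confidence set
print(in_interval, in_interval.mean())          # membership vector and interval size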
| 47.784173
| 119
| 0.647094
|
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import argparse
import pandas as pd
from tqdm.auto import tqdm
from datetime import datetime
import seaborn as sns
import matplotlib.pyplot as plt
from utils.functions import compute_exact_tau, compute_exact_tau_distr
from models.toy_gmm_multid import ToyGMMMultiDLoader
model_dict = {
'gmm': ToyGMMMultiDLoader
}
def main(d_obs, run, rep, alpha, sample_size_obs, n_sampled_true_tau, debug=False, seed=7, verbose=False,
marginal=False, size_marginal=1000, size_check=10000):
rep = rep if not debug else 2
n_sampled_true_tau = n_sampled_true_tau if not debug else 10
model_obj = model_dict[run](d_obs=d_obs, marginal=marginal, size_marginal=size_marginal)
grid_param = model_obj.grid
gen_obs_func = model_obj.sample_sim
gen_sample_func = model_obj.generate_sample
or_func = model_obj.compute_exact_or
t0_grid = model_obj.pred_grid
tp_func = model_obj.compute_exact_prob
t0_val = model_obj.true_param
np.random.seed(seed)
out_val = []
out_cols = ['d_obs', 'run', 'rep', 'classifier', 'sample_size_obs', 't0_true_val', 'theta_0_current', 'on_true_t0',
'in_true_interval', 'size_true_int', 'true_entropy']
pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s' % sample_size_obs)
for jj in range(rep):
sample_check = gen_sample_func(sample_size=size_check, marginal=False)
theta_vec = sample_check[:, :model_obj.d]
x_vec = sample_check[:, (model_obj.d + 1):]
bern_vec = sample_check[:, model_obj.d]
true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)
entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1
else np.log(1 - true_prob_vec[kk])
for kk, el in enumerate(bern_vec)])
x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)
        tau_obs = np.array([compute_exact_tau(
or_func=or_func, x_obs=x_obs, t0_val=theta_0, t1_linspace=grid_param) for theta_0 in t0_grid])
tau_distr = np.apply_along_axis(arr=t0_grid.reshape(-1, model_obj.d), axis=1,
func1d=lambda t0: compute_exact_tau_distr(
gen_obs_func=gen_obs_func, or_func=or_func, t0_val=t0,
t1_linspace=grid_param, n_sampled=n_sampled_true_tau,
sample_size_obs=sample_size_obs, d_obs=model_obj.d_obs))
assert tau_distr.shape == (t0_grid.shape[0], n_sampled_true_tau)
quantile_pred_tau = np.quantile(a=tau_distr, q=alpha, axis=1)
true_interval = (tau_obs > quantile_pred_tau).astype(int)
true_interval_size = (np.sum(true_interval) / true_interval.shape[0])
for kk, theta_0_current in enumerate(t0_grid):
out_val.append([
d_obs, run, jj, 'Exact', sample_size_obs,
t0_val, theta_0_current, int(t0_val == theta_0_current),
true_interval[kk], true_interval_size, entropy_est
])
pbar.update(1)
# Saving the results
out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)
out_dir = 'sims/classifier_power_multid/'
out_filename = 'truth_classifier_power_multid%s_%s_%srep_alpha%s_sampleobs%s_t0val%s_%ssampletau_%s.csv' % (
d_obs, run, rep, str(alpha).replace('.', '-'), sample_size_obs,
str(t0_val).replace('.', '-'), n_sampled_true_tau,
datetime.strftime(datetime.today(), '%Y-%m-%d')
)
out_df.to_csv(out_dir + out_filename)
# Print results
cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'in_true_interval', 'true_entropy', 'size_true_int']]
print(cov_df.groupby(['classifier']).agg({'in_true_interval': [np.average],
'size_true_int': [np.average, np.std],
'true_entropy': [np.average, np.std]}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', action="store", type=int, default=7,
help='Random State')
parser.add_argument('--d_obs', action="store", type=int, default=2,
help='Dimensionality of the observed data (feature space)')
parser.add_argument('--rep', action="store", type=int, default=10,
help='Number of Repetitions for calculating the Pinball loss')
parser.add_argument('--alpha', action="store", type=float, default=0.1,
help='Statistical confidence level')
parser.add_argument('--run', action="store", type=str, default='gmm',
help='Problem to run')
parser.add_argument('--debug', action='store_true', default=False,
help='If true, a very small value for the sample sizes is fit to make sure the'
'file can run quickly for debugging purposes')
parser.add_argument('--verbose', action='store_true', default=False,
help='If true, logs are printed to the terminal')
parser.add_argument('--sample_size_obs', action="store", type=int, default=10,
help='Sample size of the actual observed data.')
parser.add_argument('--n_sampled_true_tau', action="store", type=int, default=100,
help='Number of Monte Carlo samples for calculating distribution of tau sample.')
argument_parsed = parser.parse_args()
main(
d_obs=argument_parsed.d_obs,
run=argument_parsed.run,
rep=argument_parsed.rep,
alpha=argument_parsed.alpha,
debug=argument_parsed.debug,
sample_size_obs=argument_parsed.sample_size_obs,
seed=argument_parsed.seed,
verbose=argument_parsed.verbose,
n_sampled_true_tau=argument_parsed.n_sampled_true_tau
)
| true
| true
|
7908a673912cd234d35fbc0b6329a275be8f4d08
| 1,235
|
py
|
Python
|
setup.py
|
nbari/zunzuncito
|
5cd24b4f39f2ca76eeacdeae0bde99f65e2eac8e
|
[
"BSD-3-Clause"
] | 2
|
2020-01-18T15:49:07.000Z
|
2020-01-18T16:01:12.000Z
|
setup.py
|
nbari/zunzuncito
|
5cd24b4f39f2ca76eeacdeae0bde99f65e2eac8e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
nbari/zunzuncito
|
5cd24b4f39f2ca76eeacdeae0bde99f65e2eac8e
|
[
"BSD-3-Clause"
] | null | null | null |
import imp
from os import path
from setuptools import setup
VERSION = imp.load_source(
'version',
path.join('.',
'zunzuncito',
'version.py'))
VERSION = VERSION.__version__
readme = open('README.rst', 'r')
setup(
name='zunzuncito',
version=VERSION,
author='Nicolas Embriz',
author_email='nbari@dalmp.com',
description="A micro-framework for creating REST API's",
license='BSD',
keywords='wsgi web api framework rest http',
url='http://www.zunzun.io',
download_url='https://github.com/nbari/zunzuncito/tarball/master',
platforms="any",
packages=['zunzuncito'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
long_description=readme.read()
)
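The imp module used above has been deprecated since Python 3.4 and was removed
in 3.12; an equivalent way to load version.py with importlib, sketched under the
same file layout:

import importlib.util
from os import path

spec = importlib.util.spec_from_file_location(
    'version', path.join('.', 'zunzuncito', 'version.py'))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
VERSION = mod.__version__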
| 29.404762
| 78
| 0.622672
|
import imp
from os import path
from setuptools import setup
VERSION = imp.load_source(
'version',
path.join('.',
'zunzuncito',
'version.py'))
VERSION = VERSION.__version__
readme = open('README.rst', 'r')
setup(
name='zunzuncito',
version=VERSION,
author='Nicolas Embriz',
author_email='nbari@dalmp.com',
description="A micro-framework for creating REST API's",
license='BSD',
keywords='wsgi web api framework rest http',
url='http://www.zunzun.io',
download_url='https://github.com/nbari/zunzuncito/tarball/master',
platforms="any",
packages=['zunzuncito'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
long_description=readme.read()
)
| true
| true
|
7908a785d7acb0b0445712820c06ae8719506e8a
| 1,912
|
py
|
Python
|
L1Trigger/RegionalCaloTrigger/test/rctInputTest_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
L1Trigger/RegionalCaloTrigger/test/rctInputTest_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
L1Trigger/RegionalCaloTrigger/test/rctInputTest_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
# The following comments couldn't be translated into the new config version:
# untracked PSet maxEvents = {untracked int32 input = 2}
#include "Configuration/ReleaseValidation/data/Services.cff"
# include "Configuration/StandardSequences/data/FakeConditions.cff"
# untracked PSet options = {
# include "FWCore/Framework/test/cmsExceptionsFatalOption.cff"
# untracked bool makeTriggerResults = true
# }
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
#
# ecal trig prim producer
# # ecal tpg params
# es_module = EcalTrigPrimESProducer {
# untracked string DatabaseFile = "TPG.txt"
# #untracked string DatabaseFile = "TPG_RCT_internal.txt"
# }
#
process.load("FWCore.MessageService.MessageLogger_cfi")
# standard RCT configuration, including input scales
process.load("L1TriggerConfig.RCTConfigProducers.L1RCTConfig_cff")
# using standard scales
process.load("L1TriggerConfig.L1ScalesProducers.L1CaloScalesConfig_cff")
#include "L1TriggerConfig/L1ScalesProducers/data/L1CaloInputScalesConfig.cff"
process.load("L1Trigger.RegionalCaloTrigger.L1RCTTestAnalyzer_cfi")
process.load("L1Trigger.RegionalCaloTrigger.rctDigis_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(64)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('rct.root')
)
process.source = cms.Source("EmptySource")
process.rctInput = cms.EDProducer("RctInputTextToDigi",
inputFile = cms.FileInPath('L1Trigger/TextToDigi/test/data/rctTestInputFileElec.txt')
)
process.input = cms.Path(process.rctInput)
process.p4 = cms.Path(process.rctDigis*process.L1RCTTestAnalyzer)
process.schedule = cms.Schedule(process.input,process.p4)
process.L1RCTTestAnalyzer.ecalDigisLabel = 'rctInput'
process.L1RCTTestAnalyzer.hcalDigisLabel = 'rctInput'
process.rctDigis.ecalDigisLabel = 'rctInput'
process.rctDigis.hcalDigisLabel = 'rctInput'
| 32.965517
| 89
| 0.789749
|
# untracked PSet maxEvents = {untracked int32 input = 2}
#include "Configuration/ReleaseValidation/data/Services.cff"
# include "Configuration/StandardSequences/data/FakeConditions.cff"
# untracked PSet options = {
# include "FWCore/Framework/test/cmsExceptionsFatalOption.cff"
# untracked bool makeTriggerResults = true
# }
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
#
# ecal trig prim producer
# # ecal tpg params
# es_module = EcalTrigPrimESProducer {
# untracked string DatabaseFile = "TPG.txt"
# #untracked string DatabaseFile = "TPG_RCT_internal.txt"
# }
#
process.load("FWCore.MessageService.MessageLogger_cfi")
# standard RCT configuration, including input scales
process.load("L1TriggerConfig.RCTConfigProducers.L1RCTConfig_cff")
# using standard scales
process.load("L1TriggerConfig.L1ScalesProducers.L1CaloScalesConfig_cff")
#include "L1TriggerConfig/L1ScalesProducers/data/L1CaloInputScalesConfig.cff"
process.load("L1Trigger.RegionalCaloTrigger.L1RCTTestAnalyzer_cfi")
process.load("L1Trigger.RegionalCaloTrigger.rctDigis_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(64)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('rct.root')
)
process.source = cms.Source("EmptySource")
process.rctInput = cms.EDProducer("RctInputTextToDigi",
inputFile = cms.FileInPath('L1Trigger/TextToDigi/test/data/rctTestInputFileElec.txt')
)
process.input = cms.Path(process.rctInput)
process.p4 = cms.Path(process.rctDigis*process.L1RCTTestAnalyzer)
process.schedule = cms.Schedule(process.input,process.p4)
process.L1RCTTestAnalyzer.ecalDigisLabel = 'rctInput'
process.L1RCTTestAnalyzer.hcalDigisLabel = 'rctInput'
process.rctDigis.ecalDigisLabel = 'rctInput'
process.rctDigis.hcalDigisLabel = 'rctInput'
| true
| true
|
7908a85796979a0f88ff5fe3ea6e8f74ab606d4a
| 12,874
|
py
|
Python
|
tensorflow_datasets/audio/nsynth.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | 14
|
2019-03-30T02:11:29.000Z
|
2021-11-16T12:06:32.000Z
|
tensorflow_datasets/audio/nsynth.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/audio/nsynth.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | 10
|
2019-03-31T08:35:29.000Z
|
2021-09-01T06:28:43.000Z
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NSynth Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The NSynth Dataset is an audio dataset containing ~300k musical notes, each
with a unique pitch, timbre, and envelope. Each note is annotated with three
additional pieces of information based on a combination of human evaluation
and heuristic algorithms: Source, Family, and Qualities.
"""
_FULL_DESCRIPTION = """\
Full NSynth Dataset is split into train, valid, and test sets, with no
instruments overlapping between the train set and the valid/test sets.
"""
_GANSYNTH_DESCRIPTION = """\
NSynth Dataset limited to acoustic instruments in the MIDI pitch interval
[24, 84]. Uses alternate splits that have overlap in instruments (but not exact
notes) between the train set and valid/test sets. This variant was originally
introduced in the ICLR 2019 GANSynth paper (https://arxiv.org/abs/1902.08710).
"""
_F0_AND_LOUDNESS_ADDENDUM = """\
This version additionally contains estimates for F0 using CREPE
(Kim et al., 2018) and A-weighted perceptual loudness. Both signals are provided
at a frame rate of 250Hz.
"""
# From http://proceedings.mlr.press/v70/engel17a.html
_CITATION = """\
@InProceedings{pmlr-v70-engel17a,
title = {Neural Audio Synthesis of Musical Notes with {W}ave{N}et Autoencoders},
author = {Jesse Engel and Cinjon Resnick and Adam Roberts and Sander Dieleman and Mohammad Norouzi and Douglas Eck and Karen Simonyan},
booktitle = {Proceedings of the 34th International Conference on Machine Learning},
pages = {1068--1077},
year = {2017},
editor = {Doina Precup and Yee Whye Teh},
volume = {70},
series = {Proceedings of Machine Learning Research},
address = {International Convention Centre, Sydney, Australia},
month = {06--11 Aug},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v70/engel17a/engel17a.pdf},
url = {http://proceedings.mlr.press/v70/engel17a.html},
}
"""
_NUM_SECS = 4
_AUDIO_RATE = 16000 # 16 kHz
_F0_AND_LOUDNESS_RATE = 250 # 250 Hz
_INSTRUMENT_FAMILIES = [
"bass", "brass", "flute", "guitar", "keyboard", "mallet", "organ", "reed",
"string", "synth_lead", "vocal"]
_INSTRUMENT_SOURCES = ["acoustic", "electronic", "synthetic"]
_QUALITIES = [
"bright",
"dark",
"distortion",
"fast_decay",
"long_release",
"multiphonic",
"nonlinear_env",
"percussive",
"reverb",
"tempo-synced"]
_BASE_DOWNLOAD_PATH = "http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-"
_SPLITS = ["train", "valid", "test"]
_SPLIT_SHARDS = {
"train": 512,
"valid": 32,
"test": 8,
}
class NsynthConfig(tfds.core.BuilderConfig):
"""BuilderConfig for NSynth Dataset."""
def __init__(self,
gansynth_subset=False,
estimate_f0_and_loudness=False,
**kwargs):
"""Constructs a NsynthConfig.
Args:
gansynth_subset: bool, whether to use the subset of the dataset introduced
in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses
acoustic-only instrument sources and limits the pitches to the interval
[24, 84]. The train and test splits are also modified so that
instruments (but not specific notes) overlap between them. See
https://arxiv.org/abs/1902.08710 for more details.
estimate_f0_and_loudness: bool, whether to estimate fundamental frequency
(F0) and loudness for the audio (at 250 Hz) and add them to the set of
features.
**kwargs: keyword arguments forwarded to super.
"""
name_parts = []
if gansynth_subset:
name_parts.append("gansynth_subset")
else:
name_parts.append("full")
if estimate_f0_and_loudness:
name_parts.append("f0_and_loudness")
super(NsynthConfig, self).__init__(
name=".".join(name_parts),
version=tfds.core.Version(
"1.1.0", experiments={tfds.core.Experiment.S3: False}),
**kwargs)
self.gansynth_subset = gansynth_subset
self.estimate_f0_and_loudness = estimate_f0_and_loudness
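# A hedged sketch of the name composition performed in __init__ above
# (the three configs actually registered below):
#   NsynthConfig()                       -> name == "full"
#   NsynthConfig(gansynth_subset=True)   -> name == "gansynth_subset"
#   NsynthConfig(gansynth_subset=True, estimate_f0_and_loudness=True)
#                                        -> name == "gansynth_subset.f0_and_loudness"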
class Nsynth(tfds.core.BeamBasedBuilder):
"""A large-scale and high-quality dataset of annotated musical notes."""
BUILDER_CONFIGS = [
NsynthConfig(description=_FULL_DESCRIPTION),
NsynthConfig(
gansynth_subset=True,
description=_GANSYNTH_DESCRIPTION),
NsynthConfig(
gansynth_subset=True,
estimate_f0_and_loudness=True,
description=_GANSYNTH_DESCRIPTION + _F0_AND_LOUDNESS_ADDENDUM),
]
def _info(self):
features = {
"id":
tf.string,
"audio":
tfds.features.Tensor(
shape=(_AUDIO_RATE * _NUM_SECS,), dtype=tf.float32),
"pitch":
tfds.features.ClassLabel(num_classes=128),
"velocity":
tfds.features.ClassLabel(num_classes=128),
"instrument": {
# We read the list of labels in _split_generators.
"label": tfds.features.ClassLabel(num_classes=1006),
"family": tfds.features.ClassLabel(names=_INSTRUMENT_FAMILIES),
"source": tfds.features.ClassLabel(names=_INSTRUMENT_SOURCES),
},
"qualities": {quality: tf.bool for quality in _QUALITIES},
}
if self.builder_config.estimate_f0_and_loudness:
f0_and_ld_shape = (_F0_AND_LOUDNESS_RATE * _NUM_SECS + 1,)
features["f0"] = {
"hz":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),
"midi":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),
"confidence":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)
}
features["loudness"] = {
"db":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)
}
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(features),
homepage="https://g.co/magenta/nsynth-dataset",
citation=_CITATION,
metadata=tfds.core.BeamMetadataDict(),
)
def _split_generators(self, dl_manager):
"""Returns splits."""
dl_urls = {}
dl_urls["examples"] = {
split: _BASE_DOWNLOAD_PATH + "%s.tfrecord.tar" % split
for split in _SPLITS
}
dl_urls["instrument_labels"] = (
_BASE_DOWNLOAD_PATH + "instrument_labels.txt")
if self.builder_config.gansynth_subset:
dl_urls["gansynth_splits"] = (
_BASE_DOWNLOAD_PATH + "gansynth_splits.csv")
dl_paths = dl_manager.download_and_extract(dl_urls)
with tf.io.gfile.GFile(dl_paths["instrument_labels"]) as f:
instrument_labels = f.read().strip().splitlines()
self.info.features["instrument"]["label"].names = instrument_labels
split_ids = {s: set() for s in _SPLITS}
split_dirs = {s: [dl_paths["examples"][s]] for s in _SPLITS}
if self.builder_config.gansynth_subset:
# Generator needs to see all original splits for each new split.
split_dirs = {s: dl_paths["examples"].values() for s in _SPLITS}
with tf.io.gfile.GFile(dl_paths["gansynth_splits"]) as f:
reader = csv.DictReader(f)
for row in reader:
split_ids[row["split"]].add(row["id"])
return [
tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension
name=split,
num_shards=_SPLIT_SHARDS[split],
gen_kwargs={
"tfrecord_dirs": split_dirs[split],
"ids": split_ids[split],
"split": split,
})
for split in _SPLITS
]
def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split):
"""Build PCollection of examples for split."""
beam = tfds.core.lazy_imports.apache_beam
def _emit_base_example(ex):
"""Maps an input example to a TFDS example."""
beam.metrics.Metrics.counter(split, "base-examples").inc()
features = ex.features.feature
return {
"id": features["note_str"].bytes_list.value[0],
"audio":
np.array(features["audio"].float_list.value, dtype=np.float32),
"pitch":
features["pitch"].int64_list.value[0],
"velocity":
features["velocity"].int64_list.value[0],
"instrument": {
"label":
tf.compat.as_text(
features["instrument_str"].bytes_list.value[0]),
"family":
tf.compat.as_text(
features["instrument_family_str"].bytes_list.value[0]),
"source":
tf.compat.as_text(
features["instrument_source_str"].bytes_list.value[0])
},
"qualities": {
q: features["qualities"].int64_list.value[i]
for (i, q) in enumerate(_QUALITIES)
}
}
def _in_split(ex, split_ids):
if not split_ids or tf.compat.as_text(ex["id"]) in split_ids:
beam.metrics.Metrics.counter(split, "in-split").inc()
return True
return False
def _estimate_f0(ex):
"""Estimate the fundamental frequency using CREPE and add to example."""
ex = ex.copy()
beam.metrics.Metrics.counter(split, "estimate-f0").inc()
_, f0_hz, f0_confidence, _ = tfds.core.lazy_imports.crepe.predict(
ex["audio"],
sr=_AUDIO_RATE,
viterbi=True,
step_size=1000 / _F0_AND_LOUDNESS_RATE,
verbose=0)
f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)
# Set -infs introduced by hz_to_midi to 0.
f0_midi[f0_midi == -np.inf] = 0
# Set nans to 0 in confidence.
f0_confidence = np.nan_to_num(f0_confidence)
ex["f0"] = {
"hz": f0_hz.astype(np.float32),
"midi": f0_midi.astype(np.float32),
"confidence": f0_confidence.astype(np.float32),
}
return ex
def _compute_loudness(ex):
"""Compute loudness and add to example."""
ex = ex.copy()
beam.metrics.Metrics.counter(split, "compute-loudness").inc()
librosa = tfds.core.lazy_imports.librosa
n_fft = 2048
amin = 1e-15
top_db = 200.0
stft = librosa.stft(
ex["audio"],
n_fft=n_fft,
hop_length=int(_AUDIO_RATE // _F0_AND_LOUDNESS_RATE))
loudness_db = librosa.perceptual_weighting(
np.abs(stft)**2,
librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft),
amin=amin,
top_db=top_db)
# Average across freq in linear scale.
mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)
mean_loudness_db = librosa.amplitude_to_db(
mean_loudness_amp,
amin=amin,
top_db=top_db)
ex["loudness"] = {"db": mean_loudness_db.astype(np.float32)}
return ex
examples = (
pipeline
| beam.Create([os.path.join(dir_, "*") for dir_ in tfrecord_dirs])
| beam.io.tfrecordio.ReadAllFromTFRecord(
coder=beam.coders.ProtoCoder(tf.train.Example))
| beam.Map(_emit_base_example)
| beam.Filter(_in_split, split_ids=ids))
if self.builder_config.estimate_f0_and_loudness:
examples = (
examples
| beam.Reshuffle()
| beam.Map(_estimate_f0)
| beam.Map(_compute_loudness))
if split == tfds.Split.TRAIN:
# Output mean and variance of loudness for TRAIN split.
loudness = examples | beam.Map(lambda x: np.mean(x["loudness"]["db"]))
loudness_mean = (
loudness
| "loudness_mean" >> beam.combiners.Mean.Globally())
loudness_variance = (
loudness
| beam.Map(lambda ld, ld_mean: (ld - ld_mean)**2,
ld_mean=beam.pvalue.AsSingleton(loudness_mean))
| "loudness_variance" >> beam.combiners.Mean.Globally())
self.info.metadata["loudness_db_mean"] = loudness_mean
self.info.metadata["loudness_db_variance"] = loudness_variance
return examples
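# Usage sketch (illustrative, not part of the builder): once this module is
# registered with TFDS, the three configs built by NsynthConfig below resolve
# to the names "nsynth/full", "nsynth/gansynth_subset" and
# "nsynth/gansynth_subset.f0_and_loudness" (see the name_parts logic in
# NsynthConfig.__init__). A hedged, minimal load:
#   import tensorflow_datasets as tfds
#   ds = tfds.load("nsynth/gansynth_subset.f0_and_loudness", split="train")
#   for ex in ds.take(1):
#       print(ex["pitch"], ex["audio"].shape, ex["loudness"]["db"].shape)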
| 36.573864
| 139
| 0.645332
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The NSynth Dataset is an audio dataset containing ~300k musical notes, each
with a unique pitch, timbre, and envelope. Each note is annotated with three
additional pieces of information based on a combination of human evaluation
and heuristic algorithms: Source, Family, and Qualities.
"""
_FULL_DESCRIPTION = """\
Full NSynth Dataset is split into train, valid, and test sets, with no
instruments overlapping between the train set and the valid/test sets.
"""
_GANSYNTH_DESCRIPTION = """\
NSynth Dataset limited to acoustic instruments in the MIDI pitch interval
[24, 84]. Uses alternate splits that have overlap in instruments (but not exact
notes) between the train set and valid/test sets. This variant was originally
introduced in the ICLR 2019 GANSynth paper (https://arxiv.org/abs/1902.08710).
"""
_F0_AND_LOUDNESS_ADDENDUM = """\
This version additionally contains estimates for F0 using CREPE
(Kim et al., 2018) and A-weighted perceptual loudness. Both signals are provided
at a frame rate of 250Hz.
"""
_CITATION = """\
@InProceedings{pmlr-v70-engel17a,
title = {Neural Audio Synthesis of Musical Notes with {W}ave{N}et Autoencoders},
author = {Jesse Engel and Cinjon Resnick and Adam Roberts and Sander Dieleman and Mohammad Norouzi and Douglas Eck and Karen Simonyan},
booktitle = {Proceedings of the 34th International Conference on Machine Learning},
pages = {1068--1077},
year = {2017},
editor = {Doina Precup and Yee Whye Teh},
volume = {70},
series = {Proceedings of Machine Learning Research},
address = {International Convention Centre, Sydney, Australia},
month = {06--11 Aug},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v70/engel17a/engel17a.pdf},
url = {http://proceedings.mlr.press/v70/engel17a.html},
}
"""
_NUM_SECS = 4
_AUDIO_RATE = 16000
_F0_AND_LOUDNESS_RATE = 250
_INSTRUMENT_FAMILIES = [
"bass", "brass", "flute", "guitar", "keyboard", "mallet", "organ", "reed",
"string", "synth_lead", "vocal"]
_INSTRUMENT_SOURCES = ["acoustic", "electronic", "synthetic"]
_QUALITIES = [
"bright",
"dark",
"distortion",
"fast_decay",
"long_release",
"multiphonic",
"nonlinear_env",
"percussive",
"reverb",
"tempo-synced"]
_BASE_DOWNLOAD_PATH = "http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-"
_SPLITS = ["train", "valid", "test"]
_SPLIT_SHARDS = {
"train": 512,
"valid": 32,
"test": 8,
}
class NsynthConfig(tfds.core.BuilderConfig):
def __init__(self,
gansynth_subset=False,
estimate_f0_and_loudness=False,
**kwargs):
name_parts = []
if gansynth_subset:
name_parts.append("gansynth_subset")
else:
name_parts.append("full")
if estimate_f0_and_loudness:
name_parts.append("f0_and_loudness")
super(NsynthConfig, self).__init__(
name=".".join(name_parts),
version=tfds.core.Version(
"1.1.0", experiments={tfds.core.Experiment.S3: False}),
**kwargs)
self.gansynth_subset = gansynth_subset
self.estimate_f0_and_loudness = estimate_f0_and_loudness
class Nsynth(tfds.core.BeamBasedBuilder):
BUILDER_CONFIGS = [
NsynthConfig(description=_FULL_DESCRIPTION),
NsynthConfig(
gansynth_subset=True,
description=_GANSYNTH_DESCRIPTION),
NsynthConfig(
gansynth_subset=True,
estimate_f0_and_loudness=True,
description=_GANSYNTH_DESCRIPTION + _F0_AND_LOUDNESS_ADDENDUM),
]
def _info(self):
features = {
"id":
tf.string,
"audio":
tfds.features.Tensor(
shape=(_AUDIO_RATE * _NUM_SECS,), dtype=tf.float32),
"pitch":
tfds.features.ClassLabel(num_classes=128),
"velocity":
tfds.features.ClassLabel(num_classes=128),
"instrument": {
"label": tfds.features.ClassLabel(num_classes=1006),
"family": tfds.features.ClassLabel(names=_INSTRUMENT_FAMILIES),
"source": tfds.features.ClassLabel(names=_INSTRUMENT_SOURCES),
},
"qualities": {quality: tf.bool for quality in _QUALITIES},
}
if self.builder_config.estimate_f0_and_loudness:
f0_and_ld_shape = (_F0_AND_LOUDNESS_RATE * _NUM_SECS + 1,)
features["f0"] = {
"hz":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),
"midi":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),
"confidence":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)
}
features["loudness"] = {
"db":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)
}
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(features),
homepage="https://g.co/magenta/nsynth-dataset",
citation=_CITATION,
metadata=tfds.core.BeamMetadataDict(),
)
def _split_generators(self, dl_manager):
dl_urls = {}
dl_urls["examples"] = {
split: _BASE_DOWNLOAD_PATH + "%s.tfrecord.tar" % split
for split in _SPLITS
}
dl_urls["instrument_labels"] = (
_BASE_DOWNLOAD_PATH + "instrument_labels.txt")
if self.builder_config.gansynth_subset:
dl_urls["gansynth_splits"] = (
_BASE_DOWNLOAD_PATH + "gansynth_splits.csv")
dl_paths = dl_manager.download_and_extract(dl_urls)
with tf.io.gfile.GFile(dl_paths["instrument_labels"]) as f:
instrument_labels = f.read().strip().splitlines()
self.info.features["instrument"]["label"].names = instrument_labels
split_ids = {s: set() for s in _SPLITS}
split_dirs = {s: [dl_paths["examples"][s]] for s in _SPLITS}
if self.builder_config.gansynth_subset:
split_dirs = {s: dl_paths["examples"].values() for s in _SPLITS}
with tf.io.gfile.GFile(dl_paths["gansynth_splits"]) as f:
reader = csv.DictReader(f)
for row in reader:
split_ids[row["split"]].add(row["id"])
return [
tfds.core.SplitGenerator(
name=split,
num_shards=_SPLIT_SHARDS[split],
gen_kwargs={
"tfrecord_dirs": split_dirs[split],
"ids": split_ids[split],
"split": split,
})
for split in _SPLITS
]
def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split):
beam = tfds.core.lazy_imports.apache_beam
def _emit_base_example(ex):
beam.metrics.Metrics.counter(split, "base-examples").inc()
features = ex.features.feature
return {
"id": features["note_str"].bytes_list.value[0],
"audio":
np.array(features["audio"].float_list.value, dtype=np.float32),
"pitch":
features["pitch"].int64_list.value[0],
"velocity":
features["velocity"].int64_list.value[0],
"instrument": {
"label":
tf.compat.as_text(
features["instrument_str"].bytes_list.value[0]),
"family":
tf.compat.as_text(
features["instrument_family_str"].bytes_list.value[0]),
"source":
tf.compat.as_text(
features["instrument_source_str"].bytes_list.value[0])
},
"qualities": {
q: features["qualities"].int64_list.value[i]
for (i, q) in enumerate(_QUALITIES)
}
}
def _in_split(ex, split_ids):
if not split_ids or tf.compat.as_text(ex["id"]) in split_ids:
beam.metrics.Metrics.counter(split, "in-split").inc()
return True
return False
def _estimate_f0(ex):
ex = ex.copy()
beam.metrics.Metrics.counter(split, "estimate-f0").inc()
_, f0_hz, f0_confidence, _ = tfds.core.lazy_imports.crepe.predict(
ex["audio"],
sr=_AUDIO_RATE,
viterbi=True,
step_size=1000 / _F0_AND_LOUDNESS_RATE,
verbose=0)
f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)
f0_midi[f0_midi == -np.inf] = 0
f0_confidence = np.nan_to_num(f0_confidence)
ex["f0"] = {
"hz": f0_hz.astype(np.float32),
"midi": f0_midi.astype(np.float32),
"confidence": f0_confidence.astype(np.float32),
}
return ex
def _compute_loudness(ex):
ex = ex.copy()
beam.metrics.Metrics.counter(split, "compute-loudness").inc()
librosa = tfds.core.lazy_imports.librosa
n_fft = 2048
amin = 1e-15
top_db = 200.0
stft = librosa.stft(
ex["audio"],
n_fft=n_fft,
hop_length=int(_AUDIO_RATE // _F0_AND_LOUDNESS_RATE))
loudness_db = librosa.perceptual_weighting(
np.abs(stft)**2,
librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft),
amin=amin,
top_db=top_db)
mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)
mean_loudness_db = librosa.amplitude_to_db(
mean_loudness_amp,
amin=amin,
top_db=top_db)
ex["loudness"] = {"db": mean_loudness_db.astype(np.float32)}
return ex
examples = (
pipeline
| beam.Create([os.path.join(dir_, "*") for dir_ in tfrecord_dirs])
| beam.io.tfrecordio.ReadAllFromTFRecord(
coder=beam.coders.ProtoCoder(tf.train.Example))
| beam.Map(_emit_base_example)
| beam.Filter(_in_split, split_ids=ids))
if self.builder_config.estimate_f0_and_loudness:
examples = (
examples
| beam.Reshuffle()
| beam.Map(_estimate_f0)
| beam.Map(_compute_loudness))
if split == tfds.Split.TRAIN:
loudness = examples | beam.Map(lambda x: np.mean(x["loudness"]["db"]))
loudness_mean = (
loudness
| "loudness_mean" >> beam.combiners.Mean.Globally())
loudness_variance = (
loudness
| beam.Map(lambda ld, ld_mean: (ld - ld_mean)**2,
ld_mean=beam.pvalue.AsSingleton(loudness_mean))
| "loudness_variance" >> beam.combiners.Mean.Globally())
self.info.metadata["loudness_db_mean"] = loudness_mean
self.info.metadata["loudness_db_variance"] = loudness_variance
return examples
| true
| true
|
7908a929017732d32e10da50f0c1cf6c5e398a86
| 392
|
py
|
Python
|
Module 2/Chapter 4/Chapter 4/probe_req.py
|
kongjiexi/Python-Penetration-Testing-for-Developers
|
8cfecc3e968e7b063b4f4053dd4e05ea281e81be
|
[
"MIT"
] | 34
|
2016-11-16T15:37:47.000Z
|
2022-01-15T06:19:27.000Z
|
Module 2/Chapter 4/Chapter 4/probe_req.py
|
kongjiexi/Python-Penetration-Testing-for-Developers-Code
|
8cfecc3e968e7b063b4f4053dd4e05ea281e81be
|
[
"MIT"
] | null | null | null |
Module 2/Chapter 4/Chapter 4/probe_req.py
|
kongjiexi/Python-Penetration-Testing-for-Developers-Code
|
8cfecc3e968e7b063b4f4053dd4e05ea281e81be
|
[
"MIT"
] | 35
|
2016-10-30T10:13:04.000Z
|
2022-03-26T21:36:49.000Z
|
from scapy.all import *
interface = 'mon0'  # wireless interface in monitor mode (e.g. set up with airmon-ng)
probe_req = []  # client MAC addresses already reported
ap_name = raw_input("Please enter the AP name ")
def probesniff(fm):
    if fm.haslayer(Dot11ProbeReq):  # only inspect 802.11 probe request frames
        client_name = fm.info  # SSID the client is probing for
        if client_name == ap_name:
            if fm.addr2 not in probe_req:  # addr2 is the sender (client) MAC
                print "New Probe Request: ", client_name
                print "MAC ", fm.addr2
                probe_req.append(fm.addr2)
sniff(iface=interface, prn=probesniff)
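# A hedged Python 3 port of the sniffer above (the original targets Python 2:
# raw_input and print statements). Assumes scapy's current API, where fm.info
# is bytes; 'mon0' must still be a monitor-mode interface.
from scapy.all import sniff, Dot11ProbeReq
interface = 'mon0'
probe_req = []
ap_name = input("Please enter the AP name ")
def probesniff(fm):
    if fm.haslayer(Dot11ProbeReq):
        client_name = fm.info.decode(errors="ignore")  # probed SSID as text
        if client_name == ap_name and fm.addr2 not in probe_req:
            print("New Probe Request: ", client_name)
            print("MAC ", fm.addr2)
            probe_req.append(fm.addr2)
sniff(iface=interface, prn=probesniff)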
| 26.133333
| 48
| 0.704082
|
from scapy.all import *
interface = 'mon0'
probe_req = []
ap_name = raw_input("Please enter the AP name ")
def probesniff(fm):
    if fm.haslayer(Dot11ProbeReq):
        client_name = fm.info
        if client_name == ap_name:
            if fm.addr2 not in probe_req:
                print "New Probe Request: ", client_name
                print "MAC ", fm.addr2
                probe_req.append(fm.addr2)
sniff(iface=interface, prn=probesniff)
| false
| true
|
7908a996941ee66bcb318dac42d7d598b81247e9
| 1,311
|
py
|
Python
|
build_definitions/openldap.py
|
d-uspenskiy/yugabyte-db-thirdparty
|
1cd96069797c6ae4a680fc75806c31f3411c4dab
|
[
"CC-BY-3.0"
] | null | null | null |
build_definitions/openldap.py
|
d-uspenskiy/yugabyte-db-thirdparty
|
1cd96069797c6ae4a680fc75806c31f3411c4dab
|
[
"CC-BY-3.0"
] | null | null | null |
build_definitions/openldap.py
|
d-uspenskiy/yugabyte-db-thirdparty
|
1cd96069797c6ae4a680fc75806c31f3411c4dab
|
[
"CC-BY-3.0"
] | null | null | null |
#
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from build_definitions import *
class OpenLDAPDependency(Dependency):
def __init__(self):
super(OpenLDAPDependency, self).__init__(
'openldap',
'2_4_54',
'https://github.com/yugabyte/openldap/archive/OPENLDAP_REL_ENG_{}.tar.gz',
BUILD_GROUP_COMMON)
self.copy_sources = True
def build(self, builder):
# build client only
disabled_features = ('slapd', 'bdb', 'hdb', 'mdb', 'monitor', 'relay', 'syncprov')
builder.build_with_configure(
builder.log_prefix(self), ['--disable-' + feature for feature in disabled_features])
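# Illustration only: the configure arguments that build() above passes to
# build_with_configure. A self-contained check of the comprehension:
disabled_features = ('slapd', 'bdb', 'hdb', 'mdb', 'monitor', 'relay', 'syncprov')
print(['--disable-' + feature for feature in disabled_features])
# -> ['--disable-slapd', '--disable-bdb', '--disable-hdb', '--disable-mdb',
#     '--disable-monitor', '--disable-relay', '--disable-syncprov']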
| 35.432432
| 99
| 0.694889
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from build_definitions import *
class OpenLDAPDependency(Dependency):
def __init__(self):
super(OpenLDAPDependency, self).__init__(
'openldap',
'2_4_54',
'https://github.com/yugabyte/openldap/archive/OPENLDAP_REL_ENG_{}.tar.gz',
BUILD_GROUP_COMMON)
self.copy_sources = True
def build(self, builder):
disabled_features = ('slapd', 'bdb', 'hdb', 'mdb', 'monitor', 'relay', 'syncprov')
builder.build_with_configure(
builder.log_prefix(self), ['--disable-' + feature for feature in disabled_features])
| true
| true
|
7908a9f23999ebd26148d524c16c927e4a8bf221
| 391
|
py
|
Python
|
setup.py
|
VisionSystemsInc/terra
|
a5312f38d5927683b42f2f659174d188db567249
|
[
"MIT"
] | null | null | null |
setup.py
|
VisionSystemsInc/terra
|
a5312f38d5927683b42f2f659174d188db567249
|
[
"MIT"
] | 38
|
2019-10-17T18:47:56.000Z
|
2021-12-07T16:17:44.000Z
|
setup.py
|
VisionSystemsInc/terra
|
a5312f38d5927683b42f2f659174d188db567249
|
[
"MIT"
] | 2
|
2019-10-08T22:00:50.000Z
|
2019-10-23T18:59:24.000Z
|
# setuptools (not distutils) is needed for install_requires/extras_require;
# the original keyword "extra_requires" is a typo that setup() would ignore.
from setuptools import setup
extras_require = {
    'celery': ["celery[redis]"],
    'flower': ["flower"]
}
setup(name="terra",
      packages=["terra"],
      description="Terra",
      extras_require=extras_require,
      install_requires=[
          "pyyaml",
          "jstyleson",
          # signal and task from celery are used unconditionally
          "celery",
          "filelock"
      ]
      )
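# Usage note (hedged): with the setuptools keyword extras_require, the optional
# dependency groups above are selected at install time, e.g.
#   pip install terra            # core dependencies only
#   pip install "terra[celery]"  # core dependencies plus celery[redis]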
| 19.55
| 59
| 0.578005
|
from setuptools import setup
extras_require = {
    'celery': ["celery[redis]"],
    'flower': ["flower"]
}
setup(name="terra",
      packages=["terra"],
      description="Terra",
      extras_require=extras_require,
      install_requires=[
          "pyyaml",
          "jstyleson",
          "celery",
          "filelock"
      ]
      )
| true
| true
|
7908aa62796accdf12f544b8ee7c009158c78b66
| 3,878
|
py
|
Python
|
mysite/guestbook/guestbook.py
|
wcl6005/testgit
|
d747a73eb4a6c4e3594f453f35d7b22f73985482
|
[
"Apache-2.0"
] | null | null | null |
mysite/guestbook/guestbook.py
|
wcl6005/testgit
|
d747a73eb4a6c4e3594f453f35d7b22f73985482
|
[
"Apache-2.0"
] | null | null | null |
mysite/guestbook/guestbook.py
|
wcl6005/testgit
|
d747a73eb4a6c4e3594f453f35d7b22f73985482
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Guestbook
# 1. A newly added package directory must contain an __init__.py file; otherwise it cannot be imported by other files or read/write files along its path. from ... .
# 2. In urls.py, register the first-level route 'ask'. In .../mysite/mysite/urls.py: url(r'^ask/', include('account.ask.urls')),
# 3. In admin.py, configure how records are displayed. In .../mysite/account/admin.py: @admin.register(Technologyask) ...
# 4. In templates, add the template file directory /ask
import datetime
import os
import json
from django.shortcuts import render
from django.http.response import HttpResponseRedirect,HttpResponse
from . models import Guestbook,Reply
from django.contrib.auth.decorators import login_required  # note: requires LOGIN_URL = '/login/' in settings.py
from django.contrib.auth.models import User
from myAPI.pageAPI import djangoPage
from django.contrib import messages
PAGE_NUM = 20  # number of entries displayed per page
# http://localhost:9000/guestbook/reply/
#@login_required
def reply(request):
if request.method != 'POST':
return render(request, 'guestbook/reply.html', context=locals())
title = request.POST['title']
content = request.POST['content']
    Guestbook.objects.filter(title=title).update(state=1)  # mark the entry as answered
    if request.user.username == 'admin':  # reply posted by the admin account
Reply.objects.filter(title=title).update(content=content )
Reply.objects.filter(title=title).update(username = 'admin' )
Reply.objects.filter(title=title).update(date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") )
return HttpResponseRedirect('/guestbook/showreply/')
@login_required
def gettitle(request):
title = request.GET.get('title','')
if title == '':
return HttpResponse('no')
return render(request, 'guestbook/reply.html', context=locals())
# http://localhost:9000/guestbook/create/
@login_required
def create(request):
if request.method != 'POST':
return render(request, 'guestbook/create.html', context=locals())
title = request.POST['title']
content = request.POST['content']
istitle = Guestbook.objects.filter(title = title)
if istitle:
messages.info(request, '告警:标题 '+ title + '已经被使用!')
return HttpResponseRedirect('/guestbook/show/')
if content:
guestbooks = Guestbook(username=request.user,title=title,content=content)
guestbooks.save()
guestbookname = Guestbook.objects.get(title=title).username
replys = Reply(guestbookname=guestbookname,title=title)
replys.save()
else:
messages.info(request,'告警:留言内容为空!')
return HttpResponseRedirect('/guestbook/show/')
# http://localhost:9000/guestbook/show/
@login_required
def show(request, page):
if request.user.is_superuser:
guestbooks = Guestbook.objects.filter().order_by('-date','-id')
        guestbooks, pageList, paginator, page = djangoPage(guestbooks, page, PAGE_NUM)  # call the pagination helper
replys = Reply.objects.filter(guestbookname=request.user.username).order_by('-date', '-id')
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/showall.html', context=locals())
guestbooks = Guestbook.objects.filter(username=request.user.username).order_by('-date', '-id')
    guestbooks, pageList, paginator, page = djangoPage(guestbooks, page, PAGE_NUM)  # call the pagination helper
replys = Reply.objects.filter(guestbookname=request.user.username).order_by('-date', '-id')
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/show.html', context=locals())
# http://localhost:9000/guestbook/showreply/
@login_required
def showreply(request, page):
title = request.GET.get('title','')
if title != '':
replys = Reply.objects.filter(title=title)
else:
replys = Reply.objects.filter(username=request.user).order_by('-date', '-id')
    replys, pageList, paginator, page = djangoPage(replys, page, PAGE_NUM)  # call the pagination helper
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/showreply.html', context=locals())
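# Hypothetical URL wiring for the routes referenced in the comments above
# (this project's actual urls.py is not shown here); a sketch in the Django
# 1.x url() style the header comments use. The paginated views take a page
# argument, shown here as a required capture group, though the project may
# instead supply a default:
#   from django.conf.urls import url
#   from . import guestbook
#   urlpatterns = [
#       url(r'^create/$', guestbook.create),
#       url(r'^reply/$', guestbook.reply),
#       url(r'^gettitle/$', guestbook.gettitle),
#       url(r'^show/(?P<page>\d+)/$', guestbook.show),
#       url(r'^showreply/(?P<page>\d+)/$', guestbook.showreply),
#   ]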
| 43.088889
| 114
| 0.690304
|
import datetime
import os
import json
from django.shortcuts import render
from django.http.response import HttpResponseRedirect,HttpResponse
from . models import Guestbook,Reply
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from myAPI.pageAPI import djangoPage
from django.contrib import messages
PAGE_NUM = 20
def reply(request):
if request.method != 'POST':
return render(request, 'guestbook/reply.html', context=locals())
title = request.POST['title']
content = request.POST['content']
Guestbook.objects.filter(title=title).update(state=1)
if request.user.username == 'admin':
Reply.objects.filter(title=title).update(content=content )
Reply.objects.filter(title=title).update(username = 'admin' )
Reply.objects.filter(title=title).update(date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") )
return HttpResponseRedirect('/guestbook/showreply/')
@login_required
def gettitle(request):
title = request.GET.get('title','')
if title == '':
return HttpResponse('no')
return render(request, 'guestbook/reply.html', context=locals())
@login_required
def create(request):
if request.method != 'POST':
return render(request, 'guestbook/create.html', context=locals())
title = request.POST['title']
content = request.POST['content']
istitle = Guestbook.objects.filter(title = title)
if istitle:
messages.info(request, '告警:标题 '+ title + '已经被使用!')
return HttpResponseRedirect('/guestbook/show/')
if content:
guestbooks = Guestbook(username=request.user,title=title,content=content)
guestbooks.save()
guestbookname = Guestbook.objects.get(title=title).username
replys = Reply(guestbookname=guestbookname,title=title)
replys.save()
else:
messages.info(request,'告警:留言内容为空!')
return HttpResponseRedirect('/guestbook/show/')
@login_required
def show(request, page):
if request.user.is_superuser:
guestbooks = Guestbook.objects.filter().order_by('-date','-id')
guestbooks, pageList, paginator, page = djangoPage(guestbooks,page,PAGE_NUM)
replys = Reply.objects.filter(guestbookname=request.user.username).order_by('-date', '-id')
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/showall.html', context=locals())
guestbooks = Guestbook.objects.filter(username=request.user.username).order_by('-date', '-id')
guestbooks, pageList, paginator, page = djangoPage(guestbooks,page,PAGE_NUM)
replys = Reply.objects.filter(guestbookname=request.user.username).order_by('-date', '-id')
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/show.html', context=locals())
@login_required
def showreply(request, page):
title = request.GET.get('title','')
if title != '':
replys = Reply.objects.filter(title=title)
else:
replys = Reply.objects.filter(username=request.user).order_by('-date', '-id')
replys, pageList, paginator, page = djangoPage(replys,page,PAGE_NUM)
offset = PAGE_NUM * (page - 1)
return render(request, 'guestbook/showreply.html', context=locals())
| true
| true
|
7908ab9717e321ad87ee34f4588cc083f1bd359b
| 12,701
|
py
|
Python
|
server/src/experiments/ud_xilinx/watertank_simulation.py
|
romainrossi/weblabdeusto
|
494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f
|
[
"BSD-2-Clause"
] | 15
|
2015-03-12T12:15:41.000Z
|
2021-12-20T17:53:24.000Z
|
server/src/experiments/ud_xilinx/watertank_simulation.py
|
romainrossi/weblabdeusto
|
494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f
|
[
"BSD-2-Clause"
] | 44
|
2015-01-07T09:22:05.000Z
|
2017-01-31T22:44:21.000Z
|
server/src/experiments/ud_xilinx/watertank_simulation.py
|
romainrossi/weblabdeusto
|
494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f
|
[
"BSD-2-Clause"
] | 22
|
2015-01-13T13:55:48.000Z
|
2021-12-16T17:07:00.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Luis Rodriguez <luis.rodriguez@opendeusto.es>
#
import threading
import time
import json
class Watertank(object):
"""
Watertank Model
Output example:
{"water": 0.0, "inputs": [0.5, 0.5], "temperatures": [716, 20], "outputs": [1.0]}
Changes that have been applied lately to this model (Dec 2015)
- There is no longer a separate temperatures mode. Now there is a single model with temperatures.
- There are no longer temperature working ranges, temperature warnings, or temperature overloads. The
model will not prevent the pumps from working. Instead, the temperature will increase indefinitely. The experiment
client can thus deal with temperatures however it wishes (and it can in fact ignore them), with no effect.
- As a result of the previous change, temperature is no longer reported as in the [0,1] range according to the range.
Now it is reported in raw form.
"""
def __init__(self, tank_capacity, inputs, outputs, water_level):
self.initialize(tank_capacity, inputs, outputs, water_level)
def initialize(self, tank_capacity, inputs, outputs, water_level):
"""
Initializes the simulation with the specified data.
        @param tank_capacity Capacity of the water tank, in liters.
        @param inputs Array containing the flow volume of each input (such as water pumps), in liters per second.
        The flows can be modified dynamically, but no inputs can be added.
        @param outputs Array containing the flow volume of each output (such as a water hose or evaporation), in liters per second.
        The flows can be modified dynamically, but no outputs can be added.
        @param water_level The starting water level. Value from 0 to 1.
"""
self.tank_capacity = tank_capacity
self.inputs = inputs
self.outputs = outputs
self.current_volume = water_level * tank_capacity
self.firstPumpTemperature = 20
self.secondPumpTemperature = 20
self.firstPumpWorkRange = [20, 200]
self.secondPumpWorkRange = [20, 200]
self.pumpTemperatureVariationPerSeconds = 6 # Enough for 30 seconds?
self.simlock = threading.RLock()
self._thread = None
self._autoupdating = False
self._autoupdating_interval = 1000
def update(self, delta):
"""
Updates the simulation. Can be done automatically if the autoupdater is used.
@param delta Delta in seconds.
@see autoupdater_start
"""
total_output = 0
for out in self.outputs:
total_output += out * delta
# Calculates how much the pumps are putting in.
total_input = 0
# Handle inputs
pump1, pump2 = self.inputs
# If the first pump is turned on we increase the temperature and the total water input
if pump1 > 0:
            # We multiply by 1.1 so that its temperature rises faster.
self.firstPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds * 1.1
total_input += pump1 * delta
else:
self.firstPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds
self.firstPumpTemperature = max(20, self.firstPumpTemperature)
total_input -= pump1 * delta
# If the second pump is turned on we increase the temperature and the total water input
if pump2 > 0:
self.secondPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds
total_input += pump2 * delta
else:
self.secondPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds
self.secondPumpTemperature = max(20, self.secondPumpTemperature)
total_input -= pump2 * delta
increment = total_input - total_output
with self.simlock:
self.current_volume += increment
# Ensure the volume stays realistic
if self.current_volume >= self.tank_capacity:
self.current_volume = self.tank_capacity
elif self.current_volume < 0:
self.current_volume = 0.0
def t_updater(self):
"""
This internal method is used by the autoupdating thread to update
the simulation every few seconds (specified as the autoupdater interval).
"""
while self._autoupdating:
time.sleep(self._autoupdating_interval)
self.update(self._autoupdating_interval)
def autoupdater_start(self, interval):
"""
Starts the autoupdating thread. That is, a thread that will call update
every so often. If started, it should eventually be stopped. Otherwise,
it will run forever in the background.
@param interval Interval between updates, in seconds.
@see autoupdater_stop
"""
self._autoupdating = True
self._autoupdating_interval = interval
self._thread = threading.Thread(None, self.t_updater)
self._thread.start()
def autoupdater_stop(self):
"""
Stops the autoupdating thread. This method is non-blocking. It will signal
the thread to stop, but may take a while before it *really* does stop.
There is a blocking version of this method.
@see autoupdater_join
"""
self._autoupdating = False
def autoupdater_join(self):
"""
Stops the autoupdating thread, and joins that thread until it really does stop.
May block forever if for some reason the thread won't stop, but that
should not happen.
"""
self._autoupdating = False
        self._thread.join()  # block until the updater thread actually exits
def set_input(self, input_number, input_flow):
"""
Sets the value for an input in the simulation.
@param input_number Number identifying the input. The input should exist.
@param input_flow New flow of the input, in liters per second.
"""
with self.simlock:
self.inputs[input_number] = input_flow
def set_output(self, output_number, output_flow):
"""
Sets the value for an output in the simulation.
@param output_number Number identifying the output. The output should exist.
@param output_flow New flow of the output, in liters per second.
"""
with self.simlock:
self.outputs[output_number] = output_flow
def set_inputs(self, inputs):
"""
Redefines the whole array of inputs.
@param inputs Array containing the flow of every input.
"""
with self.simlock:
self.inputs = inputs
def set_outputs(self, outputs):
"""
Redefines the whole array of outputs.
@param outputs Array containing the flow of every output.
"""
with self.simlock:
self.outputs = outputs
def get_temperatures(self):
"""
Get temperatures.
:return:
"""
return [self.firstPumpTemperature, self.secondPumpTemperature]
def get_water_volume(self):
"""
Gets the current water volume in liters. It will vary dynamically according to the
simulation's state.
"""
with self.simlock:
return self.current_volume
def get_water_level(self):
"""
Gets the current water level, as a number from 0 to 1 (empty to full). It will vary dynamically
according to the simulation's state.
"""
with self.simlock:
return 1.0 * self.current_volume / self.tank_capacity
def get_json_state(self, input_capacities, output_capacities):
"""
Gets a json-encoded description of the simulation's state.
As of now, it takes output and input capacities as arguments because the JSON state
is described through relative values. (For instance, first output at 0.3 capacity).
@param input_capacities An array containing the maximum capacities of the input.
@param output_capacities An array containing the maximum capacities of the output.
"""
if len(self.inputs) != len(input_capacities):
return "{}"
inputs = []
for inp, cap in zip(self.inputs, input_capacities):
inputs.append(1.0 * inp / cap)
outputs = []
for inp, cap in zip(self.outputs, output_capacities):
outputs.append(1.0 * inp / cap)
state = {"water": self.get_water_level(), "inputs": inputs, "outputs": outputs}
# Report the RAW temperature
temperatures = [0, 0]
temperatures[0] = self.firstPumpTemperature
temperatures[1] = self.secondPumpTemperature
state["temperatures"] = temperatures
return json.dumps(state)
if __name__ == '__main__':
from mock import patch
import unittest
def fake_sleep(t):
# TODO
a = [1 for i in range(100000)] # very fast kludge to add minor delay
b = len(a)
pass
class TestWatertankSimulation(unittest.TestCase):
def test_nothing(self):
pass
def _get_state(self, w):
js = w.get_json_state([20, 20], [100])
d = json.loads(js)
return d
@patch("time.sleep", fake_sleep)
def test_waterlevel_increase_decrease(self):
w = Watertank(1000, [100, 100], [100], 0.5)
w.autoupdater_start(1)
initial_level = self._get_state(w)["water"]
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
other_level = self._get_state(w)["water"]
# Check that the water level did increase
self.assertGreater(other_level, initial_level)
w.set_outputs([400])
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
dec_level = self._get_state(w)["water"]
# Check that the water level did decrease
self.assertGreater(other_level, dec_level)
@patch("time.sleep", fake_sleep)
def test_temperature_increase_decrease(self):
w = Watertank(1000, [100, 100], [100], 0.5)
w.autoupdater_start(1)
t0 = self._get_state(w)["temperatures"][0]
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
t1 = self._get_state(w)["temperatures"][0]
                # Check that the temperature did increase
self.assertGreater(t1, t0)
w.set_inputs([0, 0])
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
t2 = self._get_state(w)["temperatures"][0]
                # Check that the temperature did decrease
self.assertGreater(t1, t2)
# @patch("time.sleep", fake_sleep)
# def test_first(self):
# w = Watertank(1000, [100, 100], [100], 0.5)
# w.autoupdater_start(1)
#
# i = 0
# while (i < 15):
# print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
# time.sleep(0.5)
# i += 1
#
# print "...."
# i = 0
# w.set_outputs([100])
# w.set_inputs([10, 10])
# while (i < 30):
# print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
# time.sleep(0.5)
# i += 1
#
# w.autoupdater_join()
#
# @patch("time.sleep", fake_sleep)
# def test_second(self):
# w = Watertank(1000, [100, 100], [100], 0.5)
#
# i = 0
# while i < 15:
# print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
# w.update(1)
# i += 1
#
# print "...."
# i = 0
# w.set_outputs([100])
# w.set_inputs([10, 10])
# while i < 15:
# print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
# w.update(1)
# i += 1
unittest.main()
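    # A minimal usage sketch of the model above (illustrative; run it
    # separately, since unittest.main() exits before any code placed after it):
    # a 1000 L tank, two 100 L/s pumps, one 100 L/s output, starting half full.
    #   w = Watertank(1000, [100, 100], [100], 0.5)
    #   for _ in range(5):
    #       w.update(1)  # advance the simulation by one second
    #       print(w.get_water_level(), w.get_json_state([100, 100], [100]))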
| 34.988981
| 123
| 0.596016
|
import threading
import time
import json
class Watertank(object):
def __init__(self, tank_capacity, inputs, outputs, water_level):
self.initialize(tank_capacity, inputs, outputs, water_level)
def initialize(self, tank_capacity, inputs, outputs, water_level):
self.tank_capacity = tank_capacity
self.inputs = inputs
self.outputs = outputs
self.current_volume = water_level * tank_capacity
self.firstPumpTemperature = 20
self.secondPumpTemperature = 20
self.firstPumpWorkRange = [20, 200]
self.secondPumpWorkRange = [20, 200]
self.pumpTemperatureVariationPerSeconds = 6
self.simlock = threading.RLock()
self._thread = None
self._autoupdating = False
self._autoupdating_interval = 1000
def update(self, delta):
total_output = 0
for out in self.outputs:
total_output += out * delta
total_input = 0
pump1, pump2 = self.inputs
if pump1 > 0:
self.firstPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds * 1.1
total_input += pump1 * delta
else:
self.firstPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds
self.firstPumpTemperature = max(20, self.firstPumpTemperature)
total_input -= pump1 * delta
if pump2 > 0:
self.secondPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds
total_input += pump2 * delta
else:
self.secondPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds
self.secondPumpTemperature = max(20, self.secondPumpTemperature)
total_input -= pump2 * delta
increment = total_input - total_output
with self.simlock:
self.current_volume += increment
if self.current_volume >= self.tank_capacity:
self.current_volume = self.tank_capacity
elif self.current_volume < 0:
self.current_volume = 0.0
def t_updater(self):
while self._autoupdating:
time.sleep(self._autoupdating_interval)
self.update(self._autoupdating_interval)
def autoupdater_start(self, interval):
self._autoupdating = True
self._autoupdating_interval = interval
self._thread = threading.Thread(None, self.t_updater)
self._thread.start()
def autoupdater_stop(self):
self._autoupdating = False
def autoupdater_join(self):
self._autoupdating = False
        self._thread.join()
def set_input(self, input_number, input_flow):
with self.simlock:
self.inputs[input_number] = input_flow
def set_output(self, output_number, output_flow):
with self.simlock:
self.outputs[output_number] = output_flow
def set_inputs(self, inputs):
with self.simlock:
self.inputs = inputs
def set_outputs(self, outputs):
with self.simlock:
self.outputs = outputs
def get_temperatures(self):
return [self.firstPumpTemperature, self.secondPumpTemperature]
def get_water_volume(self):
with self.simlock:
return self.current_volume
def get_water_level(self):
with self.simlock:
return 1.0 * self.current_volume / self.tank_capacity
def get_json_state(self, input_capacities, output_capacities):
if len(self.inputs) != len(input_capacities):
return "{}"
inputs = []
for inp, cap in zip(self.inputs, input_capacities):
inputs.append(1.0 * inp / cap)
outputs = []
for inp, cap in zip(self.outputs, output_capacities):
outputs.append(1.0 * inp / cap)
state = {"water": self.get_water_level(), "inputs": inputs, "outputs": outputs}
temperatures = [0, 0]
temperatures[0] = self.firstPumpTemperature
temperatures[1] = self.secondPumpTemperature
state["temperatures"] = temperatures
return json.dumps(state)
if __name__ == '__main__':
from mock import patch
import unittest
def fake_sleep(t):
a = [1 for i in range(100000)]
b = len(a)
pass
class TestWatertankSimulation(unittest.TestCase):
def test_nothing(self):
pass
def _get_state(self, w):
js = w.get_json_state([20, 20], [100])
d = json.loads(js)
return d
@patch("time.sleep", fake_sleep)
def test_waterlevel_increase_decrease(self):
w = Watertank(1000, [100, 100], [100], 0.5)
w.autoupdater_start(1)
initial_level = self._get_state(w)["water"]
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
other_level = self._get_state(w)["water"]
self.assertGreater(other_level, initial_level)
w.set_outputs([400])
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
dec_level = self._get_state(w)["water"]
self.assertGreater(other_level, dec_level)
@patch("time.sleep", fake_sleep)
def test_temperature_increase_decrease(self):
w = Watertank(1000, [100, 100], [100], 0.5)
w.autoupdater_start(1)
t0 = self._get_state(w)["temperatures"][0]
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
t1 = self._get_state(w)["temperatures"][0]
self.assertGreater(t1, t0)
w.set_inputs([0, 0])
i = 0
while (i < 15):
time.sleep(0.5)
i += 1
t2 = self._get_state(w)["temperatures"][0]
self.assertGreater(t1, t2)
unittest.main()
| true
| true
|
7908ab996e67d75b3c62fc83428b21438d8c51b3
| 21,609
|
py
|
Python
|
src/pip/_internal/index/collector.py
|
NeilBotelho/pip
|
d01bfcfaa13a4f06fa0ce61fa18cf06012f2e78f
|
[
"MIT"
] | null | null | null |
src/pip/_internal/index/collector.py
|
NeilBotelho/pip
|
d01bfcfaa13a4f06fa0ce61fa18cf06012f2e78f
|
[
"MIT"
] | 1
|
2021-10-04T12:25:25.000Z
|
2021-10-05T07:30:54.000Z
|
src/pip/_internal/index/collector.py
|
NeilBotelho/pip
|
d01bfcfaa13a4f06fa0ce61fa18cf06012f2e78f
|
[
"MIT"
] | 1
|
2020-06-01T19:13:16.000Z
|
2020-06-01T19:13:16.000Z
|
"""
The main purpose of this module is to expose LinkCollector.collect_links().
"""
import cgi
import functools
import itertools
import logging
import mimetypes
import os
import re
from collections import OrderedDict
from pip._vendor import html5lib, requests
from pip._vendor.distlib.compat import unescape
from pip._vendor.requests.exceptions import HTTPError, RetryError, SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.models.link import Link
from pip._internal.utils.filetypes import ARCHIVE_EXTENSIONS
from pip._internal.utils.misc import pairwise, redact_auth_from_url
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url, url_to_path
from pip._internal.vcs import is_url, vcs
if MYPY_CHECK_RUNNING:
from typing import (
Callable, Iterable, List, MutableMapping, Optional,
Protocol, Sequence, Tuple, TypeVar, Union,
)
import xml.etree.ElementTree
from pip._vendor.requests import Response
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.session import PipSession
HTMLElement = xml.etree.ElementTree.Element
ResponseHeaders = MutableMapping[str, str]
# Used in the @lru_cache polyfill.
F = TypeVar('F')
class LruCache(Protocol):
def __call__(self, maxsize=None):
# type: (Optional[int]) -> Callable[[F], F]
raise NotImplementedError
logger = logging.getLogger(__name__)
# Fallback to noop_lru_cache in Python 2
# TODO: this can be removed when python 2 support is dropped!
def noop_lru_cache(maxsize=None):
# type: (Optional[int]) -> Callable[[F], F]
def _wrapper(f):
# type: (F) -> F
return f
return _wrapper
_lru_cache = getattr(functools, "lru_cache", noop_lru_cache) # type: LruCache
def _match_vcs_scheme(url):
# type: (str) -> Optional[str]
"""Look for VCS schemes in the URL.
Returns the matched VCS scheme, or None if there's no match.
"""
for scheme in vcs.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
return scheme
return None
def _is_url_like_archive(url):
# type: (str) -> bool
"""Return whether the URL looks like an archive.
"""
filename = Link(url).filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
return True
return False
class _NotHTML(Exception):
def __init__(self, content_type, request_desc):
# type: (str, str) -> None
super(_NotHTML, self).__init__(content_type, request_desc)
self.content_type = content_type
self.request_desc = request_desc
def _ensure_html_header(response):
# type: (Response) -> None
"""Check the Content-Type header to ensure the response contains HTML.
Raises `_NotHTML` if the content type is not text/html.
"""
content_type = response.headers.get("Content-Type", "")
if not content_type.lower().startswith("text/html"):
raise _NotHTML(content_type, response.request.method)
class _NotHTTP(Exception):
pass
def _ensure_html_response(url, session):
# type: (str, PipSession) -> None
"""Send a HEAD request to the URL, and ensure the response contains HTML.
Raises `_NotHTTP` if the URL is not available for a HEAD request, or
`_NotHTML` if the content type is not text/html.
"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in {'http', 'https'}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
_ensure_html_header(resp)
def _get_html_response(url, session):
# type: (str, PipSession) -> Response
"""Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
"""
if _is_url_like_archive(url):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_auth_from_url(url))
resp = session.get(
url,
headers={
"Accept": "text/html",
            # We don't want to blindly return cached data for
            # /simple/, because authors generally expect that
# twine upload && pip install will function, but if
# they've done a pip install in the last ~10 minutes
# it won't. Thus by setting this to zero we will not
# blindly use any cached data, however the benefit of
# using max-age=0 instead of no-cache, is that we will
# still support conditional requests, so we will still
# minimize traffic sent in cases where the page hasn't
# changed at all, we will just always incur the round
# trip for the conditional GET now instead of only
# once per 10 minutes.
# For more information, please see pypa/pip#5670.
"Cache-Control": "max-age=0",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
    # requirement of a URL. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
_ensure_html_header(resp)
return resp
def _get_encoding_from_headers(headers):
# type: (ResponseHeaders) -> Optional[str]
"""Determine if we have any encoding information in our headers.
"""
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
return params['charset']
return None
def _determine_base_url(document, page_url):
# type: (HTMLElement, str) -> str
"""Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
"""
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
def _clean_url_path_part(part):
# type: (str) -> str
"""
Clean a "part" of a URL path (i.e. after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
return urllib_parse.quote(urllib_parse.unquote(part))
def _clean_file_url_path(part):
# type: (str) -> str
"""
Clean the first part of a URL path that corresponds to a local
filesystem path (i.e. the first part after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
# Also, on Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
return urllib_request.pathname2url(urllib_request.url2pathname(part))
# Matches '@' and the percent-encoded form of '/' ("%2F"); URL paths are split
# on these reserved characters so they survive cleaning unchanged.
_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)
def _clean_url_path(path, is_local_path):
# type: (str, bool) -> str
"""
Clean the path portion of a URL.
"""
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
# Split on the reserved characters prior to cleaning so that
# revision strings in VCS URLs are properly preserved.
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
cleaned_parts.append(clean_func(to_clean))
# Normalize %xx escapes (e.g. %2f -> %2F)
cleaned_parts.append(reserved.upper())
return ''.join(cleaned_parts)
def _clean_link(url):
# type: (str) -> str
"""
Make sure a link is fully quoted.
For example, if ' ' occurs in the URL, it will be replaced with "%20",
and without double-quoting other characters.
"""
# Split the URL into parts according to the general structure
# `scheme://netloc/path;parameters?query#fragment`.
result = urllib_parse.urlparse(url)
# If the netloc is empty, then the URL refers to a local filesystem path.
is_local_path = not result.netloc
path = _clean_url_path(result.path, is_local_path=is_local_path)
return urllib_parse.urlunparse(result._replace(path=path))
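# For example (worked by hand from the helpers above): a space in the path is
# percent-quoted, while an existing "%2f" escape is only normalized in case:
#   _clean_link('https://example.com/some path/f%2foo')
#   -> 'https://example.com/some%20path/f%2Foo'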
def _create_link_from_element(
anchor, # type: HTMLElement
page_url, # type: str
base_url, # type: str
):
# type: (...) -> Optional[Link]
"""
Convert an anchor element in a simple repository page to a Link.
"""
href = anchor.get("href")
if not href:
return None
url = _clean_link(urllib_parse.urljoin(base_url, href))
pyrequire = anchor.get('data-requires-python')
pyrequire = unescape(pyrequire) if pyrequire else None
yanked_reason = anchor.get('data-yanked')
if yanked_reason:
# This is a unicode string in Python 2 (and 3).
yanked_reason = unescape(yanked_reason)
link = Link(
url,
comes_from=page_url,
requires_python=pyrequire,
yanked_reason=yanked_reason,
)
return link
class CacheablePageContent(object):
def __init__(self, page):
# type: (HTMLPage) -> None
assert page.cache_link_parsing
self.page = page
def __eq__(self, other):
# type: (object) -> bool
return (isinstance(other, type(self)) and
self.page.url == other.page.url)
def __hash__(self):
# type: () -> int
return hash(self.page.url)
def with_cached_html_pages(
fn, # type: Callable[[HTMLPage], Iterable[Link]]
):
# type: (...) -> Callable[[HTMLPage], List[Link]]
"""
Given a function that parses an Iterable[Link] from an HTMLPage, cache the
function's result (keyed by CacheablePageContent), unless the HTMLPage
`page` has `page.cache_link_parsing == False`.
"""
@_lru_cache(maxsize=None)
def wrapper(cacheable_page):
# type: (CacheablePageContent) -> List[Link]
return list(fn(cacheable_page.page))
@functools.wraps(fn)
def wrapper_wrapper(page):
# type: (HTMLPage) -> List[Link]
if page.cache_link_parsing:
return wrapper(CacheablePageContent(page))
return list(fn(page))
return wrapper_wrapper
@with_cached_html_pages
def parse_links(page):
# type: (HTMLPage) -> Iterable[Link]
"""
Parse an HTML document, and yield its anchor elements as Link objects.
"""
document = html5lib.parse(
page.content,
transport_encoding=page.encoding,
namespaceHTMLElements=False,
)
url = page.url
base_url = _determine_base_url(document, url)
for anchor in document.findall(".//a"):
link = _create_link_from_element(
anchor,
page_url=url,
base_url=base_url,
)
if link is None:
continue
yield link
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(
self,
content, # type: bytes
encoding, # type: Optional[str]
url, # type: str
cache_link_parsing=True, # type: bool
):
# type: (...) -> None
"""
:param encoding: the encoding to decode the given content.
:param url: the URL from which the HTML was downloaded.
:param cache_link_parsing: whether links parsed from this page's url
should be cached. PyPI index urls should
have this set to False, for example.
"""
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
def __str__(self):
# type: () -> str
return redact_auth_from_url(self.url)
def _handle_get_page_fail(
link, # type: Link
reason, # type: Union[str, Exception]
meth=None # type: Optional[Callable[..., None]]
):
# type: (...) -> None
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _make_html_page(response, cache_link_parsing=True):
# type: (Response, bool) -> HTMLPage
encoding = _get_encoding_from_headers(response.headers)
return HTMLPage(
response.content,
encoding=encoding,
url=response.url,
cache_link_parsing=cache_link_parsing)
def _get_html_page(link, session=None):
# type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
if session is None:
raise TypeError(
"_get_html_page() missing 1 required keyword argument: 'session'"
)
url = link.url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.debug('Cannot look at %s URL %s', vcs_scheme, link)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
try:
resp = _get_html_response(url, session=session)
except _NotHTTP:
logger.debug(
'Skipping page %s because it looks like an archive, and cannot '
'be checked by HEAD.', link,
)
except _NotHTML as exc:
logger.warning(
            'Skipping page %s because the %s request got Content-Type: %s. '
'The only supported Content-Type is text/html',
link, exc.request_desc, exc.content_type,
)
except HTTPError as exc:
_handle_get_page_fail(link, exc)
except RetryError as exc:
_handle_get_page_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_page_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_page_fail(link, "connection error: {}".format(exc))
except requests.Timeout:
_handle_get_page_fail(link, "timed out")
else:
return _make_html_page(resp,
cache_link_parsing=link.cache_link_parsing)
return None
def _remove_duplicate_links(links):
# type: (Iterable[Link]) -> List[Link]
"""
Return a list of links, with duplicates removed and ordering preserved.
"""
# We preserve the ordering when removing duplicates because we can.
return list(OrderedDict.fromkeys(links))
def group_locations(locations, expand_dir=False):
# type: (Sequence[str], bool) -> Tuple[List[str], List[str]]
"""
Divide a list of locations into two groups: "files" (archives) and "urls."
:return: A pair of lists (files, urls).
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
# type: (str) -> None
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
else:
logger.warning(
"Path '{0}' is ignored: "
"it is a directory.".format(path),
)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url,
)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url,
)
return files, urls
class CollectedLinks(object):
"""
Encapsulates the return value of a call to LinkCollector.collect_links().
The return value includes both URLs to project pages containing package
links, as well as individual package Link objects collected from other
sources.
This info is stored separately as:
(1) links from the configured file locations,
(2) links from the configured find_links, and
(3) urls to HTML project pages, as described by the PEP 503 simple
repository API.
"""
def __init__(
self,
files, # type: List[Link]
find_links, # type: List[Link]
project_urls, # type: List[Link]
):
# type: (...) -> None
"""
:param files: Links from file locations.
:param find_links: Links from find_links.
:param project_urls: URLs to HTML project pages, as described by
the PEP 503 simple repository API.
"""
self.files = files
self.find_links = find_links
self.project_urls = project_urls
class LinkCollector(object):
"""
Responsible for collecting Link objects from all configured locations,
making network requests as needed.
The class's main method is its collect_links() method.
"""
def __init__(
self,
session, # type: PipSession
search_scope, # type: SearchScope
):
# type: (...) -> None
self.search_scope = search_scope
self.session = session
@property
def find_links(self):
# type: () -> List[str]
return self.search_scope.find_links
def fetch_page(self, location):
# type: (Link) -> Optional[HTMLPage]
"""
Fetch an HTML page containing package links.
"""
return _get_html_page(location, session=self.session)
def collect_links(self, project_name):
# type: (str) -> CollectedLinks
"""Find all available links for the given project name.
:return: All the Link objects (unfiltered), as a CollectedLinks object.
"""
search_scope = self.search_scope
index_locations = search_scope.get_index_urls_locations(project_name)
index_file_loc, index_url_loc = group_locations(index_locations)
fl_file_loc, fl_url_loc = group_locations(
self.find_links, expand_dir=True,
)
file_links = [
Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)
]
# We trust every directly linked archive in find_links
find_link_links = [Link(url, '-f') for url in self.find_links]
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links.
# We want to filter out anything that does not have a secure origin.
url_locations = [
link for link in itertools.chain(
# Mark PyPI indices as "cache_link_parsing == False" -- this
# will avoid caching the result of parsing the page for links.
(Link(url, cache_link_parsing=False) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
)
if self.session.is_secure_origin(link)
]
url_locations = _remove_duplicate_links(url_locations)
lines = [
'{} location(s) to search for versions of {}:'.format(
len(url_locations), project_name,
),
]
for link in url_locations:
lines.append('* {}'.format(link))
logger.debug('\n'.join(lines))
return CollectedLinks(
files=file_links,
find_links=find_link_links,
project_urls=url_locations,
)
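# A hedged end-to-end sketch: PipSession and SearchScope are pip-internal
# objects whose construction lives outside this module; the factory calls
# below match their usual signatures but may drift between pip versions.
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.session import PipSession
session = PipSession()
search_scope = SearchScope.create(find_links=[], index_urls=["https://pypi.org/simple"])
collector = LinkCollector(session=session, search_scope=search_scope)
collected = collector.collect_links("pip")  # performs live network requests
for link in collected.project_urls:
    print(link)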
| 32.59276
| 79
| 0.630941
|
import cgi
import functools
import itertools
import logging
import mimetypes
import os
import re
from collections import OrderedDict
from pip._vendor import html5lib, requests
from pip._vendor.distlib.compat import unescape
from pip._vendor.requests.exceptions import HTTPError, RetryError, SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.models.link import Link
from pip._internal.utils.filetypes import ARCHIVE_EXTENSIONS
from pip._internal.utils.misc import pairwise, redact_auth_from_url
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url, url_to_path
from pip._internal.vcs import is_url, vcs
if MYPY_CHECK_RUNNING:
from typing import (
Callable, Iterable, List, MutableMapping, Optional,
Protocol, Sequence, Tuple, TypeVar, Union,
)
import xml.etree.ElementTree
from pip._vendor.requests import Response
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.session import PipSession
HTMLElement = xml.etree.ElementTree.Element
ResponseHeaders = MutableMapping[str, str]
F = TypeVar('F')
class LruCache(Protocol):
def __call__(self, maxsize=None):
raise NotImplementedError
logger = logging.getLogger(__name__)
def noop_lru_cache(maxsize=None):
def _wrapper(f):
return f
return _wrapper
_lru_cache = getattr(functools, "lru_cache", noop_lru_cache)
def _match_vcs_scheme(url):
for scheme in vcs.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
return scheme
return None
def _is_url_like_archive(url):
filename = Link(url).filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
return True
return False
class _NotHTML(Exception):
def __init__(self, content_type, request_desc):
super(_NotHTML, self).__init__(content_type, request_desc)
self.content_type = content_type
self.request_desc = request_desc
def _ensure_html_header(response):
content_type = response.headers.get("Content-Type", "")
if not content_type.lower().startswith("text/html"):
raise _NotHTML(content_type, response.request.method)
class _NotHTTP(Exception):
pass
def _ensure_html_response(url, session):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in {'http', 'https'}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
_ensure_html_header(resp)
def _get_html_response(url, session):
if _is_url_like_archive(url):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_auth_from_url(url))
resp = session.get(
url,
headers={
"Accept": "text/html",
            # We don't want to blindly return cached data for the
            # /simple/ pages, because authors generally expect that
            # twine upload && pip install will work; if they did a
            # pip install in the last ~10 minutes, a stale cache would
            # break that. max-age=0 avoids blindly using cached data,
            # and unlike no-cache it still allows conditional requests,
            # so traffic stays minimal when the page hasn't changed.
"Cache-Control": "max-age=0",
},
)
resp.raise_for_status()
_ensure_html_header(resp)
return resp
def _get_encoding_from_headers(headers):
# type: (ResponseHeaders) -> Optional[str]
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
return params['charset']
return None
def _determine_base_url(document, page_url):
# type: (HTMLElement, str) -> str
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
def _clean_url_path_part(part):
# type: (str) -> str
# We unquote prior to quoting to make sure nothing is double quoted.
return urllib_parse.quote(urllib_parse.unquote(part))
def _clean_file_url_path(part):
# type: (str) -> str
# We unquote prior to quoting to make sure nothing is double quoted.
# Also, on Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
return urllib_request.pathname2url(urllib_request.url2pathname(part))
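# A minimal worked example (POSIX semantics assumed): on Linux, pathname2url
# quotes the colon, so _clean_file_url_path('/C:/my dir') -> '/C%3A/my%20dir',
# while on Windows the drive-letter colon is handled by urllib.request itself.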
# Reserved characters to preserve verbatim: '@' and '%2F' (the
# percent-encoded '/'), so revision strings in VCS URLs survive cleaning.
_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)
def _clean_url_path(path, is_local_path):
# type: (str, bool) -> str
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
# Split on the reserved characters prior to cleaning so that
# revision strings in VCS URLs are properly preserved.
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
cleaned_parts.append(clean_func(to_clean))
# Normalize %xx escapes (e.g. %2f -> %2F)
cleaned_parts.append(reserved.upper())
return ''.join(cleaned_parts)
def _clean_link(url):
# type: (str) -> str
# Split the URL into parts according to the general structure
# `scheme://netloc/path;parameters?query#fragment`.
result = urllib_parse.urlparse(url)
# If the netloc is empty, then the URL refers to a local filesystem path.
is_local_path = not result.netloc
path = _clean_url_path(result.path, is_local_path=is_local_path)
return urllib_parse.urlunparse(result._replace(path=path))
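# A quick sketch of the combined effect (hypothetical URL): _clean_link
# re-quotes each path segment exactly once while preserving '@' and '%2F':
#   _clean_link('https://example.com/a b/pkg@v1')
#   -> 'https://example.com/a%20b/pkg@v1'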
def _create_link_from_element(
anchor, # type: HTMLElement
page_url, # type: str
base_url, # type: str
):
# type: (...) -> Optional[Link]
href = anchor.get("href")
if not href:
return None
url = _clean_link(urllib_parse.urljoin(base_url, href))
pyrequire = anchor.get('data-requires-python')
pyrequire = unescape(pyrequire) if pyrequire else None
yanked_reason = anchor.get('data-yanked')
if yanked_reason:
# This is a unicode string in Python 2 (and 3).
yanked_reason = unescape(yanked_reason)
link = Link(
url,
comes_from=page_url,
requires_python=pyrequire,
yanked_reason=yanked_reason,
)
return link
class CacheablePageContent(object):
def __init__(self, page):
# type: (HTMLPage) -> None
assert page.cache_link_parsing
self.page = page
def __eq__(self, other):
# type: (object) -> bool
return (isinstance(other, type(self)) and
self.page.url == other.page.url)
def __hash__(self):
# type: () -> int
return hash(self.page.url)
def with_cached_html_pages(
fn, # type: Callable[[HTMLPage], Iterable[Link]]
):
# type: (...) -> Callable[[HTMLPage], List[Link]]
@_lru_cache(maxsize=None)
def wrapper(cacheable_page):
# type: (CacheablePageContent) -> List[Link]
return list(fn(cacheable_page.page))
@functools.wraps(fn)
def wrapper_wrapper(page):
# type: (HTMLPage) -> List[Link]
if page.cache_link_parsing:
return wrapper(CacheablePageContent(page))
return list(fn(page))
return wrapper_wrapper
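# Note: pages built with cache_link_parsing=False (the PyPI index pages in
# collect_links below) bypass the LRU cache above and are re-parsed on every
# call; only find-links pages are memoized via CacheablePageContent.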
@with_cached_html_pages
def parse_links(page):
# type: (HTMLPage) -> Iterable[Link]
document = html5lib.parse(
page.content,
transport_encoding=page.encoding,
namespaceHTMLElements=False,
)
url = page.url
base_url = _determine_base_url(document, url)
for anchor in document.findall(".//a"):
link = _create_link_from_element(
anchor,
page_url=url,
base_url=base_url,
)
if link is None:
continue
yield link
class HTMLPage(object):
def __init__(
self,
content, # type: bytes
encoding, # type: Optional[str]
url, # type: str
cache_link_parsing=True, # type: bool
):
# type: (...) -> None
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
def __str__(self):
# type: () -> str
return redact_auth_from_url(self.url)
def _handle_get_page_fail(
link, # type: Link
reason, # type: Union[str, Exception]
meth=None # type: Optional[Callable[..., None]]
):
# type: (...) -> None
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _make_html_page(response, cache_link_parsing=True):
# type: (Response, bool) -> HTMLPage
encoding = _get_encoding_from_headers(response.headers)
return HTMLPage(
response.content,
encoding=encoding,
url=response.url,
cache_link_parsing=cache_link_parsing)
def _get_html_page(link, session=None):
# type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
if session is None:
raise TypeError(
"_get_html_page() missing 1 required keyword argument: 'session'"
)
    url = link.url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.debug('Cannot look at %s URL %s', vcs_scheme, link)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
try:
resp = _get_html_response(url, session=session)
except _NotHTTP:
logger.debug(
'Skipping page %s because it looks like an archive, and cannot '
'be checked by HEAD.', link,
)
except _NotHTML as exc:
logger.warning(
            'Skipping page %s because the %s request got Content-Type: %s. '
            'The only supported Content-Type is text/html.',
link, exc.request_desc, exc.content_type,
)
except HTTPError as exc:
_handle_get_page_fail(link, exc)
except RetryError as exc:
_handle_get_page_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_page_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_page_fail(link, "connection error: {}".format(exc))
except requests.Timeout:
_handle_get_page_fail(link, "timed out")
else:
return _make_html_page(resp,
cache_link_parsing=link.cache_link_parsing)
return None
def _remove_duplicate_links(links):
return list(OrderedDict.fromkeys(links))
def group_locations(locations, expand_dir=False):
files = []
urls = []
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
else:
logger.warning(
"Path '{0}' is ignored: "
"it is a directory.".format(path),
)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url,
)
elif is_url(url):
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url,
)
return files, urls
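# A hedged usage sketch (paths are hypothetical): with a local wheel directory,
#   group_locations(['https://pypi.org/simple', '/tmp/wheels'], expand_dir=True)
# yields file:// URLs for each wheel under /tmp/wheels as `files` and the
# index URL as `urls`.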
class CollectedLinks(object):
def __init__(
self,
files,
find_links,
project_urls,
):
self.files = files
self.find_links = find_links
self.project_urls = project_urls
class LinkCollector(object):
def __init__(
self,
session,
search_scope,
):
self.search_scope = search_scope
self.session = session
@property
def find_links(self):
return self.search_scope.find_links
def fetch_page(self, location):
return _get_html_page(location, session=self.session)
def collect_links(self, project_name):
search_scope = self.search_scope
index_locations = search_scope.get_index_urls_locations(project_name)
index_file_loc, index_url_loc = group_locations(index_locations)
fl_file_loc, fl_url_loc = group_locations(
self.find_links, expand_dir=True,
)
file_links = [
Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)
]
find_link_links = [Link(url, '-f') for url in self.find_links]
url_locations = [
link for link in itertools.chain(
(Link(url, cache_link_parsing=False) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
)
if self.session.is_secure_origin(link)
]
url_locations = _remove_duplicate_links(url_locations)
lines = [
'{} location(s) to search for versions of {}:'.format(
len(url_locations), project_name,
),
]
for link in url_locations:
lines.append('* {}'.format(link))
logger.debug('\n'.join(lines))
return CollectedLinks(
files=file_links,
find_links=find_link_links,
project_urls=url_locations,
)
| true
| true
|
7908acbd34bf9d70ea1f3bd2c829d2b691ffd526
| 3,289
|
py
|
Python
|
catwalk/cicd/build_steps.py
|
LeapBeyond/catwalk
|
49bafe146112b519653ff3417a0974afaec124a2
|
[
"Apache-2.0"
] | 1
|
2020-09-11T01:16:11.000Z
|
2020-09-11T01:16:11.000Z
|
catwalk/cicd/build_steps.py
|
LeapBeyond/catwalk
|
49bafe146112b519653ff3417a0974afaec124a2
|
[
"Apache-2.0"
] | 6
|
2020-05-14T11:15:13.000Z
|
2021-07-14T15:49:20.000Z
|
catwalk/cicd/build_steps.py
|
LeapBeyond/catwalk
|
49bafe146112b519653ff3417a0974afaec124a2
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
#
# Copyright 2019 Leap Beyond Emerging Technologies B.V. (unless otherwise stated)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
"""
Docker step-by-step building blocks:
prepare the model (generate its Dockerfile) and build the model server image
"""
import logging
import os.path as osp
import subprocess
from jinja2 import Environment, PackageLoader
from ..utils import get_model_tag_and_version
from .. import __version__ as catwalk_version
logger = logging.getLogger(__name__)
def build_prep(model_path=".", server_config=None, server_port=9090):
"""Prepares the model to be Dockerised by generating a dockerimage"""
model_path = osp.abspath(model_path)
model_tag, model_version = get_model_tag_and_version(model_path)
if server_config is None:
server_config = "false"
kwargs = {
"catwalk_version": catwalk_version,
"model_tag": model_tag,
"model_version": model_version,
"server_config": server_config,
"server_port": server_port
}
files_to_create = ["Dockerfile", ".dockerignore"]
env = Environment(loader=PackageLoader("catwalk", "templates"))
for f in files_to_create:
template_file = f + ".j2"
if template_file[0] == ".":
template_file = template_file[1:]
template = env.get_template(template_file)
rendered = template.render(**kwargs)
out_path = osp.join(model_path, f)
with open(out_path, "w") as fp:
fp.write(rendered)
logger.info("Wrote " + f)
def build(model_path=".", docker_registry=None, push=True, no_cache=False): # pragma: no cover
"""Builds the model into a Dockerised model server image."""
    model_path = osp.abspath(model_path)
    model_tag, model_version = get_model_tag_and_version(model_path)
# Setup
image_name_parts = [model_tag]
if docker_registry is not None:
image_name_parts.insert(0, docker_registry)
image_name = "/".join(image_name_parts)
docker_tag = image_name + ":" + model_version
# Perform the docker build
cmd = ["docker", "build", model_path]
cmd += ["-t", docker_tag]
if no_cache:
cmd += ["--no-cache"]
logger.info(" ".join(cmd))
result = subprocess.run(cmd, check=True)
if result.returncode != 0:
return result.returncode
logger.info("Successfully built " + docker_tag)
if not push:
return 0
# Perform the docker push
cmd = ["docker", "push", docker_tag]
logger.info(" ".join(cmd))
result = subprocess.run(cmd, check=True)
return result.returncode
| 31.932039
| 95
| 0.652174
| true
| true
|
|
7908ace045f6e0a0f8aecd2c5983686e5a9e79ba
| 2,869
|
py
|
Python
|
trab2/probOneR.py
|
RafaelPedruzzi/IA-2019-2
|
7d99a8f02ec826403bd48c6eba574d802e558c36
|
[
"MIT"
] | null | null | null |
trab2/probOneR.py
|
RafaelPedruzzi/IA-2019-2
|
7d99a8f02ec826403bd48c6eba574d802e558c36
|
[
"MIT"
] | null | null | null |
trab2/probOneR.py
|
RafaelPedruzzi/IA-2019-2
|
7d99a8f02ec826403bd48c6eba574d802e558c36
|
[
"MIT"
] | null | null | null |
## -------------------------------------------------------- ##
# Trab 2 IA 2019-2
#
# Rafael Belmock Pedruzzi
#
# probOneR.py: implementation of the probabilistic OneR classifier.
#
# Python version: 3.7.4
## -------------------------------------------------------- ##
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import euclidean_distances
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics import confusion_matrix
from itertools import product, zip_longest, accumulate
from random import random
class Prob_OneR(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
# check that x and y have correct shape
X, y = check_X_y(X,y)
# store the classes seen during fit
self.classes_ = unique_labels(y)
self.y_ = y
kbd = KBinsDiscretizer(n_bins = len(np.unique(y)), encode='ordinal')
X = kbd.fit_transform(X)
self.X_ = X
self.kbd_ = kbd
cm_list = []
hits = []
for i in X.T:
cm = contingency_matrix(i, y)
cm_list.append(cm)
hits.append(sum(max(k) for k in cm))
rule = np.argmax(hits) # chosen rule
self.r_ = rule
rule_cm = cm_list[rule]
class_selector = []
for i, c in enumerate(rule_cm):
cSum = sum(c)
probRatio = [ (i/cSum) for i in c]
# Building the "partitions" of the roulette:
probRatio = list(accumulate(probRatio))
class_selector.append(probRatio)
self.class_selector = class_selector
# Return the classifier
return self
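    # Worked example: class counts [5, 3, 2] in a bin give probabilities
    # [0.5, 0.3, 0.2]; accumulate() turns them into roulette boundaries
    # [0.5, 0.8, 1.0], so a random selector of 0.6 falls in the second slot.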
def predict(self, X):
# Check is fit had been called
check_is_fitted(self, ['X_', 'y_'])
# Input validation
X = check_array(X)
X = self.kbd_.transform(X)
y = []
for i in X[:,self.r_]:
probRatio = self.class_selector[int(i)]
# Selecting a random element:
selector = random()
for i in range(len(probRatio)):
if selector <= probRatio[i]:
y.append(self.classes_[i])
break
return y
# from sklearn import datasets
# from sklearn.model_selection import train_test_split, cross_val_score
# from sklearn.metrics import f1_score
# nn= Prob_OneR()
# iris = datasets.load_iris()
# x_train,x_test,y_train,y_test = train_test_split(iris.data,iris.target,test_size = 0.4, random_state = 0)
# nn.fit(x_train, y_train)
# y_pred = nn.predict(x_test)
# print(y_test)
# print(y_pred)
# score = cross_val_score(nn, x_train, y_train, cv = 5)
# print(score)
| 30.521277
| 107
| 0.606832
|
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import euclidean_distances
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics import confusion_matrix
from itertools import product, zip_longest, accumulate
from random import random
class Prob_OneR(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
X, y = check_X_y(X,y)
self.classes_ = unique_labels(y)
self.y_ = y
kbd = KBinsDiscretizer(n_bins = len(np.unique(y)), encode='ordinal')
X = kbd.fit_transform(X)
self.X_ = X
self.kbd_ = kbd
cm_list = []
hits = []
for i in X.T:
cm = contingency_matrix(i, y)
cm_list.append(cm)
hits.append(sum(max(k) for k in cm))
rule = np.argmax(hits)
self.r_ = rule
rule_cm = cm_list[rule]
class_selector = []
for i, c in enumerate(rule_cm):
cSum = sum(c)
probRatio = [ (i/cSum) for i in c]
probRatio = list(accumulate(probRatio))
class_selector.append(probRatio)
self.class_selector = class_selector
return self
def predict(self, X):
check_is_fitted(self, ['X_', 'y_'])
X = check_array(X)
X = self.kbd_.transform(X)
y = []
for i in X[:,self.r_]:
probRatio = self.class_selector[int(i)]
selector = random()
for i in range(len(probRatio)):
if selector <= probRatio[i]:
y.append(self.classes_[i])
break
return y
| true
| true
|
7908ad23e68529ddbb7cb39f7a16a4e6ec525a17
| 1,301
|
py
|
Python
|
var/spack/repos/builtin/packages/volk/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/volk/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/volk/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Volk(CMakePackage):
"""VOLK is the Vector-Optimized Library of Kernels. It is a
library that contains kernels of hand-written SIMD code for
different mathematical operations. Since each SIMD architecture
can be very different and no compiler has yet come along to handle
vectorization properly or highly efficiently, VOLK approaches the
problem differently.
For each architecture or platform that a developer wishes to
vectorize for, a new proto-kernel is added to VOLK. At runtime,
VOLK will select the correct proto-kernel. In this way, the users
of VOLK call a kernel for performing the operation that is
platform/architecture agnostic. This allows us to write portable
SIMD code."""
homepage = "https://github.com/gnuradio/volk"
url = "https://github.com/gnuradio/volk/archive/v2.3.0.tar.gz"
maintainers = ['aweits']
version('2.3.0', sha256='f42c928f561b128acfe4adb21227e4a62a3f6ab8103592fc3233765ff326d5fc')
depends_on('python@3.4:', type=('build', 'run'))
depends_on('py-mako@0.4.2:', type=('build', 'run'))
| 41.967742
| 95
| 0.734051
|
from spack import *
class Volk(CMakePackage):
homepage = "https://github.com/gnuradio/volk"
url = "https://github.com/gnuradio/volk/archive/v2.3.0.tar.gz"
maintainers = ['aweits']
version('2.3.0', sha256='f42c928f561b128acfe4adb21227e4a62a3f6ab8103592fc3233765ff326d5fc')
depends_on('python@3.4:', type=('build', 'run'))
depends_on('py-mako@0.4.2:', type=('build', 'run'))
| true
| true
|
7908ad6d7302df6f93d2d5efba7ad9338cdd8e22
| 1,344
|
py
|
Python
|
lecture3/bootcamp3/script.py
|
wendazhou/cds-bootcamp
|
d3289cf56fc47759afe5bac091f446e9b60037ce
|
[
"MIT"
] | 6
|
2021-09-02T18:36:11.000Z
|
2021-09-24T19:56:38.000Z
|
lecture3/bootcamp3/script.py
|
wendazhou/cds-bootcamp
|
d3289cf56fc47759afe5bac091f446e9b60037ce
|
[
"MIT"
] | null | null | null |
lecture3/bootcamp3/script.py
|
wendazhou/cds-bootcamp
|
d3289cf56fc47759afe5bac091f446e9b60037ce
|
[
"MIT"
] | 8
|
2021-09-02T23:46:30.000Z
|
2021-09-27T09:54:48.000Z
|
import dataclasses
import os
from typing import List
import hydra
@dataclasses.dataclass
class ModelConfig:
"""Configuration for the model.
Note that `block_sizes` must be specified using the `dataclasses.field`
function, as you are not allowed to supply default values for mutable fields.
Instead, the default value is supplied through a default factory function which
creates a new list every time.
"""
architecture: str = 'lenet'
hidden_size: int = 20
block_sizes: List[int] = dataclasses.field(default_factory=lambda: [10, 10, 10])
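    # Sketch of why default_factory matters: each instance gets its own list,
    # e.g. a, b = ModelConfig(), ModelConfig(); a.block_sizes.append(20)
    # leaves b.block_sizes == [10, 10, 10].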
@dataclasses.dataclass
class TrainingConfig:
model: ModelConfig = ModelConfig()
num_epochs: int = 10
data_path: str = 'data.npy'
@hydra.main(config_path=None, config_name='config')
def main(config: TrainingConfig):
print(f'Got configuration: {config}')
# Note here: when loading data, should convert to absolute path
data_path = hydra.utils.to_absolute_path(config.data_path)
print(f'Loading data from {data_path}')
# Note here: saving to relative path is set to output folder
result_path = os.path.abspath('result.txt')
print(f'Saving results to {result_path}')
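# Sketch (default hydra run dir assumed): a relative 'result.txt' lands under
# outputs/<date>/<time>/, while config.data_path is resolved against the
# original working directory via hydra.utils.to_absolute_path().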
if __name__ == '__main__':
from hydra.core.config_store import ConfigStore
cs = ConfigStore()
cs.store('config', node=TrainingConfig)
main()
| 28.595745
| 84
| 0.72247
|
import dataclasses
import os
from typing import List
import hydra
@dataclasses.dataclass
class ModelConfig:
architecture: str = 'lenet'
hidden_size: int = 20
block_sizes: List[int] = dataclasses.field(default_factory=lambda: [10, 10, 10])
@dataclasses.dataclass
class TrainingConfig:
model: ModelConfig = ModelConfig()
num_epochs: int = 10
data_path: str = 'data.npy'
@hydra.main(config_path=None, config_name='config')
def main(config: TrainingConfig):
print(f'Got configuration: {config}')
data_path = hydra.utils.to_absolute_path(config.data_path)
print(f'Loading data from {data_path}')
result_path = os.path.abspath('result.txt')
print(f'Saving results to {result_path}')
if __name__ == '__main__':
from hydra.core.config_store import ConfigStore
cs = ConfigStore()
cs.store('config', node=TrainingConfig)
main()
| true
| true
|
7908ae400c8e9322b407af8a00b4200db700094f
| 3,328
|
py
|
Python
|
antipetros_discordbot/utility/gidsql/db_action_base.py
|
official-antistasi-community/Antipetros_Discord_Bot
|
1b5c8b61c09e61cdff671e259f0478d343a50c8d
|
[
"MIT"
] | null | null | null |
antipetros_discordbot/utility/gidsql/db_action_base.py
|
official-antistasi-community/Antipetros_Discord_Bot
|
1b5c8b61c09e61cdff671e259f0478d343a50c8d
|
[
"MIT"
] | null | null | null |
antipetros_discordbot/utility/gidsql/db_action_base.py
|
official-antistasi-community/Antipetros_Discord_Bot
|
1b5c8b61c09e61cdff671e259f0478d343a50c8d
|
[
"MIT"
] | 1
|
2021-02-12T01:10:51.000Z
|
2021-02-12T01:10:51.000Z
|
# region [Imports]
# * Standard Library Imports ---------------------------------------------------------------------------->
import os
import logging
import sqlite3 as sqlite
from pprint import pformat
# * Gid Imports ----------------------------------------------------------------------------------------->
import gidlogger as glog
# endregion[Imports]
__updated__ = '2020-11-26 17:04:37'
# region [AppUserData]
# endregion [AppUserData]
# region [Logging]
log = logging.getLogger('gidsql')
glog.import_notification(log, __name__)
# endregion[Logging]
# region [Constants]
# endregion[Constants]
class GidSqliteActionBase:
def __init__(self, in_db_loc, in_pragmas=None):
self.db_loc = in_db_loc
self.pragmas = in_pragmas
glog.class_init_notification(log, self)
@property
def exists(self):
"""
        Checks whether the database file exists and logs the result.
        Returns
        -------
        bool
            True if the file exists, False otherwise.
"""
if os.path.isfile(self.db_loc):
log.info("database at %s, does EXIST", self.db_loc)
return True
else:
log.info("databse at %s does NOT EXIST", self.db_loc)
return False
@staticmethod
def _handle_error(error, sql_phrase, variables):
log.critical("%s - with SQL --> %s and args[%s]", str(error), sql_phrase, pformat(variables))
if 'syntax error' in str(error):
raise SyntaxError(error)
raise sqlite.Error(error)
def _execute_pragmas(self, in_cursor):
if self.pragmas is not None and self.pragmas != '':
in_cursor.executescript(self.pragmas)
log.debug("Executed pragmas '%s' successfully", self.pragmas)
def __repr__(self):
return f"{self.__class__.__name__} ('{self.db_loc}')"
def __str__(self):
return self.__class__.__name__
class AioGidSqliteActionBase:
def __init__(self, in_db_loc, in_pragmas=None):
self.db_loc = in_db_loc
self.pragmas = in_pragmas
glog.class_init_notification(log, self)
@property
def exists(self):
"""
        Checks whether the database file exists and logs the result.
        Returns
        -------
        bool
            True if the file exists, False otherwise.
"""
if os.path.isfile(self.db_loc):
log.info("database at %s, does EXIST", self.db_loc)
return True
else:
log.info("databse at %s does NOT EXIST", self.db_loc)
return False
@staticmethod
async def _handle_error(error, sql_phrase, variables):
log.critical("%s - with SQL --> %s and args[%s]", str(error), sql_phrase, pformat(variables))
if 'syntax error' in str(error):
raise SyntaxError(error)
raise sqlite.Error(error)
async def _execute_pragmas(self, in_connection):
if self.pragmas not in [None, '', []]:
await in_connection.executescript(self.pragmas)
log.debug("Executed pragmas '%s' successfully", self.pragmas)
def __repr__(self):
return f"{self.__class__.__name__} ('{self.db_loc}')"
def __str__(self):
return self.__class__.__name__
# region[Main_Exec]
if __name__ == '__main__':
pass
# endregion[Main_Exec]
| 26.624
| 106
| 0.578425
|
import os
import logging
import sqlite3 as sqlite
from pprint import pformat
import gidlogger as glog
__updated__ = '2020-11-26 17:04:37'
log = logging.getLogger('gidsql')
glog.import_notification(log, __name__)
class GidSqliteActionBase:
def __init__(self, in_db_loc, in_pragmas=None):
self.db_loc = in_db_loc
self.pragmas = in_pragmas
glog.class_init_notification(log, self)
@property
def exists(self):
if os.path.isfile(self.db_loc):
log.info("database at %s, does EXIST", self.db_loc)
return True
else:
log.info("databse at %s does NOT EXIST", self.db_loc)
return False
@staticmethod
def _handle_error(error, sql_phrase, variables):
log.critical("%s - with SQL --> %s and args[%s]", str(error), sql_phrase, pformat(variables))
if 'syntax error' in str(error):
raise SyntaxError(error)
raise sqlite.Error(error)
def _execute_pragmas(self, in_cursor):
if self.pragmas is not None and self.pragmas != '':
in_cursor.executescript(self.pragmas)
log.debug("Executed pragmas '%s' successfully", self.pragmas)
def __repr__(self):
return f"{self.__class__.__name__} ('{self.db_loc}')"
def __str__(self):
return self.__class__.__name__
class AioGidSqliteActionBase:
def __init__(self, in_db_loc, in_pragmas=None):
self.db_loc = in_db_loc
self.pragmas = in_pragmas
glog.class_init_notification(log, self)
@property
def exists(self):
if os.path.isfile(self.db_loc):
log.info("database at %s, does EXIST", self.db_loc)
return True
else:
log.info("databse at %s does NOT EXIST", self.db_loc)
return False
@staticmethod
async def _handle_error(error, sql_phrase, variables):
log.critical("%s - with SQL --> %s and args[%s]", str(error), sql_phrase, pformat(variables))
if 'syntax error' in str(error):
raise SyntaxError(error)
raise sqlite.Error(error)
async def _execute_pragmas(self, in_connection):
if self.pragmas not in [None, '', []]:
await in_connection.executescript(self.pragmas)
log.debug("Executed pragmas '%s' successfully", self.pragmas)
def __repr__(self):
return f"{self.__class__.__name__} ('{self.db_loc}')"
def __str__(self):
return self.__class__.__name__
if __name__ == '__main__':
pass
| true
| true
|
7908ae9467417ce8d704aefc099c2686a5ebe876
| 6,332
|
py
|
Python
|
nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet.py
|
Jiawei-Yang/TumorCP
|
6053c75642fcbc0fb0424320ab3d758f24883b0e
|
[
"Apache-2.0"
] | 12
|
2021-07-22T15:08:13.000Z
|
2022-03-10T08:15:56.000Z
|
nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet.py
|
Jiawei-Yang/TumorCP
|
6053c75642fcbc0fb0424320ab3d758f24883b0e
|
[
"Apache-2.0"
] | 1
|
2022-03-07T13:21:42.000Z
|
2022-03-07T13:21:42.000Z
|
nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet.py
|
Jiawei-Yang/TumorCP
|
6053c75642fcbc0fb0424320ab3d758f24883b0e
|
[
"Apache-2.0"
] | 3
|
2021-11-26T06:26:24.000Z
|
2022-02-14T01:23:44.000Z
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
import torch
from nnunet.network_architecture.generic_modular_residual_UNet import FabiansUNet, get_default_network_config
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
class nnUNetTrainerV2_ResencUNet(nnUNetTrainerV2):
def initialize_network(self):
if self.threeD:
cfg = get_default_network_config(3, None, norm_type="in")
else:
cfg = get_default_network_config(1, None, norm_type="in")
stage_plans = self.plans['plans_per_stage'][self.stage]
conv_kernel_sizes = stage_plans['conv_kernel_sizes']
blocks_per_stage_encoder = stage_plans['num_blocks_encoder']
blocks_per_stage_decoder = stage_plans['num_blocks_decoder']
pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']
self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2,
pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes,
blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2))
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def setup_DA_params(self):
"""
net_num_pool_op_kernel_sizes is different in resunet
"""
super().setup_DA_params()
self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
np.vstack(self.net_num_pool_op_kernel_sizes[1:]), axis=0))[:-1]
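        # Worked example: pool kernel sizes [[2,2,2],[2,2,2],[2,2,2]] give
        # cumprod [[2,2,2],[4,4,4]] over the [1:] slice, hence scales
        # [[1,1,1],[0.5,0.5,0.5]] after prepending the full-resolution level.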
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0,
segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = False
ret = nnUNetTrainer.validate(self, do_mirroring=do_mirroring, use_sliding_window=use_sliding_window,
step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian,
overwrite=overwrite, validation_folder_name=validation_folder_name,
debug=debug, all_in_gpu=all_in_gpu,
segmentation_export_kwargs=segmentation_export_kwargs,
run_postprocessing_on_folds=run_postprocessing_on_folds)
self.network.decoder.deep_supervision = ds
return ret
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
mirror_axes: Tuple[int] = None,
use_sliding_window: bool = True, step_size: float = 0.5,
use_gaussian: bool = True, pad_border_mode: str = 'constant',
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = False
ret = nnUNetTrainer.predict_preprocessed_data_return_seg_and_softmax(self, data, do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size,
use_gaussian=use_gaussian,
pad_border_mode=pad_border_mode,
pad_kwargs=pad_kwargs,
all_in_gpu=all_in_gpu,
verbose=verbose,
mixed_precision=mixed_precision)
self.network.decoder.deep_supervision = ds
return ret
def run_training(self):
        self.maybe_update_lr(self.epoch)  # if we don't overwrite epoch then self.epoch+1 is used, which is not what we
        # want at the start of the training
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = True
ret = nnUNetTrainer.run_training(self)
self.network.decoder.deep_supervision = ds
return ret
nnUNetTrainerV2_ResencUNet_copy1 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy2 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy3 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy4 = nnUNetTrainerV2_ResencUNet
| 59.735849
| 134
| 0.609128
|
from typing import Tuple
import numpy as np
import torch
from nnunet.network_architecture.generic_modular_residual_UNet import FabiansUNet, get_default_network_config
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
class nnUNetTrainerV2_ResencUNet(nnUNetTrainerV2):
def initialize_network(self):
if self.threeD:
cfg = get_default_network_config(3, None, norm_type="in")
else:
cfg = get_default_network_config(1, None, norm_type="in")
stage_plans = self.plans['plans_per_stage'][self.stage]
conv_kernel_sizes = stage_plans['conv_kernel_sizes']
blocks_per_stage_encoder = stage_plans['num_blocks_encoder']
blocks_per_stage_decoder = stage_plans['num_blocks_decoder']
pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']
self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2,
pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes,
blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2))
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def setup_DA_params(self):
super().setup_DA_params()
self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
np.vstack(self.net_num_pool_op_kernel_sizes[1:]), axis=0))[:-1]
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0,
segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = False
ret = nnUNetTrainer.validate(self, do_mirroring=do_mirroring, use_sliding_window=use_sliding_window,
step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian,
overwrite=overwrite, validation_folder_name=validation_folder_name,
debug=debug, all_in_gpu=all_in_gpu,
segmentation_export_kwargs=segmentation_export_kwargs,
run_postprocessing_on_folds=run_postprocessing_on_folds)
self.network.decoder.deep_supervision = ds
return ret
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
mirror_axes: Tuple[int] = None,
use_sliding_window: bool = True, step_size: float = 0.5,
use_gaussian: bool = True, pad_border_mode: str = 'constant',
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = False
ret = nnUNetTrainer.predict_preprocessed_data_return_seg_and_softmax(self, data, do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size,
use_gaussian=use_gaussian,
pad_border_mode=pad_border_mode,
pad_kwargs=pad_kwargs,
all_in_gpu=all_in_gpu,
verbose=verbose,
mixed_precision=mixed_precision)
self.network.decoder.deep_supervision = ds
return ret
def run_training(self):
self.maybe_update_lr(self.epoch)
ds = self.network.decoder.deep_supervision
self.network.decoder.deep_supervision = True
ret = nnUNetTrainer.run_training(self)
self.network.decoder.deep_supervision = ds
return ret
nnUNetTrainerV2_ResencUNet_copy1 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy2 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy3 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy4 = nnUNetTrainerV2_ResencUNet
| true
| true
|
7908af2d88b02dcacbcf31e7a2ec4c34bc3b05fd
| 1,733
|
py
|
Python
|
1614 Maximum Nesting Depth of the Parentheses.py
|
AtharvRedij/leetcode-solutions
|
7194d202302989d53c241b12c9befb06923b1510
|
[
"MIT"
] | null | null | null |
1614 Maximum Nesting Depth of the Parentheses.py
|
AtharvRedij/leetcode-solutions
|
7194d202302989d53c241b12c9befb06923b1510
|
[
"MIT"
] | null | null | null |
1614 Maximum Nesting Depth of the Parentheses.py
|
AtharvRedij/leetcode-solutions
|
7194d202302989d53c241b12c9befb06923b1510
|
[
"MIT"
] | 1
|
2021-03-06T06:15:48.000Z
|
2021-03-06T06:15:48.000Z
|
'''
URL: https://leetcode.com/problems/maximum-nesting-depth-of-the-parentheses/
Difficulty: Easy
Description: Maximum Nesting Depth of the Parentheses
A string is a valid parentheses string (denoted VPS) if it meets one of the following:
It is an empty string "", or a single character not equal to "(" or ")",
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(C) = 0, where C is a string with a single character not equal to "(" or ")".
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's.
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0, 1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS represented as string s, return the nesting depth of s.
Example 1:
Input: s = "(1+(2*3)+((8)/4))+1"
Output: 3
Explanation: Digit 8 is inside of 3 nested parentheses in the string.
Example 2:
Input: s = "(1)+((2))+(((3)))"
Output: 3
Example 3:
Input: s = "1+(2*3)/(2-1)"
Output: 1
Example 4:
Input: s = "1"
Output: 0
Constraints:
1 <= s.length <= 100
s consists of digits 0-9 and characters '+', '-', '*', '/', '(', and ')'.
It is guaranteed that parentheses expression s is a VPS.
'''
class Solution:
def maxDepth(self, s):
maxD = -float('inf')
currD = 0
for ch in s:
if ch not in ["(", ")"]:
continue
if ch == "(":
currD += 1
else:
maxD = max(maxD, currD)
currD -= 1
return maxD if maxD != -float('inf') else currD
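# Minimal usage sketch:
#   Solution().maxDepth("(1+(2*3)+((8)/4))+1")  # -> 3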
| 25.115942
| 118
| 0.58569
|
class Solution:
def maxDepth(self, s):
maxD = -float('inf')
currD = 0
for ch in s:
if ch not in ["(", ")"]:
continue
if ch == "(":
currD += 1
else:
maxD = max(maxD, currD)
currD -= 1
return maxD if maxD != -float('inf') else currD
| true
| true
|
7908af463e548ffa8196ab21addce3f67eb3bfdb
| 2,105
|
py
|
Python
|
examples/slack/query.py
|
q0w/snug
|
a9de335b48d96190a2bfe5e606830c4a60cb5705
|
[
"MIT"
] | 123
|
2018-01-23T17:29:29.000Z
|
2022-02-11T06:57:57.000Z
|
examples/slack/query.py
|
q0w/snug
|
a9de335b48d96190a2bfe5e606830c4a60cb5705
|
[
"MIT"
] | 274
|
2018-01-25T07:17:55.000Z
|
2022-01-20T07:37:10.000Z
|
examples/slack/query.py
|
q0w/snug
|
a9de335b48d96190a2bfe5e606830c4a60cb5705
|
[
"MIT"
] | 5
|
2017-11-26T21:31:12.000Z
|
2021-11-28T10:19:57.000Z
|
"""common logic for all queries"""
import json
from functools import partial, singledispatch
from operator import itemgetter
import snug
from gentools import (compose, map_yield, map_send, oneyield, reusable,
map_return)
from .load import registry
API_URL = 'https://slack.com/api/'
class ApiError(Exception):
pass
def _parse_content(response):
"""parse the response body as JSON, raise on errors"""
if response.status_code != 200:
raise ApiError(f'unknown error: {response.content.decode()}')
result = json.loads(response.content)
if not result['ok']:
raise ApiError(f'{result["error"]}: {result.get("detail")}')
return result
basic_interaction = compose(map_yield(snug.prefix_adder(API_URL)),
map_send(_parse_content))
"""basic request/response parsing"""
@singledispatch
def _dump_queryparam_value(val):
return str(val)
@_dump_queryparam_value.register(bool)
def _dump_bool_value(val):
return 'true' if val else 'false'
def _dump_params(params):
return {k: _dump_queryparam_value(v) for k, v in params.items()
if v is not None}
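# Sketch (hypothetical Slack params): booleans become lowercase strings and
# None values are dropped, e.g.
#   _dump_params({'limit': 20, 'exclude_archived': True, 'cursor': None})
#   -> {'limit': '20', 'exclude_archived': 'true'}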
def paginated_retrieval(methodname, itemtype):
"""decorator factory for retrieval queries from query params"""
return compose(
reusable,
basic_interaction,
map_yield(partial(_params_as_get, methodname)),
)
def _params_as_get(methodname: str, params: dict) -> snug.Request:
return snug.GET(methodname, params=_dump_params(params))
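# Hypothetical usage sketch of the factory above (Channel is an assumed type):
#   @paginated_retrieval('channels.list', itemtype=Channel)
#   def list_channels(cursor: str = None):
#       ...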
def json_post(methodname, rtype, key):
"""decorator factory for json POST queries"""
return compose(
reusable,
map_return(registry(rtype), itemgetter(key)),
basic_interaction,
map_yield(partial(_json_as_post, methodname)),
oneyield,
)
def _json_as_post(methodname: str, body: dict) -> snug.Request:
return snug.POST(methodname,
json.dumps({k: v for k, v in body.items()
if v is not None}),
headers={'Content-Type': 'application/json'})
| 26.987179
| 71
| 0.663183
|
import json
from functools import partial, singledispatch
from operator import itemgetter
import snug
from gentools import (compose, map_yield, map_send, oneyield, reusable,
map_return)
from .load import registry
API_URL = 'https://slack.com/api/'
class ApiError(Exception):
pass
def _parse_content(response):
if response.status_code != 200:
raise ApiError(f'unknown error: {response.content.decode()}')
result = json.loads(response.content)
if not result['ok']:
raise ApiError(f'{result["error"]}: {result.get("detail")}')
return result
basic_interaction = compose(map_yield(snug.prefix_adder(API_URL)),
map_send(_parse_content))
@singledispatch
def _dump_queryparam_value(val):
return str(val)
@_dump_queryparam_value.register(bool)
def _dump_bool_value(val):
return 'true' if val else 'false'
def _dump_params(params):
return {k: _dump_queryparam_value(v) for k, v in params.items()
if v is not None}
def paginated_retrieval(methodname, itemtype):
return compose(
reusable,
basic_interaction,
map_yield(partial(_params_as_get, methodname)),
)
def _params_as_get(methodname: str, params: dict) -> snug.Request:
return snug.GET(methodname, params=_dump_params(params))
def json_post(methodname, rtype, key):
return compose(
reusable,
map_return(registry(rtype), itemgetter(key)),
basic_interaction,
map_yield(partial(_json_as_post, methodname)),
oneyield,
)
def _json_as_post(methodname: str, body: dict) -> snug.Request:
return snug.POST(methodname,
json.dumps({k: v for k, v in body.items()
if v is not None}),
headers={'Content-Type': 'application/json'})
| true
| true
|
7908afa6a715b32c06a856b1922c85e7ed8995bb
| 2,774
|
py
|
Python
|
test/test_neopixel.py
|
fovallesp/esp32-python
|
95f7377e575618d1638caa2e041b5fb715d7ae90
|
[
"MIT"
] | 53
|
2019-08-24T14:04:21.000Z
|
2022-01-16T11:00:58.000Z
|
test/test_neopixel.py
|
fovallesp/esp32-python
|
95f7377e575618d1638caa2e041b5fb715d7ae90
|
[
"MIT"
] | 1
|
2020-03-28T12:03:42.000Z
|
2020-12-12T08:26:42.000Z
|
test/test_neopixel.py
|
fovallesp/esp32-python
|
95f7377e575618d1638caa2e041b5fb715d7ae90
|
[
"MIT"
] | 8
|
2019-11-08T09:15:02.000Z
|
2022-01-14T20:27:48.000Z
|
import time
import neopixel
from resetMachine import *
@pytest.fixture()
def tenPixelStrand():
pin = machine.Pin(5)
return neopixel.NeoPixel(pin, n=10)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
class TestNeoPixel:
pin = machine.Pin(5)
def test_canSetPixelColor(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
assert tenPixelStrand[0] == green
assert tenPixelStrand[1] == red
def test_mustCallWriteToDisplay(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
assert len(tenPixelStrand.writesForTesting) == 0
tenPixelStrand.write()
assert len(tenPixelStrand.writesForTesting) == 1
def test_fill(self, resetMachine, tenPixelStrand):
tenPixelStrand.fill(green)
assert _allPixelsAreColor(tenPixelStrand, green)
def test_recordsWrites(self, resetMachine, tenPixelStrand):
delayTime = 300
tenPixelStrand.fill(green)
tenPixelStrand.write()
time.sleep(delayTime / 1000)
tenPixelStrand.fill(red)
tenPixelStrand.write()
writeHistory = tenPixelStrand.writesForTesting
assert len(writeHistory) == 2
assert _allPixelsAreColor(writeHistory[0], green)
assert writeHistory[0].timeFromFirstWrite == 0
assert _allPixelsAreColor(writeHistory[1], red)
assert _approximately(writeHistory[1].timeFromFirstWrite) == delayTime
def test_writeUpdatesPixels(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
tenPixelStrand.write()
assert len(tenPixelStrand.writesForTesting) == 1
writtenStrand = tenPixelStrand.writesForTesting[0]
assert writtenStrand[0] == green
assert writtenStrand[1] == red
assert writtenStrand.timeFromFirstWrite == 0
def test_initWithDefaults(self, resetMachine):
np = neopixel.NeoPixel(self.pin, n=10)
assert np.pin == self.pin
assert np.n == 10
assert np.bpp == 3
assert np.timing == 1
def test_initWithOverrides(self, resetMachine):
np = neopixel.NeoPixel(self.pin, n=10, bpp=4, timing=2)
assert np.bpp == 4
assert np.timing == 2
def test_invalid_bytes_per_pixel(self, resetMachine):
try:
neopixel.NeoPixel(self.pin, n=10, bpp=5, timing=2)
assert 0
except OSError:
pass
def _approximately(exactMilliSeconds):
return int(exactMilliSeconds / 10) * 10
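# e.g. _approximately(304.2) -> 300: coarse 10 ms bucketing for timing asserts.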
def _allPixelsAreColor(strand, color):
pixelCount = strand.n
for i in range(pixelCount):
if strand[i] != color:
return False
return True
| 29.827957
| 78
| 0.658616
|
import time
import neopixel
from resetMachine import *
@pytest.fixture()
def tenPixelStrand():
pin = machine.Pin(5)
return neopixel.NeoPixel(pin, n=10)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
class TestNeoPixel:
pin = machine.Pin(5)
def test_canSetPixelColor(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
assert tenPixelStrand[0] == green
assert tenPixelStrand[1] == red
def test_mustCallWriteToDisplay(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
assert len(tenPixelStrand.writesForTesting) == 0
tenPixelStrand.write()
assert len(tenPixelStrand.writesForTesting) == 1
def test_fill(self, resetMachine, tenPixelStrand):
tenPixelStrand.fill(green)
assert _allPixelsAreColor(tenPixelStrand, green)
def test_recordsWrites(self, resetMachine, tenPixelStrand):
delayTime = 300
tenPixelStrand.fill(green)
tenPixelStrand.write()
time.sleep(delayTime / 1000)
tenPixelStrand.fill(red)
tenPixelStrand.write()
writeHistory = tenPixelStrand.writesForTesting
assert len(writeHistory) == 2
assert _allPixelsAreColor(writeHistory[0], green)
assert writeHistory[0].timeFromFirstWrite == 0
assert _allPixelsAreColor(writeHistory[1], red)
assert _approximately(writeHistory[1].timeFromFirstWrite) == delayTime
def test_writeUpdatesPixels(self, resetMachine, tenPixelStrand):
tenPixelStrand[0] = green
tenPixelStrand[1] = red
tenPixelStrand.write()
assert len(tenPixelStrand.writesForTesting) == 1
writtenStrand = tenPixelStrand.writesForTesting[0]
assert writtenStrand[0] == green
assert writtenStrand[1] == red
assert writtenStrand.timeFromFirstWrite == 0
def test_initWithDefaults(self, resetMachine):
np = neopixel.NeoPixel(self.pin, n=10)
assert np.pin == self.pin
assert np.n == 10
assert np.bpp == 3
assert np.timing == 1
def test_initWithOverrides(self, resetMachine):
np = neopixel.NeoPixel(self.pin, n=10, bpp=4, timing=2)
assert np.bpp == 4
assert np.timing == 2
def test_invalid_bytes_per_pixel(self, resetMachine):
try:
neopixel.NeoPixel(self.pin, n=10, bpp=5, timing=2)
assert 0
except OSError:
pass
def _approximately(exactMilliSeconds):
return int(exactMilliSeconds / 10) * 10
def _allPixelsAreColor(strand, color):
pixelCount = strand.n
for i in range(pixelCount):
if strand[i] != color:
return False
return True
| true
| true
|
7908b019cd0b882a4564f6bbc9a04bd6d1644d17
| 865
|
py
|
Python
|
setup/nvidia/nvml-test.py
|
forwardmeasure/kubeflow
|
7cfa52569c15f1716ce1dadb4352bdee9c9463a5
|
[
"MIT"
] | null | null | null |
setup/nvidia/nvml-test.py
|
forwardmeasure/kubeflow
|
7cfa52569c15f1716ce1dadb4352bdee9c9463a5
|
[
"MIT"
] | null | null | null |
setup/nvidia/nvml-test.py
|
forwardmeasure/kubeflow
|
7cfa52569c15f1716ce1dadb4352bdee9c9463a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pip install nvidia-ml-py3 --user
import pynvml
try:
pynvml.nvmlInit()
except pynvml.NVMLError as error:
print(error)
    # Driver Not Loaded: the NVIDIA driver failed to load (not installed or broken)
    # Insufficient Permission: not run as administrator; raises pynvml.NVMLError_DriverNotLoaded: Driver Not Loaded
exit()
try:
print(pynvml.nvmlDeviceGetCount())
except pynvml.NVMLError as error:
print(error)
print(pynvml.nvmlDeviceGetCount())  # total gpu count = 1
print(pynvml.nvmlSystemGetDriverVersion()) # 396.54
GPU_ID = 0
handle = pynvml.nvmlDeviceGetHandleByIndex(GPU_ID)
print(pynvml.nvmlDeviceGetName(handle)) # GeForce GTX 1060
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
MB_SIZE = 1024*1024
print(meminfo.total/MB_SIZE) # 6078 MB
print(meminfo.used/MB_SIZE) # 531 MB
print(meminfo.free/MB_SIZE) # 5546 MB
pynvml.nvmlShutdown()
| 25.441176
| 92
| 0.746821
|
import pynvml
try:
pynvml.nvmlInit()
except pynvml.NVMLError as error:
print(error)
exit()
try:
print(pynvml.nvmlDeviceGetCount())
except pynvml.NVMLError as error:
print(error)
print(pynvml.nvmlDeviceGetCount())
print(pynvml.nvmlSystemGetDriverVersion())
GPU_ID = 0
handle = pynvml.nvmlDeviceGetHandleByIndex(GPU_ID)
print(pynvml.nvmlDeviceGetName(handle))
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
MB_SIZE = 1024*1024
print(meminfo.total/MB_SIZE)
print(meminfo.used/MB_SIZE)
print(meminfo.free/MB_SIZE)
pynvml.nvmlShutdown()
| true
| true
|
7908b0333fbc448eeab2aadb5dfc1e4dfccde4d4
| 16,823
|
py
|
Python
|
django/contrib/admin/templatetags/admin_list.py
|
vpoulailleau/django
|
02365d3f38a64a5c2f3e932f23925a381d5bb151
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/contrib/admin/templatetags/admin_list.py
|
vpoulailleau/django
|
02365d3f38a64a5c2f3e932f23925a381d5bb151
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 11
|
2020-03-24T15:46:05.000Z
|
2022-03-11T23:20:58.000Z
|
django/contrib/admin/templatetags/admin_list.py
|
vpoulailleau/django
|
02365d3f38a64a5c2f3e932f23925a381d5bb151
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.templatetags.static import static
from django.urls import NoReverseMatch
from django.utils import formats
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import gettext as _
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
"""
Generate an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{}</span> ', i + 1)
else:
return format_html('<a href="{}"{}>{}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generate the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination with ellipses at the ends.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range += [
*range(0, ON_ENDS), DOT,
*range(page_num - ON_EACH_SIDE, page_num + 1),
]
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range += [
*range(page_num + 1, page_num + ON_EACH_SIDE + 1), DOT,
*range(paginator.num_pages - ON_ENDS, paginator.num_pages)
]
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
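            # Worked example: num_pages=20 and page_num=10 yield
            # [0, 1, '.', 7, 8, 9, 10, 11, 12, 13, '.', 18, 19].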
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generate the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
if attr:
field_name = _coerce_field_name(field_name, i)
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
# Is it currently being sorted on?
is_sorted = i in ordering_field_columns
if is_sorted:
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
def make_qs_param(t, n):
return ('-' if t == 'desc' else '') + str(n)
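        # e.g. make_qs_param('desc', 2) -> '-2' and make_qs_param('asc', 0) -> '0'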
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": is_sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.svg' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def _coerce_field_name(field_name, field_index):
"""
Coerce a field_name (which may be a callable) to a string.
"""
if callable(field_name):
if field_name.__name__ == '<lambda>':
return 'lambda' + str(field_index)
else:
return field_name.__name__
return field_name
def items_for_result(cl, result, form):
"""
Generate the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
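        # list_display_links semantics, as implemented below: None disables
        # row links entirely, an empty list/tuple links only the first column,
        # and otherwise exactly the named fields are linked.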
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_index, field_name in enumerate(cl.list_display):
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.remote_field, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if str(result_repr) == '':
result_repr = mark_safe(' ')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'th' if first else 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
                # Convert the pk to something that can be used in JavaScript.
                # Problem cases are non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
link_or_text = format_html(
'<a href="{}"{}>{}</a>',
url,
format_html(
' data-popup-opener="{}"', value
) if cl.is_popup else '',
result_repr)
yield format_html('<{}{}>{}</{}>',
table_tag,
row_class,
link_or_text,
table_tag)
else:
            # By default the fields come from ModelAdmin.list_editable, but if
            # we pull the fields out of the form instead of list_editable,
            # custom admins can provide fields on a per-request basis.
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(str(bf.errors) + str(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', form[cl.model._meta.pk.name])
class ResultList(list):
"""
Wrapper class used to return items in a list_editable changelist, annotated
with the form object for error reporting purposes. Needed to maintain
backwards compatibility with existing admin templates.
"""
def __init__(self, form, *items):
self.form = form
super().__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(form[cl.model._meta.pk.name])
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Display the headers and data list together.
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Display the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
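        # For example (illustrative), with date_hierarchy = 'pub_date' the
        # drill-down reads the GET parameters pub_date__year, pub_date__month
        # and pub_date__day from cl.params.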
def link(filters):
return cl.get_query_string(filters, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, 'dates')(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, 'dates')(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, 'dates')(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Display a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(cl)),
'spec': spec,
})
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
| 38.496568
| 110
| 0.569875
|
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.templatetags.static import static
from django.urls import NoReverseMatch
from django.utils import formats
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import gettext as _
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{}</span> ', i + 1)
else:
return format_html('<a href="{}"{}>{}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range += [
*range(0, ON_ENDS), DOT,
*range(page_num - ON_EACH_SIDE, page_num + 1),
]
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range += [
*range(page_num + 1, page_num + ON_EACH_SIDE + 1), DOT,
*range(paginator.num_pages - ON_ENDS, paginator.num_pages)
]
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
if attr:
field_name = _coerce_field_name(field_name, i)
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
yield {
"text": text,
"class_attrib": format_html(' class="column-{}"', field_name),
"sortable": False,
}
continue
th_classes = ['sortable', 'column-{}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
is_sorted = i in ordering_field_columns
if is_sorted:
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
o_list_primary = []
o_list_remove = []
o_list_toggle = []
def make_qs_param(t, n):
return ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i:
param = make_qs_param(new_order_type, j)
o_list_primary.insert(0, param)
o_list_toggle.append(param)
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": is_sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.svg' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def _coerce_field_name(field_name, field_index):
if callable(field_name):
if field_name.__name__ == '<lambda>':
return 'lambda' + str(field_index)
else:
return field_name.__name__
return field_name
def items_for_result(cl, result, form):
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_index, field_name in enumerate(cl.list_display):
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.remote_field, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if str(result_repr) == '':
result_repr = mark_safe(' ')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
if link_in_col(first, field_name, cl):
table_tag = 'th' if first else 'td'
first = False
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
link_or_text = format_html(
'<a href="{}"{}>{}</a>',
url,
format_html(
' data-popup-opener="{}"', value
) if cl.is_popup else '',
result_repr)
yield format_html('<{}{}>{}</{}>',
table_tag,
row_class,
link_or_text,
table_tag)
else:
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(str(bf.errors) + str(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', form[cl.model._meta.pk.name])
class ResultList(list):
def __init__(self, form, *items):
self.form = form
super().__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(form[cl.model._meta.pk.name])
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
if cl.date_hierarchy:
field_name = cl.date_hierarchy
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
def link(filters):
return cl.get_query_string(filters, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, 'dates')(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, 'dates')(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, 'dates')(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(cl)),
'spec': spec,
})
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
context['action_index'] = context.get('action_index', -1) + 1
return context
| true
| true
|
7908b03619f96fddb6b69d3f8632e4174da1158f
| 3,927
|
py
|
Python
|
ganjoor/spiders/hojviri/kashfol-mahjoob/scrapyshkmbab39.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
ganjoor/spiders/hojviri/kashfol-mahjoob/scrapyshkmbab39.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
ganjoor/spiders/hojviri/kashfol-mahjoob/scrapyshkmbab39.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
class scrapyshkmbab39Spider(scrapy.Spider):
name = "scrapyshkmbab39"
allowed_domains = ["ganjoor.net"]
if 1 == 1:
start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh"]
else:
start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh" + "1"]
order = 1
def parse(self, response):
index = 0
sh = dict()
sh["type"] = "fasl"
sh["text"] = dict()
for i, poem in enumerate(response.css("div.poem>article>*")):
if index == 0:
if 0 == 1:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 0 == 2:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m2>p::text").extract()).strip()
elif 0 == 3:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 0 == 4:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m2>p::text").extract()).strip()
else:
sh["title"] = ''.join(response.css("div.poem>article>h2>a::text").extract_first()).strip()
if poem.css("p::text").extract_first() is None or 'rel="bookmark"' in poem.css('*').extract_first() or 'class="spacer"' in poem.css('*').extract_first() or '<div style=' in poem.css('*').extract_first():
continue
if len(poem.css("div.m1>p")) == 1:
if poem.css("div.b"):
if '٭٭٭' not in poem.css("div.m1>p::text").extract_first() and ''.join(poem.css("div.m1>p::text").extract()).strip() != '':
sh["text"][index] = dict([
("m1", ''.join(poem.css("div.m1>p::text").extract()).strip()),
("m2", ''.join(poem.css("div.m2>p::text").extract()).strip()),
])
else:
if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if poem.css("div.b2"):
if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if '٭٭٭' not in poem.css('p::text').extract_first() and ''.join(poem.css('p::text').extract()).strip() != '':
sh['text'][index] = dict([
('p', ''.join(poem.css('p::text').extract()).strip())
])
index = index + 1
sh["order"] = self.order
self.order = self.order + 1
yield sh
# next_page = response.css("div.navigation>div.navleft>a::attr(href)").extract_first()
if self.order < (1 + 1):
next_page = response.urljoin("https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh" + str(self.order))
yield scrapy.Request(next_page, callback=self.parse)
| 59.5
| 215
| 0.466514
|
import scrapy
class scrapyshkmbab39Spider(scrapy.Spider):
name = "scrapyshkmbab39"
allowed_domains = ["ganjoor.net"]
if 1 == 1:
start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh"]
else:
start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh" + "1"]
order = 1
def parse(self, response):
index = 0
sh = dict()
sh["type"] = "fasl"
sh["text"] = dict()
for i, poem in enumerate(response.css("div.poem>article>*")):
if index == 0:
if 0 == 1:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 0 == 2:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m2>p::text").extract()).strip()
elif 0 == 3:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 0 == 4:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m2>p::text").extract()).strip()
else:
sh["title"] = ''.join(response.css("div.poem>article>h2>a::text").extract_first()).strip()
if poem.css("p::text").extract_first() is None or 'rel="bookmark"' in poem.css('*').extract_first() or 'class="spacer"' in poem.css('*').extract_first() or '<div style=' in poem.css('*').extract_first():
continue
if len(poem.css("div.m1>p")) == 1:
if poem.css("div.b"):
if '٭٭٭' not in poem.css("div.m1>p::text").extract_first() and ''.join(poem.css("div.m1>p::text").extract()).strip() != '':
sh["text"][index] = dict([
("m1", ''.join(poem.css("div.m1>p::text").extract()).strip()),
("m2", ''.join(poem.css("div.m2>p::text").extract()).strip()),
])
else:
if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if poem.css("div.b2"):
if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if '٭٭٭' not in poem.css('p::text').extract_first() and ''.join(poem.css('p::text').extract()).strip() != '':
sh['text'][index] = dict([
('p', ''.join(poem.css('p::text').extract()).strip())
])
index = index + 1
sh["order"] = self.order
self.order = self.order + 1
yield sh
if self.order < (1 + 1):
next_page = response.urljoin("https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh" + str(self.order))
yield scrapy.Request(next_page, callback=self.parse)
| true
| true
|
7908b0bf375437c6d5f508a3ec2a505cbe0e3908
| 17,094
|
py
|
Python
|
chip/mchp/util/pack_ec.py
|
coreboot/chrome-ec
|
61044db105bc854167efe83815acb3fcb55deb85
|
[
"BSD-3-Clause"
] | 46
|
2017-02-12T20:48:45.000Z
|
2022-03-01T15:53:39.000Z
|
chip/mchp/util/pack_ec.py
|
coreboot/chrome-ec
|
61044db105bc854167efe83815acb3fcb55deb85
|
[
"BSD-3-Clause"
] | 1
|
2022-01-08T23:28:01.000Z
|
2022-01-09T00:43:16.000Z
|
chip/mchp/util/pack_ec.py
|
coreboot/chrome-ec
|
61044db105bc854167efe83815acb3fcb55deb85
|
[
"BSD-3-Clause"
] | 46
|
2016-02-07T18:43:27.000Z
|
2022-01-03T02:30:51.000Z
|
#!/usr/bin/env python3
# Copyright 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Ignore indention messages, since legacy scripts use 2 spaces instead of 4.
# pylint: disable=bad-indentation,docstring-section-indent
# pylint: disable=docstring-trailing-quotes
# A script to pack EC binary into SPI flash image for MEC17xx
# Based on MEC170x_ROM_Description.pdf DS00002225C (07-28-17).
import argparse
import hashlib
import os
import struct
import subprocess
import tempfile
import zlib # CRC32
# MEC1701 has 256KB SRAM from 0xE0000 - 0x120000
# SRAM is divided into contiguous CODE & DATA
# CODE at [0xE0000, 0x117FFF] DATA at [0x118000, 0x11FFFF]
# SPI flash size for board is 512KB
# Boot-ROM TAG is located at SPI offset 0 (two 4-byte tags)
#
LFW_SIZE = 0x1000
LOAD_ADDR = 0x0E0000
LOAD_ADDR_RW = 0xE1000
HEADER_SIZE = 0x40
SPI_CLOCK_LIST = [48, 24, 16, 12]
SPI_READ_CMD_LIST = [0x3, 0xb, 0x3b, 0x6b]
CRC_TABLE = [0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15,
0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d]
def mock_print(*args, **kwargs):
pass
debug_print = mock_print
def Crc8(crc, data):
"""Update CRC8 value."""
for v in data:
    crc = ((crc << 4) & 0xff) ^ (CRC_TABLE[(crc >> 4) ^ (v >> 4)])
    crc = ((crc << 4) & 0xff) ^ (CRC_TABLE[(crc >> 4) ^ (v & 0xf)])
return crc ^ 0x55
def GetEntryPoint(payload_file):
"""Read entry point from payload EC image."""
with open(payload_file, 'rb') as f:
f.seek(4)
s = f.read(4)
return struct.unpack('<I', s)[0]
def GetPayloadFromOffset(payload_file, offset):
"""Read payload and pad it to 64-byte aligned."""
with open(payload_file, 'rb') as f:
f.seek(offset)
payload = bytearray(f.read())
rem_len = len(payload) % 64
if rem_len:
payload += b'\0' * (64 - rem_len)
return payload
def GetPayload(payload_file):
"""Read payload and pad it to 64-byte aligned."""
return GetPayloadFromOffset(payload_file, 0)
def GetPublicKey(pem_file):
"""Extract public exponent and modulus from PEM file."""
result = subprocess.run(['openssl', 'rsa', '-in', pem_file, '-text',
'-noout'], stdout=subprocess.PIPE, encoding='utf-8')
modulus_raw = []
in_modulus = False
for line in result.stdout.splitlines():
if line.startswith('modulus'):
in_modulus = True
elif not line.startswith(' '):
in_modulus = False
elif in_modulus:
modulus_raw.extend(line.strip().strip(':').split(':'))
if line.startswith('publicExponent'):
exp = int(line.split(' ')[1], 10)
modulus_raw.reverse()
modulus = bytearray((int(x, 16) for x in modulus_raw[:256]))
return struct.pack('<Q', exp), modulus
def GetSpiClockParameter(args):
assert args.spi_clock in SPI_CLOCK_LIST, \
"Unsupported SPI clock speed %d MHz" % args.spi_clock
return SPI_CLOCK_LIST.index(args.spi_clock)
def GetSpiReadCmdParameter(args):
assert args.spi_read_cmd in SPI_READ_CMD_LIST, \
"Unsupported SPI read command 0x%x" % args.spi_read_cmd
return SPI_READ_CMD_LIST.index(args.spi_read_cmd)
def PadZeroTo(data, size):
data.extend(b'\0' * (size - len(data)))
def BuildHeader(args, payload_len, load_addr, rorofile):
# Identifier and header version
header = bytearray(b'PHCM\0')
# byte[5]
b = GetSpiClockParameter(args)
b |= (1 << 2)
header.append(b)
# byte[6]
b = 0
header.append(b)
# byte[7]
header.append(GetSpiReadCmdParameter(args))
# bytes 0x08 - 0x0b
header.extend(struct.pack('<I', load_addr))
# bytes 0x0c - 0x0f
header.extend(struct.pack('<I', GetEntryPoint(rorofile)))
# bytes 0x10 - 0x13
header.append((payload_len >> 6) & 0xff)
header.append((payload_len >> 14) & 0xff)
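  # The two bytes above encode the payload length in 64-byte units,
  # little-endian; e.g. (illustrative) payload_len = 0x35000 stores
  # 0x40 then 0x0D, i.e. 0xD40 units of 64 bytes.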
PadZeroTo(header, 0x14)
# bytes 0x14 - 0x17
header.extend(struct.pack('<I', args.payload_offset))
  # bytes 0x18 - 0x3F all 0
PadZeroTo(header, 0x40)
# header signature is appended by the caller
return header
def BuildHeader2(args, payload_len, load_addr, payload_entry):
# Identifier and header version
header = bytearray(b'PHCM\0')
# byte[5]
b = GetSpiClockParameter(args)
b |= (1 << 2)
header.append(b)
# byte[6]
b = 0
header.append(b)
# byte[7]
header.append(GetSpiReadCmdParameter(args))
# bytes 0x08 - 0x0b
header.extend(struct.pack('<I', load_addr))
# bytes 0x0c - 0x0f
header.extend(struct.pack('<I', payload_entry))
# bytes 0x10 - 0x13
header.append((payload_len >> 6) & 0xff)
header.append((payload_len >> 14) & 0xff)
PadZeroTo(header, 0x14)
# bytes 0x14 - 0x17
header.extend(struct.pack('<I', args.payload_offset))
  # bytes 0x18 - 0x3F all 0
PadZeroTo(header, 0x40)
# header signature is appended by the caller
return header
#
# Compute SHA-256 of data and return digest
# as a bytearray
#
def HashByteArray(data):
hasher = hashlib.sha256()
hasher.update(data)
h = hasher.digest()
bah = bytearray(h)
return bah
#
# Return 64-byte signature of byte array data.
# Signature is SHA256 of data with 32 0 bytes appended
#
def SignByteArray(data):
debug_print("Signature is SHA-256 of data")
sigb = HashByteArray(data)
sigb.extend(b'\0' * 32)
return sigb
# MEC1701H supports two 32-bit Tags located at offsets 0x0 and 0x4
# in the SPI flash.
# Tag format:
# bits[23:0] correspond to bits[31:8] of the Header SPI address
# Header is always on a 256-byte boundary.
# bits[31:24] = CRC8-ITU of bits[23:0].
# Notice there is no chip-select field in the Tag both Tag's point
# to the same flash part.
#
def BuildTag(args):
tag = bytearray([(args.header_loc >> 8) & 0xff,
(args.header_loc >> 16) & 0xff,
(args.header_loc >> 24) & 0xff])
tag.append(Crc8(0, tag))
return tag
def BuildTagFromHdrAddr(header_loc):
tag = bytearray([(header_loc >> 8) & 0xff,
(header_loc >> 16) & 0xff,
(header_loc >> 24) & 0xff])
tag.append(Crc8(0, tag))
return tag
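# Illustrative value computed with the functions above: a header at SPI
# offset 0x1000 encodes as bytes 0x10 0x00 0x00 plus CRC8 0xF7, i.e.
# BuildTagFromHdrAddr(0x1000) == bytearray(b'\x10\x00\x00\xf7').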
#
# Creates temporary file for read/write
# Reads the binary file containing the LFW image (loader_file)
# Writes LFW image to temporary file
# Reads RO image at beginning of rorw_file up to image_size
# (assumes RO/RW images have been padded with 0xFF)
# Returns temporary file name
#
def PacklfwRoImage(rorw_file, loader_file, image_size):
"""Create a temp file with the
first image_size bytes from the loader file and append bytes
from the rorw file.
return the filename"""
fo=tempfile.NamedTemporaryFile(delete=False) # Need to keep file around
with open(loader_file,'rb') as fin1: # read 4KB loader file
pro = fin1.read()
fo.write(pro) # write 4KB loader data to temp file
with open(rorw_file, 'rb') as fin:
ro = fin.read(image_size)
fo.write(ro)
fo.close()
return fo.name
#
# Generate a test EC_RW image of same size
# as original.
# Preserve image_data structure and fill all
# other bytes with 0xA5.
# useful for testing SPI read and EC build
# process hash generation.
#
def gen_test_ecrw(pldrw):
debug_print("gen_test_ecrw: pldrw type =", type(pldrw))
debug_print("len pldrw =", len(pldrw), " = ", hex(len(pldrw)))
cookie1_pos = pldrw.find(b'\x99\x88\x77\xce')
cookie2_pos = pldrw.find(b'\xdd\xbb\xaa\xce', cookie1_pos+4)
t = struct.unpack("<L", pldrw[cookie1_pos+0x24:cookie1_pos+0x28])
size = t[0]
debug_print("EC_RW size =", size, " = ", hex(size))
debug_print("Found cookie1 at ", hex(cookie1_pos))
debug_print("Found cookie2 at ", hex(cookie2_pos))
if cookie1_pos > 0 and cookie2_pos > cookie1_pos:
for i in range(0, cookie1_pos):
pldrw[i] = 0xA5
for i in range(cookie2_pos+4, len(pldrw)):
pldrw[i] = 0xA5
with open("ec_RW_test.bin", "wb") as fecrw:
fecrw.write(pldrw[:size])
def parseargs():
rpath = os.path.dirname(os.path.relpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input",
help="EC binary to pack, usually ec.bin or ec.RO.flat.",
metavar="EC_BIN", default="ec.bin")
parser.add_argument("-o", "--output",
help="Output flash binary file",
metavar="EC_SPI_FLASH", default="ec.packed.bin")
parser.add_argument("--loader_file",
help="EC loader binary",
default="ecloader.bin")
parser.add_argument("-s", "--spi_size", type=int,
help="Size of the SPI flash in KB",
default=512)
parser.add_argument("-l", "--header_loc", type=int,
help="Location of header in SPI flash",
default=0x1000)
parser.add_argument("-p", "--payload_offset", type=int,
help="The offset of payload from the start of header",
default=0x80)
parser.add_argument("-r", "--rw_loc", type=int,
help="Start offset of EC_RW. Default is -1 meaning 1/2 flash size",
default=-1)
parser.add_argument("--spi_clock", type=int,
help="SPI clock speed. 8, 12, 24, or 48 MHz.",
default=24)
parser.add_argument("--spi_read_cmd", type=int,
help="SPI read command. 0x3, 0xB, or 0x3B.",
default=0xb)
parser.add_argument("--image_size", type=int,
help="Size of a single image. Default 220KB",
default=(220 * 1024))
parser.add_argument("--test_spi", action='store_true',
help="Test SPI data integrity by adding CRC32 in last 4-bytes of RO/RW binaries",
default=False)
parser.add_argument("--test_ecrw", action='store_true',
help="Use fixed pattern for EC_RW but preserve image_data",
default=False)
parser.add_argument("--verbose", action='store_true',
help="Enable verbose output",
default=False)
return parser.parse_args()
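# Example invocation (hypothetical file names):
#   python3 pack_ec.py -i ec.bin --loader_file ecloader.bin -s 512 \
#       -o ec.packed.bin --verbose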
# Debug helper routine
def dumpsects(spi_list):
debug_print("spi_list has {0} entries".format(len(spi_list)))
for s in spi_list:
debug_print("0x{0:x} 0x{1:x} {2:s}".format(s[0],len(s[1]),s[2]))
def printByteArrayAsHex(ba, title):
debug_print(title,"= ")
count = 0
for b in ba:
count = count + 1
debug_print("0x{0:02x}, ".format(b),end="")
if (count % 8) == 0:
debug_print("")
debug_print("\n")
def print_args(args):
debug_print("parsed arguments:")
debug_print(".input = ", args.input)
debug_print(".output = ", args.output)
debug_print(".loader_file = ", args.loader_file)
debug_print(".spi_size (KB) = ", hex(args.spi_size))
debug_print(".image_size = ", hex(args.image_size))
debug_print(".header_loc = ", hex(args.header_loc))
debug_print(".payload_offset = ", hex(args.payload_offset))
if args.rw_loc < 0:
debug_print(".rw_loc = ", args.rw_loc)
else:
debug_print(".rw_loc = ", hex(args.rw_loc))
debug_print(".spi_clock = ", args.spi_clock)
debug_print(".spi_read_cmd = ", args.spi_read_cmd)
debug_print(".test_spi = ", args.test_spi)
debug_print(".verbose = ", args.verbose)
#
# Handle quiet mode build from Makefile
# Quiet mode when V is unset or V=0
# Verbose mode when V=1
#
def main():
global debug_print
args = parseargs()
if args.verbose:
debug_print = print
debug_print("Begin MEC17xx pack_ec.py script")
# MEC17xx maximum 192KB each for RO & RW
# mec1701 chip Makefile sets args.spi_size = 512
# Tags at offset 0
#
print_args(args)
spi_size = args.spi_size * 1024
debug_print("SPI Flash image size in bytes =", hex(spi_size))
# !!! IMPORTANT !!!
# These values MUST match chip/mec1701/config_flash_layout.h
# defines.
# MEC17xx Boot-ROM TAGs are at offset 0 and 4.
# lfw + EC_RO starts at beginning of second 4KB sector
# EC_RW starts at offset 0x40000 (256KB)
spi_list = []
debug_print("args.input = ",args.input)
debug_print("args.loader_file = ",args.loader_file)
debug_print("args.image_size = ",hex(args.image_size))
rorofile=PacklfwRoImage(args.input, args.loader_file, args.image_size)
payload = GetPayload(rorofile)
payload_len = len(payload)
# debug
debug_print("EC_LFW + EC_RO length = ",hex(payload_len))
# SPI image integrity test
# compute CRC32 of EC_RO except for last 4 bytes
# skip over 4KB LFW
# Store CRC32 in last 4 bytes
  if args.test_spi:
crc = zlib.crc32(bytes(payload[LFW_SIZE:(payload_len - 4)]))
crc_ofs = payload_len - 4
debug_print("EC_RO CRC32 = 0x{0:08x} @ 0x{1:08x}".format(crc, crc_ofs))
for i in range(4):
payload[crc_ofs + i] = crc & 0xff
crc = crc >> 8
# Chromebooks are not using MEC BootROM ECDSA.
# We implemented the ECDSA disabled case where
# the 64-byte signature contains a SHA-256 of the binary plus
  # 32 zero bytes.
payload_signature = SignByteArray(payload)
# debug
printByteArrayAsHex(payload_signature, "LFW + EC_RO payload_signature")
  # MEC17xx Header is 0x80 bytes with a 64-byte signature
# (32 byte SHA256 + 32 zero bytes)
header = BuildHeader(args, payload_len, LOAD_ADDR, rorofile)
# debug
printByteArrayAsHex(header, "Header LFW + EC_RO")
# MEC17xx payload ECDSA not used, 64 byte signature is
# SHA256 + 32 zero bytes
header_signature = SignByteArray(header)
# debug
printByteArrayAsHex(header_signature, "header_signature")
tag = BuildTag(args)
# MEC17xx truncate RW length to args.image_size to not overwrite LFW
# offset may be different due to Header size and other changes
# MCHP we want to append a SHA-256 to the end of the actual payload
# to test SPI read routines.
debug_print("Call to GetPayloadFromOffset")
debug_print("args.input = ", args.input)
debug_print("args.image_size = ", hex(args.image_size))
payload_rw = GetPayloadFromOffset(args.input, args.image_size)
debug_print("type(payload_rw) is ", type(payload_rw))
debug_print("len(payload_rw) is ", hex(len(payload_rw)))
# truncate to args.image_size
rw_len = args.image_size
payload_rw = payload_rw[:rw_len]
payload_rw_len = len(payload_rw)
debug_print("Truncated size of EC_RW = ", hex(payload_rw_len))
payload_entry_tuple = struct.unpack_from('<I', payload_rw, 4)
debug_print("payload_entry_tuple = ", payload_entry_tuple)
payload_entry = payload_entry_tuple[0]
debug_print("payload_entry = ", hex(payload_entry))
# Note: payload_rw is a bytearray therefore is mutable
if args.test_ecrw:
gen_test_ecrw(payload_rw)
# SPI image integrity test
  # compute CRC32 of EC_RW except for the last 32 bytes
# Store CRC32 in last 4 bytes
  if args.test_spi:
crc = zlib.crc32(bytes(payload_rw[:(payload_rw_len - 32)]))
crc_ofs = payload_rw_len - 4
debug_print("EC_RW CRC32 = 0x{0:08x} at offset 0x{1:08x}".format(crc, crc_ofs))
for i in range(4):
payload_rw[crc_ofs + i] = crc & 0xff
crc = crc >> 8
payload_rw_sig = SignByteArray(payload_rw)
# debug
printByteArrayAsHex(payload_rw_sig, "payload_rw_sig")
os.remove(rorofile) # clean up the temp file
# MEC170x Boot-ROM Tags are located at SPI offset 0
spi_list.append((0, tag, "tag"))
  spi_list.append((args.header_loc, header, "header(lfw + ro)"))
  spi_list.append((args.header_loc + HEADER_SIZE, header_signature,
                   "header(lfw + ro) signature"))
spi_list.append((args.header_loc + args.payload_offset, payload,
"payload(lfw + ro)"))
offset = args.header_loc + args.payload_offset + payload_len
  # No SPI Header for EC_RW as it's not loaded by BootROM
spi_list.append((offset, payload_signature,
"payload(lfw_ro) signature"))
# EC_RW location
rw_offset = int(spi_size // 2)
if args.rw_loc >= 0:
rw_offset = args.rw_loc
debug_print("rw_offset = 0x{0:08x}".format(rw_offset))
if rw_offset < offset + len(payload_signature):
print("ERROR: EC_RW overlaps EC_RO")
spi_list.append((rw_offset, payload_rw, "payload(rw)"))
# don't add to EC_RW. We don't know if Google will process
# EC SPI flash binary with other tools during build of
# coreboot and OS.
#offset = rw_offset + payload_rw_len
#spi_list.append((offset, payload_rw_sig, "payload(rw) signature"))
spi_list = sorted(spi_list)
dumpsects(spi_list)
#
# MEC17xx Boot-ROM locates TAG at SPI offset 0 instead of end of SPI.
#
with open(args.output, 'wb') as f:
debug_print("Write spi list to file", args.output)
addr = 0
for s in spi_list:
if addr < s[0]:
debug_print("Offset ",hex(addr)," Length", hex(s[0]-addr),
"fill with 0xff")
f.write(b'\xff' * (s[0] - addr))
addr = s[0]
debug_print("Offset ",hex(addr), " Length", hex(len(s[1])), "write data")
f.write(s[1])
addr += len(s[1])
if addr < spi_size:
debug_print("Offset ",hex(addr), " Length", hex(spi_size - addr),
"fill with 0xff")
f.write(b'\xff' * (spi_size - addr))
f.flush()
if __name__ == '__main__':
main()
| 31.832402
| 103
| 0.663449
|
import argparse
import hashlib
import os
import struct
import subprocess
import tempfile
import zlib
LFW_SIZE = 0x1000
LOAD_ADDR = 0x0E0000
LOAD_ADDR_RW = 0xE1000
HEADER_SIZE = 0x40
SPI_CLOCK_LIST = [48, 24, 16, 12]
SPI_READ_CMD_LIST = [0x3, 0xb, 0x3b, 0x6b]
CRC_TABLE = [0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15,
0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d]
def mock_print(*args, **kwargs):
pass
debug_print = mock_print
def Crc8(crc, data):
for v in data:
    crc = ((crc << 4) & 0xff) ^ (CRC_TABLE[(crc >> 4) ^ (v >> 4)])
    crc = ((crc << 4) & 0xff) ^ (CRC_TABLE[(crc >> 4) ^ (v & 0xf)])
return crc ^ 0x55
def GetEntryPoint(payload_file):
with open(payload_file, 'rb') as f:
f.seek(4)
s = f.read(4)
return struct.unpack('<I', s)[0]
def GetPayloadFromOffset(payload_file, offset):
with open(payload_file, 'rb') as f:
f.seek(offset)
payload = bytearray(f.read())
rem_len = len(payload) % 64
if rem_len:
payload += b'\0' * (64 - rem_len)
return payload
def GetPayload(payload_file):
return GetPayloadFromOffset(payload_file, 0)
def GetPublicKey(pem_file):
result = subprocess.run(['openssl', 'rsa', '-in', pem_file, '-text',
'-noout'], stdout=subprocess.PIPE, encoding='utf-8')
modulus_raw = []
in_modulus = False
for line in result.stdout.splitlines():
if line.startswith('modulus'):
in_modulus = True
elif not line.startswith(' '):
in_modulus = False
elif in_modulus:
modulus_raw.extend(line.strip().strip(':').split(':'))
if line.startswith('publicExponent'):
exp = int(line.split(' ')[1], 10)
modulus_raw.reverse()
modulus = bytearray((int(x, 16) for x in modulus_raw[:256]))
return struct.pack('<Q', exp), modulus
def GetSpiClockParameter(args):
assert args.spi_clock in SPI_CLOCK_LIST, \
"Unsupported SPI clock speed %d MHz" % args.spi_clock
return SPI_CLOCK_LIST.index(args.spi_clock)
def GetSpiReadCmdParameter(args):
assert args.spi_read_cmd in SPI_READ_CMD_LIST, \
"Unsupported SPI read command 0x%x" % args.spi_read_cmd
return SPI_READ_CMD_LIST.index(args.spi_read_cmd)
def PadZeroTo(data, size):
data.extend(b'\0' * (size - len(data)))
def BuildHeader(args, payload_len, load_addr, rorofile):
header = bytearray(b'PHCM\0')
b = GetSpiClockParameter(args)
b |= (1 << 2)
header.append(b)
b = 0
header.append(b)
header.append(GetSpiReadCmdParameter(args))
header.extend(struct.pack('<I', load_addr))
header.extend(struct.pack('<I', GetEntryPoint(rorofile)))
header.append((payload_len >> 6) & 0xff)
header.append((payload_len >> 14) & 0xff)
PadZeroTo(header, 0x14)
header.extend(struct.pack('<I', args.payload_offset))
PadZeroTo(header, 0x40)
return header
def BuildHeader2(args, payload_len, load_addr, payload_entry):
header = bytearray(b'PHCM\0')
b = GetSpiClockParameter(args)
b |= (1 << 2)
header.append(b)
b = 0
header.append(b)
header.append(GetSpiReadCmdParameter(args))
header.extend(struct.pack('<I', load_addr))
header.extend(struct.pack('<I', payload_entry))
header.append((payload_len >> 6) & 0xff)
header.append((payload_len >> 14) & 0xff)
PadZeroTo(header, 0x14)
header.extend(struct.pack('<I', args.payload_offset))
PadZeroTo(header, 0x40)
return header
def HashByteArray(data):
hasher = hashlib.sha256()
hasher.update(data)
h = hasher.digest()
bah = bytearray(h)
return bah
def SignByteArray(data):
debug_print("Signature is SHA-256 of data")
sigb = HashByteArray(data)
sigb.extend(b'\0' * 32)
return sigb
def BuildTag(args):
tag = bytearray([(args.header_loc >> 8) & 0xff,
(args.header_loc >> 16) & 0xff,
(args.header_loc >> 24) & 0xff])
tag.append(Crc8(0, tag))
return tag
def BuildTagFromHdrAddr(header_loc):
tag = bytearray([(header_loc >> 8) & 0xff,
(header_loc >> 16) & 0xff,
(header_loc >> 24) & 0xff])
tag.append(Crc8(0, tag))
return tag
#
# Creates temporary file for read/write
# Reads the binary file containing the LFW image (loader_file)
# Writes LFW image to temporary file
# Reads RO image at beginning of rorw_file up to image_size
# (assumes RO/RW images have been padded with 0xFF)
# Returns temporary file name
#
def PacklfwRoImage(rorw_file, loader_file, image_size):
fo=tempfile.NamedTemporaryFile(delete=False) # Need to keep file around
with open(loader_file,'rb') as fin1: # read 4KB loader file
pro = fin1.read()
fo.write(pro) # write 4KB loader data to temp file
with open(rorw_file, 'rb') as fin:
ro = fin.read(image_size)
fo.write(ro)
fo.close()
return fo.name
#
# Generate a test EC_RW image of same size
# as original.
# Preserve image_data structure and fill all
# other bytes with 0xA5.
# useful for testing SPI read and EC build
# process hash generation.
#
def gen_test_ecrw(pldrw):
debug_print("gen_test_ecrw: pldrw type =", type(pldrw))
debug_print("len pldrw =", len(pldrw), " = ", hex(len(pldrw)))
cookie1_pos = pldrw.find(b'\x99\x88\x77\xce')
cookie2_pos = pldrw.find(b'\xdd\xbb\xaa\xce', cookie1_pos+4)
t = struct.unpack("<L", pldrw[cookie1_pos+0x24:cookie1_pos+0x28])
size = t[0]
debug_print("EC_RW size =", size, " = ", hex(size))
debug_print("Found cookie1 at ", hex(cookie1_pos))
debug_print("Found cookie2 at ", hex(cookie2_pos))
if cookie1_pos > 0 and cookie2_pos > cookie1_pos:
for i in range(0, cookie1_pos):
pldrw[i] = 0xA5
for i in range(cookie2_pos+4, len(pldrw)):
pldrw[i] = 0xA5
with open("ec_RW_test.bin", "wb") as fecrw:
fecrw.write(pldrw[:size])
def parseargs():
rpath = os.path.dirname(os.path.relpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input",
help="EC binary to pack, usually ec.bin or ec.RO.flat.",
metavar="EC_BIN", default="ec.bin")
parser.add_argument("-o", "--output",
help="Output flash binary file",
metavar="EC_SPI_FLASH", default="ec.packed.bin")
parser.add_argument("--loader_file",
help="EC loader binary",
default="ecloader.bin")
parser.add_argument("-s", "--spi_size", type=int,
help="Size of the SPI flash in KB",
default=512)
parser.add_argument("-l", "--header_loc", type=int,
help="Location of header in SPI flash",
default=0x1000)
parser.add_argument("-p", "--payload_offset", type=int,
help="The offset of payload from the start of header",
default=0x80)
parser.add_argument("-r", "--rw_loc", type=int,
help="Start offset of EC_RW. Default is -1 meaning 1/2 flash size",
default=-1)
parser.add_argument("--spi_clock", type=int,
help="SPI clock speed. 8, 12, 24, or 48 MHz.",
default=24)
parser.add_argument("--spi_read_cmd", type=int,
help="SPI read command. 0x3, 0xB, or 0x3B.",
default=0xb)
parser.add_argument("--image_size", type=int,
help="Size of a single image. Default 220KB",
default=(220 * 1024))
parser.add_argument("--test_spi", action='store_true',
help="Test SPI data integrity by adding CRC32 in last 4-bytes of RO/RW binaries",
default=False)
parser.add_argument("--test_ecrw", action='store_true',
help="Use fixed pattern for EC_RW but preserve image_data",
default=False)
parser.add_argument("--verbose", action='store_true',
help="Enable verbose output",
default=False)
return parser.parse_args()
# Debug helper routine
def dumpsects(spi_list):
debug_print("spi_list has {0} entries".format(len(spi_list)))
for s in spi_list:
debug_print("0x{0:x} 0x{1:x} {2:s}".format(s[0],len(s[1]),s[2]))
def printByteArrayAsHex(ba, title):
debug_print(title,"= ")
count = 0
for b in ba:
count = count + 1
debug_print("0x{0:02x}, ".format(b),end="")
if (count % 8) == 0:
debug_print("")
debug_print("\n")
def print_args(args):
debug_print("parsed arguments:")
debug_print(".input = ", args.input)
debug_print(".output = ", args.output)
debug_print(".loader_file = ", args.loader_file)
debug_print(".spi_size (KB) = ", hex(args.spi_size))
debug_print(".image_size = ", hex(args.image_size))
debug_print(".header_loc = ", hex(args.header_loc))
debug_print(".payload_offset = ", hex(args.payload_offset))
if args.rw_loc < 0:
debug_print(".rw_loc = ", args.rw_loc)
else:
debug_print(".rw_loc = ", hex(args.rw_loc))
debug_print(".spi_clock = ", args.spi_clock)
debug_print(".spi_read_cmd = ", args.spi_read_cmd)
debug_print(".test_spi = ", args.test_spi)
debug_print(".verbose = ", args.verbose)
#
# Handle quiet mode build from Makefile
# Quiet mode when V is unset or V=0
# Verbose mode when V=1
#
def main():
global debug_print
args = parseargs()
if args.verbose:
debug_print = print
debug_print("Begin MEC17xx pack_ec.py script")
# MEC17xx maximum 192KB each for RO & RW
# mec1701 chip Makefile sets args.spi_size = 512
# Tags at offset 0
#
print_args(args)
spi_size = args.spi_size * 1024
debug_print("SPI Flash image size in bytes =", hex(spi_size))
# !!! IMPORTANT !!!
# These values MUST match chip/mec1701/config_flash_layout.h
# defines.
# MEC17xx Boot-ROM TAGs are at offset 0 and 4.
# lfw + EC_RO starts at beginning of second 4KB sector
# EC_RW starts at offset 0x40000 (256KB)
spi_list = []
debug_print("args.input = ",args.input)
debug_print("args.loader_file = ",args.loader_file)
debug_print("args.image_size = ",hex(args.image_size))
rorofile=PacklfwRoImage(args.input, args.loader_file, args.image_size)
payload = GetPayload(rorofile)
payload_len = len(payload)
# debug
debug_print("EC_LFW + EC_RO length = ",hex(payload_len))
# SPI image integrity test
# compute CRC32 of EC_RO except for last 4 bytes
# skip over 4KB LFW
# Store CRC32 in last 4 bytes
  if args.test_spi:
crc = zlib.crc32(bytes(payload[LFW_SIZE:(payload_len - 4)]))
crc_ofs = payload_len - 4
debug_print("EC_RO CRC32 = 0x{0:08x} @ 0x{1:08x}".format(crc, crc_ofs))
for i in range(4):
payload[crc_ofs + i] = crc & 0xff
crc = crc >> 8
# Chromebooks are not using MEC BootROM ECDSA.
# We implemented the ECDSA disabled case where
# the 64-byte signature contains a SHA-256 of the binary plus
  # 32 zero bytes.
payload_signature = SignByteArray(payload)
# debug
printByteArrayAsHex(payload_signature, "LFW + EC_RO payload_signature")
  # MEC17xx Header is 0x80 bytes with a 64-byte signature
# (32 byte SHA256 + 32 zero bytes)
header = BuildHeader(args, payload_len, LOAD_ADDR, rorofile)
# debug
printByteArrayAsHex(header, "Header LFW + EC_RO")
# MEC17xx payload ECDSA not used, 64 byte signature is
# SHA256 + 32 zero bytes
header_signature = SignByteArray(header)
# debug
printByteArrayAsHex(header_signature, "header_signature")
tag = BuildTag(args)
# MEC17xx truncate RW length to args.image_size to not overwrite LFW
# offset may be different due to Header size and other changes
# MCHP we want to append a SHA-256 to the end of the actual payload
# to test SPI read routines.
debug_print("Call to GetPayloadFromOffset")
debug_print("args.input = ", args.input)
debug_print("args.image_size = ", hex(args.image_size))
payload_rw = GetPayloadFromOffset(args.input, args.image_size)
debug_print("type(payload_rw) is ", type(payload_rw))
debug_print("len(payload_rw) is ", hex(len(payload_rw)))
# truncate to args.image_size
rw_len = args.image_size
payload_rw = payload_rw[:rw_len]
payload_rw_len = len(payload_rw)
debug_print("Truncated size of EC_RW = ", hex(payload_rw_len))
payload_entry_tuple = struct.unpack_from('<I', payload_rw, 4)
debug_print("payload_entry_tuple = ", payload_entry_tuple)
payload_entry = payload_entry_tuple[0]
debug_print("payload_entry = ", hex(payload_entry))
# Note: payload_rw is a bytearray therefore is mutable
if args.test_ecrw:
gen_test_ecrw(payload_rw)
# SPI image integrity test
  # compute CRC32 of EC_RW except for the last 32 bytes
# Store CRC32 in last 4 bytes
  if args.test_spi:
crc = zlib.crc32(bytes(payload_rw[:(payload_rw_len - 32)]))
crc_ofs = payload_rw_len - 4
debug_print("EC_RW CRC32 = 0x{0:08x} at offset 0x{1:08x}".format(crc, crc_ofs))
for i in range(4):
payload_rw[crc_ofs + i] = crc & 0xff
crc = crc >> 8
payload_rw_sig = SignByteArray(payload_rw)
# debug
printByteArrayAsHex(payload_rw_sig, "payload_rw_sig")
os.remove(rorofile) # clean up the temp file
# MEC170x Boot-ROM Tags are located at SPI offset 0
spi_list.append((0, tag, "tag"))
  spi_list.append((args.header_loc, header, "header(lfw + ro)"))
  spi_list.append((args.header_loc + HEADER_SIZE, header_signature,
                   "header(lfw + ro) signature"))
spi_list.append((args.header_loc + args.payload_offset, payload,
"payload(lfw + ro)"))
offset = args.header_loc + args.payload_offset + payload_len
  # No SPI Header for EC_RW as it's not loaded by BootROM
spi_list.append((offset, payload_signature,
"payload(lfw_ro) signature"))
# EC_RW location
rw_offset = int(spi_size // 2)
if args.rw_loc >= 0:
rw_offset = args.rw_loc
debug_print("rw_offset = 0x{0:08x}".format(rw_offset))
if rw_offset < offset + len(payload_signature):
print("ERROR: EC_RW overlaps EC_RO")
spi_list.append((rw_offset, payload_rw, "payload(rw)"))
# don't add to EC_RW. We don't know if Google will process
# EC SPI flash binary with other tools during build of
# coreboot and OS.
#offset = rw_offset + payload_rw_len
#spi_list.append((offset, payload_rw_sig, "payload(rw) signature"))
spi_list = sorted(spi_list)
dumpsects(spi_list)
#
# MEC17xx Boot-ROM locates TAG at SPI offset 0 instead of end of SPI.
#
with open(args.output, 'wb') as f:
debug_print("Write spi list to file", args.output)
addr = 0
for s in spi_list:
if addr < s[0]:
debug_print("Offset ",hex(addr)," Length", hex(s[0]-addr),
"fill with 0xff")
f.write(b'\xff' * (s[0] - addr))
addr = s[0]
debug_print("Offset ",hex(addr), " Length", hex(len(s[1])), "write data")
f.write(s[1])
addr += len(s[1])
if addr < spi_size:
debug_print("Offset ",hex(addr), " Length", hex(spi_size - addr),
"fill with 0xff")
f.write(b'\xff' * (spi_size - addr))
f.flush()
if __name__ == '__main__':
main()
| true
| true
|
7908b0ee2182802ddd9ac057999a6f20f2de2801
| 4,652
|
py
|
Python
|
Secao7_ColecoesPython/Conjutos.py
|
PauloFTeixeira/curso_python
|
9040c7dcc5262620f6330bb9637710bb8899bc6b
|
[
"MIT"
] | null | null | null |
Secao7_ColecoesPython/Conjutos.py
|
PauloFTeixeira/curso_python
|
9040c7dcc5262620f6330bb9637710bb8899bc6b
|
[
"MIT"
] | null | null | null |
Secao7_ColecoesPython/Conjutos.py
|
PauloFTeixeira/curso_python
|
9040c7dcc5262620f6330bb9637710bb8899bc6b
|
[
"MIT"
] | null | null | null |
"""
Sets in Python are called set's
- A set holds no duplicates
- A set keeps no order among its values
- Elements are not accessed by index, i.e., sets are not indexed
Good for storing elements without ordering, without worrying about keys, values, or duplicate items.
Set's are written with {}
Difference between a set and a dict
- A dict holds key:value pairs
- A set holds values only
---------------------------------------------------------------------------------------------------------------------
# DEFINING A SET
# Form 1
s = set ({1, 2, 3, 4, 5, 4, 5, 2, 1}) # duplicate values
print(type(s))
print(s)
# NOTE: When creating a set, a repeated value is simply ignored, without raising an error.
# Form 2 - more common (note: naming a variable `set` shadows the built-in)
set = {1, 2, 3, 4, 5, 4, 5, 2, 1} # duplicate values
print(type(set))
print(set)
# No duplicate values and no ordering among them
# Any (hashable) data type can be stored
---------------------------------------------------------------------------------------------------------------------
# YOU CAN ITERATE OVER A SET
set = {1, 2, 3, 4, 5, 4, 5, 2, 1}
for valor in set:
print(valor)
---------------------------------------------------------------------------------------------------------------------
# INTERESTING USES OF SET'S
# Imagine we built a visitor sign-in form for a museum, where people enter
# their city of origin by hand
# We append each city to a Python list, since a list accepts new elements and allows repetition
cidade = ['Lavras', 'Bagé', 'Caçapava', 'Lavras', 'Bagé']
print(type(cidade))
print(cidade)
print(len(cidade)) # to know how many visitors there were
print(len(set(cidade))) # to know how many distinct cities came to visit
---------------------------------------------------------------------------------------------------------------------
# ADDING ELEMENTS TO A SET
s = {1, 2, 3}
s.add(4)
print(s)
---------------------------------------------------------------------------------------------------------------------
# REMOVANDO ELEMENTOS DE UM SET
# Forma 1
conj = {1, 2, 3}
conj.remove(3) # se tentar remover um valor que não existe, gera um erro.
print(conj)
# Forma 2
conj.discard(2) # se o elemento não existir, não vai gerar erro
print(conj)
---------------------------------------------------------------------------------------------------------------------
# COPIANDO UM SET PARA OUTRO
conj = {1, 2, 3}
# Forma 1 - Deep Copy (o novo conjunto fica independente)
novo = conj.copy()
print(novo)
novo.add(4)
print(conj, novo)
# Forma 2 - Shallow Copy (o novo conjunto fica interligado ao primeiro)
novo2 = conj
print(novo2)
novo2.add(5)
print(conj, novo2)
---------------------------------------------------------------------------------------------------------------------
# REMOVER TODOS OS DADOS DE UM SET
conj = {1, 2, 3}
conj.clear()
print(conj)
---------------------------------------------------------------------------------------------------------------------
# METODOS MATEMÁTICOS DE CONJUNTOS
# Dois conjuntos de estudantes, Python e Java.
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
# Precisamos juntar em um set, os alunos dos dois cursos, mas apenas nomes únicos
# Forma 1 - usando union
unicos = python.union(java)
print(unicos)
# Forma 2 - Usando o caracter pipe "|"
unicos2 = python|java
print(unicos2)
---------------------------------------------------------------------------------------------------------------------
# GERANDO SET DE ESTUDANTES QUE ESTÃO NOS DOIS CURSOS
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
# Forma 1 - usando intersection
ambos = python.intersection(java)
print(ambos)
# Forma 2 - usando &
ambos2 = python & java
print(ambos2)
---------------------------------------------------------------------------------------------------------------------
# GERAR SET DE ESTUDANTES QUE ESTÃ EM UM CURSO, MAS QUE NÃO ESTÃO NO OUTRO
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
so_python = python.difference(java)
print(so_python)
---------------------------------------------------------------------------------------------------------------------
# SOMA*, MÁXIMO*, MÍNIMO*, TAMANHO.
# * -> somente valores inteiros ou float
conj = {1, 2, 3, 4, 5}
print(sum(conj))
print(max(conj))
print(min(conj))
print(len(conj))
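---------------------------------------------------------------------------------------------------------------------
# MEMBERSHIP TESTING (extra example added during editing; not in the original notes)
# The headline feature of sets: 'in' checks are O(1) on average,
# versus O(n) for a list.
conj = {1, 2, 3, 4, 5}
print(3 in conj)  # True
print(9 in conj)  # False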
---------------------------------------------------------------------------------------------------------------------
"""
| 31.863014
| 117
| 0.479364
| true
| true
|
|
7908b1c81568977039d30ef691044a3d153df351
| 10,516
|
py
|
Python
|
akanda/horizon/api/neutron_extensions_client.py
|
dreamhost/akanda-horizon
|
c2a3771f620245d31e7c84ba38bbf440f5161fb6
|
[
"Apache-2.0"
] | 1
|
2015-02-23T16:59:55.000Z
|
2015-02-23T16:59:55.000Z
|
akanda/horizon/api/neutron_extensions_client.py
|
dreamhost/akanda-horizon
|
c2a3771f620245d31e7c84ba38bbf440f5161fb6
|
[
"Apache-2.0"
] | null | null | null |
akanda/horizon/api/neutron_extensions_client.py
|
dreamhost/akanda-horizon
|
c2a3771f620245d31e7c84ba38bbf440f5161fb6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DreamHost Neutron Extensions
# @author: Murali Raju, New Dream Network, LLC (DreamHost)
# @author: Rosario Disomma, New Dream Network, LLC (DreamHost)
import logging
from openstack_dashboard.api import nova
from openstack_dashboard.api import neutron
from openstack_dashboard.api.neutron import neutronclient
from neutronclient.common.exceptions import PortNotFoundClient
from akanda.horizon.common import (
NEW_PROTOCOL_CHOICES_DICT, POLICY_CHOICES_DICT)
LOG = logging.getLogger(__name__)
def get_protocol(value):
return NEW_PROTOCOL_CHOICES_DICT[value]
class Port(object):
def __init__(self, alias_name, protocol, port, id=None):
self.alias_name = alias_name
self.protocol = protocol
self.port = port
self.id = id
def display_protocol(self):
return get_protocol(self.protocol)
class AddressGroup(object):
def __init__(self, name, id=None):
self.name = name
self.id = id
class Network(object):
def __init__(self, alias_name, cidr, id=None):
self.alias_name = alias_name
self.cidr = cidr
self.id = id
class FilterRule(object):
def __init__(self, source, source_public_port,
destination, destination_public_port,
protocol, policy, request, id=None):
self.policy = policy
self.source = source
self.source_public_port = source_public_port
self.destination = destination
self.destination_public_port = destination_public_port
self.protocol = protocol
self.request = request
self.id = id
def display_policy(self):
return POLICY_CHOICES_DICT[self.policy]
def display_source_group(self):
if self.source:
return self.source['name']
return ''
def display_destination_group(self):
if self.destination:
return self.destination['name']
return ''
def display_source_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.source_public_port)
def display_destination_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.destination_public_port)
class PortForwardingRule(object):
def __init__(self, rule_name, public_port,
protocol, private_port, port,
request, id=None):
self.rule_name = rule_name
self.public_port = public_port
self.protocol = protocol
self.private_port = private_port
self.port = port
self.request = request
self.id = id
def display_public_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.public_port)
def display_private_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.private_port)
def display_instance(self):
try:
instance = nova.server_get(self.request, self.port['device_id'])
return instance.name
        except Exception:
            # best-effort lookup: fall back to a placeholder if the
            # instance cannot be resolved
            return '--'
def _mk_url(*args):
path = '/'.join(args).lstrip('/')
if not path.startswith('/'):
path = '/' + path
return path
def _list(request, path):
return neutronclient(request).get(_mk_url(path))
def _get(request, path, obj_id):
return neutronclient(request).get(_mk_url(path, obj_id))
def _create(request, path, body):
return neutronclient(request).post(_mk_url(path), body=body)
def _put(request, path, obj_id, body):
return neutronclient(request).put(_mk_url(path, obj_id), body=body)
def _delete(request, path, obj_id):
return neutronclient(request).delete(_mk_url(path, obj_id))
def portalias_list(request):
r = _list(request, 'dhportalias')
return [Port(item['name'], item['protocol'], item['port'], item['id'])
for item in r.get('portaliases', {})]
def portalias_get(request, obj_id):
r = _get(request, 'dhportalias', obj_id)
return r.get('portalias', {})
def portalias_create(request, body):
portalias = {'portalias': {
'name': body['alias_name'],
'protocol': body['protocol'],
'port': body['port'],
}}
LOG.debug("portalias_create(): body = %s" % body)
return _create(request, 'dhportalias', portalias)
def portalias_update(request, body):
obj_id = body.pop('id', '')
portalias = {'portalias': {
'name': body['alias_name'],
'protocol': body['protocol'],
'port': body['port'],
}}
LOG.debug("portalias_update(): body = %s" % body)
return _put(request, 'dhportalias', obj_id, portalias)
def portalias_delete(request, obj_id):
return _delete(request, 'dhportalias', obj_id)
def addressgroup_list(request):
r = _list(request, 'dhaddressgroup')
return [AddressGroup(item['name'], item['id'])
for item in r.get('addressgroups', {})]
def addressgroup_get(request, obj_id):
r = _get(request, 'dhaddressgroup', obj_id)
return r.get('addressgroup', {})
def addressgroup_create(request, body):
addressgroup = {'addressgroup': {
'name': body['name'],
}}
LOG.debug("addressgroup_create(): body = %s" % body)
return _create(request, 'dhaddressgroup', addressgroup)
def addressgroup_update(request, body):
obj_id = body.pop('id', '')
addressgroup = {'addressgroup': {
'name': body['name'],
}}
LOG.debug("addressgroup_update(): body = %s" % body)
return _put(request, 'dhaddressgroup', obj_id, addressgroup)
def addressgroup_delete(request, obj_id):
return _delete(request, 'dhaddressgroup', obj_id)
def networkalias_list(request):
r = _list(request, 'dhaddressentry')
return [Network(item['name'], item['cidr'], item['id'])
for item in r.get('addressentries', {})]
def networkalias_get(request, obj_id):
r = _get(request, 'dhaddressentry', obj_id)
return r.get('addressentry', {})
def networkalias_create(request, body):
networkalias = {'addressentry': {
'name': body['name'],
'cidr': body['cidr'],
'group_id': body['group']
}}
LOG.debug("networkalias_create(): body = %s" % body)
return _create(request, 'dhaddressentry', networkalias)
def networkalias_update(request, body):
obj_id = body.pop('id', '')
networkalias = {'addressentry': {
'name': body['name'],
'cidr': body['cidr'],
}}
LOG.debug("networkalias_update(): body = %s" % body)
return _put(request, 'dhaddressentry', obj_id, networkalias)
def networkalias_delete(request, obj_id):
return _delete(request, 'dhaddressentry', obj_id)
def filterrule_list(request):
r = _list(request, 'dhfilterrule')
return [FilterRule(item.get('source'), item['source_port'],
item.get('destination'), item['destination_port'],
item['protocol'], item['action'], request, item['id'])
for item in r.get('filterrules', {})]
def filterrule_get(request, obj_id):
r = _get(request, 'dhfilterrule', obj_id)
return r.get('filterrule', {})
def filterrule_create(request, body):
filterrule = {'filterrule': {
'source_id': body['source_id'],
'destination_id': body['destination_id'],
'source_port': body['source_public_port'],
'destination_port': body['destination_public_port'],
'protocol': body['source_protocol'],
'action': body['policy'],
}}
LOG.debug("filterrule_create(): body = %s" % body)
return _create(request, 'dhfilterrule', filterrule)
def filterrule_update(request, body):
obj_id = body.pop('id', '')
filterrule = {'filterrule': {
'source_id': body['source_id'],
'destination_id': body['destination_id'],
'source_port': body['source_public_port'],
'destination_port': body['destination_public_port'],
'protocol': body['source_protocol'],
'action': body['policy'],
}}
LOG.debug("filterrule_update(): body = %s" % body)
return _put(request, 'dhfilterrule', obj_id, filterrule)
def filterrule_delete(request, obj_id):
return _delete(request, 'dhfilterrule', obj_id)
def portforward_list(request):
r = _list(request, 'dhportforward')
return [PortForwardingRule(item['name'], item['public_port'],
item['protocol'], item['private_port'],
item['port'], request, item['id'])
for item in r.get('portforwards', {})]
def portforward_get(request, obj_id):
r = _get(request, 'dhportforward', obj_id)
return r.get('portforward', {})
def portforward_create(request, body):
port_list = neutron.port_list(request, device_id=body['instance'])
try:
port = port_list[0]
except IndexError:
raise PortNotFoundClient
portforward = {'portforward': {
'name': body['rule_name'],
'protocol': body['public_protocol'],
'public_port': body['public_port'],
'private_port': body['private_port'],
'port_id': port.id
}}
LOG.debug("portforward_create(): body = %s" % body)
return _create(request, 'dhportforward', portforward)
def portforward_update(request, body):
obj_id = body.pop('id', '')
port_list = neutron.port_list(request, device_id=body['instance'])
try:
port = port_list[0]
except IndexError:
raise PortNotFoundClient
portforward = {'portforward': {
'name': body['rule_name'],
'instance_id': body['instance'],
'protocol': body['public_protocol'],
'public_port': body['public_port'],
'private_port': body['private_port'],
'port_id': port.id
}}
LOG.debug("portforward_update(): body = %s" % body)
return _put(request, 'dhportforward', obj_id, portforward)
def portforward_delete(request, obj_id):
return _delete(request, 'dhportforward', obj_id)
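# --- Added example (editor's sketch, not part of the original module) ---
# _mk_url() joins path segments and guarantees a single leading slash, so the
# CRUD helpers above all hit paths of the form '/dhportalias/<id>'.
def _example_mk_url():
    assert _mk_url('dhportalias') == '/dhportalias'
    assert _mk_url('dhportalias', 'abc123') == '/dhportalias/abc123'
# In a Horizon view the helpers take the usual Django request object, e.g.
# (illustrative call only; the dict keys follow portalias_create() above):
#   portalias_create(request, {'alias_name': 'ssh', 'protocol': 'tcp', 'port': 22})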
| 29.790368
| 77
| 0.641308
|
import logging
from openstack_dashboard.api import nova
from openstack_dashboard.api import neutron
from openstack_dashboard.api.neutron import neutronclient
from neutronclient.common.exceptions import PortNotFoundClient
from akanda.horizon.common import (
NEW_PROTOCOL_CHOICES_DICT, POLICY_CHOICES_DICT)
LOG = logging.getLogger(__name__)
def get_protocol(value):
return NEW_PROTOCOL_CHOICES_DICT[value]
class Port(object):
def __init__(self, alias_name, protocol, port, id=None):
self.alias_name = alias_name
self.protocol = protocol
self.port = port
self.id = id
def display_protocol(self):
return get_protocol(self.protocol)
class AddressGroup(object):
def __init__(self, name, id=None):
self.name = name
self.id = id
class Network(object):
def __init__(self, alias_name, cidr, id=None):
self.alias_name = alias_name
self.cidr = cidr
self.id = id
class FilterRule(object):
def __init__(self, source, source_public_port,
destination, destination_public_port,
protocol, policy, request, id=None):
self.policy = policy
self.source = source
self.source_public_port = source_public_port
self.destination = destination
self.destination_public_port = destination_public_port
self.protocol = protocol
self.request = request
self.id = id
def display_policy(self):
return POLICY_CHOICES_DICT[self.policy]
def display_source_group(self):
if self.source:
return self.source['name']
return ''
def display_destination_group(self):
if self.destination:
return self.destination['name']
return ''
def display_source_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.source_public_port)
def display_destination_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.destination_public_port)
class PortForwardingRule(object):
def __init__(self, rule_name, public_port,
protocol, private_port, port,
request, id=None):
self.rule_name = rule_name
self.public_port = public_port
self.protocol = protocol
self.private_port = private_port
self.port = port
self.request = request
self.id = id
def display_public_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.public_port)
def display_private_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.private_port)
def display_instance(self):
try:
instance = nova.server_get(self.request, self.port['device_id'])
return instance.name
        except Exception:
            return '--'
def _mk_url(*args):
path = '/'.join(args).lstrip('/')
if not path.startswith('/'):
path = '/' + path
return path
def _list(request, path):
return neutronclient(request).get(_mk_url(path))
def _get(request, path, obj_id):
return neutronclient(request).get(_mk_url(path, obj_id))
def _create(request, path, body):
return neutronclient(request).post(_mk_url(path), body=body)
def _put(request, path, obj_id, body):
return neutronclient(request).put(_mk_url(path, obj_id), body=body)
def _delete(request, path, obj_id):
return neutronclient(request).delete(_mk_url(path, obj_id))
def portalias_list(request):
r = _list(request, 'dhportalias')
return [Port(item['name'], item['protocol'], item['port'], item['id'])
for item in r.get('portaliases', {})]
def portalias_get(request, obj_id):
r = _get(request, 'dhportalias', obj_id)
return r.get('portalias', {})
def portalias_create(request, body):
portalias = {'portalias': {
'name': body['alias_name'],
'protocol': body['protocol'],
'port': body['port'],
}}
LOG.debug("portalias_create(): body = %s" % body)
return _create(request, 'dhportalias', portalias)
def portalias_update(request, body):
obj_id = body.pop('id', '')
portalias = {'portalias': {
'name': body['alias_name'],
'protocol': body['protocol'],
'port': body['port'],
}}
LOG.debug("portalias_update(): body = %s" % body)
return _put(request, 'dhportalias', obj_id, portalias)
def portalias_delete(request, obj_id):
return _delete(request, 'dhportalias', obj_id)
def addressgroup_list(request):
r = _list(request, 'dhaddressgroup')
return [AddressGroup(item['name'], item['id'])
for item in r.get('addressgroups', {})]
def addressgroup_get(request, obj_id):
r = _get(request, 'dhaddressgroup', obj_id)
return r.get('addressgroup', {})
def addressgroup_create(request, body):
addressgroup = {'addressgroup': {
'name': body['name'],
}}
LOG.debug("addressgroup_create(): body = %s" % body)
return _create(request, 'dhaddressgroup', addressgroup)
def addressgroup_update(request, body):
obj_id = body.pop('id', '')
addressgroup = {'addressgroup': {
'name': body['name'],
}}
LOG.debug("addressgroup_update(): body = %s" % body)
return _put(request, 'dhaddressgroup', obj_id, addressgroup)
def addressgroup_delete(request, obj_id):
return _delete(request, 'dhaddressgroup', obj_id)
def networkalias_list(request):
r = _list(request, 'dhaddressentry')
return [Network(item['name'], item['cidr'], item['id'])
for item in r.get('addressentries', {})]
def networkalias_get(request, obj_id):
r = _get(request, 'dhaddressentry', obj_id)
return r.get('addressentry', {})
def networkalias_create(request, body):
networkalias = {'addressentry': {
'name': body['name'],
'cidr': body['cidr'],
'group_id': body['group']
}}
LOG.debug("networkalias_create(): body = %s" % body)
return _create(request, 'dhaddressentry', networkalias)
def networkalias_update(request, body):
obj_id = body.pop('id', '')
networkalias = {'addressentry': {
'name': body['name'],
'cidr': body['cidr'],
}}
LOG.debug("networkalias_update(): body = %s" % body)
return _put(request, 'dhaddressentry', obj_id, networkalias)
def networkalias_delete(request, obj_id):
return _delete(request, 'dhaddressentry', obj_id)
def filterrule_list(request):
r = _list(request, 'dhfilterrule')
return [FilterRule(item.get('source'), item['source_port'],
item.get('destination'), item['destination_port'],
item['protocol'], item['action'], request, item['id'])
for item in r.get('filterrules', {})]
def filterrule_get(request, obj_id):
r = _get(request, 'dhfilterrule', obj_id)
return r.get('filterrule', {})
def filterrule_create(request, body):
filterrule = {'filterrule': {
'source_id': body['source_id'],
'destination_id': body['destination_id'],
'source_port': body['source_public_port'],
'destination_port': body['destination_public_port'],
'protocol': body['source_protocol'],
'action': body['policy'],
}}
LOG.debug("filterrule_create(): body = %s" % body)
return _create(request, 'dhfilterrule', filterrule)
def filterrule_update(request, body):
obj_id = body.pop('id', '')
filterrule = {'filterrule': {
'source_id': body['source_id'],
'destination_id': body['destination_id'],
'source_port': body['source_public_port'],
'destination_port': body['destination_public_port'],
'protocol': body['source_protocol'],
'action': body['policy'],
}}
LOG.debug("filterrule_update(): body = %s" % body)
return _put(request, 'dhfilterrule', obj_id, filterrule)
def filterrule_delete(request, obj_id):
return _delete(request, 'dhfilterrule', obj_id)
def portforward_list(request):
r = _list(request, 'dhportforward')
return [PortForwardingRule(item['name'], item['public_port'],
item['protocol'], item['private_port'],
item['port'], request, item['id'])
for item in r.get('portforwards', {})]
def portforward_get(request, obj_id):
r = _get(request, 'dhportforward', obj_id)
return r.get('portforward', {})
def portforward_create(request, body):
port_list = neutron.port_list(request, device_id=body['instance'])
try:
port = port_list[0]
except IndexError:
raise PortNotFoundClient
portforward = {'portforward': {
'name': body['rule_name'],
'protocol': body['public_protocol'],
'public_port': body['public_port'],
'private_port': body['private_port'],
'port_id': port.id
}}
LOG.debug("portforward_create(): body = %s" % body)
return _create(request, 'dhportforward', portforward)
def portforward_update(request, body):
obj_id = body.pop('id', '')
port_list = neutron.port_list(request, device_id=body['instance'])
try:
port = port_list[0]
except IndexError:
raise PortNotFoundClient
portforward = {'portforward': {
'name': body['rule_name'],
'instance_id': body['instance'],
'protocol': body['public_protocol'],
'public_port': body['public_port'],
'private_port': body['private_port'],
'port_id': port.id
}}
LOG.debug("portforward_update(): body = %s" % body)
return _put(request, 'dhportforward', obj_id, portforward)
def portforward_delete(request, obj_id):
return _delete(request, 'dhportforward', obj_id)
| true
| true
|
7908b4873b7bca07cf5be458f286da8312e2397c
| 4,083
|
py
|
Python
|
tests/test_json.py
|
NextChance/redbeat
|
847b69fdfed0bd19a2a9b9a55c71dc0aa83ae7ea
|
[
"Apache-2.0"
] | null | null | null |
tests/test_json.py
|
NextChance/redbeat
|
847b69fdfed0bd19a2a9b9a55c71dc0aa83ae7ea
|
[
"Apache-2.0"
] | null | null | null |
tests/test_json.py
|
NextChance/redbeat
|
847b69fdfed0bd19a2a9b9a55c71dc0aa83ae7ea
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import json
from unittest import TestCase
from celery.schedules import schedule, crontab
try: # celery 3.x
from celery.utils.timeutils import timezone
except ImportError: # celery 4.x
from celery.utils.time import timezone
from redbeat.decoder import RedBeatJSONDecoder, RedBeatJSONEncoder
from redbeat.schedules import rrule
class JSONTestCase(TestCase):
def dumps(self, d):
return json.dumps(d, cls=RedBeatJSONEncoder)
def loads(self, d):
return json.loads(d, cls=RedBeatJSONDecoder)
def datetime(self, **kwargs):
d = {
'__type__': 'datetime',
'year': 2015,
'month': 12,
'day': 30,
'hour': 12,
'minute': 59,
'second': 22,
'microsecond': 333,
}
d.update(kwargs)
return d
def schedule(self, **kwargs):
d = {
'__type__': 'interval',
'every': 60.0,
'relative': False,
}
d.update(kwargs)
return d
def crontab(self, **kwargs):
d = {
'__type__': 'crontab',
'minute': '*',
'hour': '*',
'day_of_week': '*',
'day_of_month': '*',
'month_of_year': '*',
}
d.update(kwargs)
return d
def rrule(self, **kwargs):
d = {
'__type__': 'rrule',
'freq': 5,
'dtstart': 1451480362,
'interval': 1,
'wkst': None,
'count': 1,
'until': None,
'bysetpos': None,
'bymonth': None,
'bymonthday': None,
'byyearday': None,
'byeaster': None,
'byweekno': None,
'byweekday': None,
'byhour': None,
'byminute': None,
'bysecond': None,
}
d.update(kwargs)
return d
class RedBeatJSONEncoderTestCase(JSONTestCase):
def test_datetime(self):
dt = datetime.now()
result = self.dumps(dt)
expected = self.datetime()
for key in (k for k in expected if hasattr(dt, k)):
expected[key] = getattr(dt, key)
self.assertEqual(result, json.dumps(expected))
def test_schedule(self):
s = schedule(run_every=60.0)
result = self.dumps(s)
self.assertEqual(result, json.dumps(self.schedule(every=60.0)))
def test_crontab(self):
c = crontab()
result = self.dumps(c)
self.assertEqual(result, json.dumps(self.crontab()))
def test_rrule(self):
r = rrule('MINUTELY', dtstart=datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc), count=1)
result = self.dumps(r)
self.assertEqual(result, json.dumps(self.rrule()))
def test_rrule_timezone(self):
tz = timezone.get_timezone('US/Eastern')
start1 = datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc)
start2 = start1.astimezone(tz)
r1 = rrule('MINUTELY', dtstart=start1, count=1)
r2 = rrule('MINUTELY', dtstart=start2, count=1)
self.assertEqual(self.dumps(r1), self.dumps(r2))
class RedBeatJSONDecoderTestCase(JSONTestCase):
def test_datetime(self):
d = self.datetime()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, datetime(tzinfo=timezone.utc, **d))
def test_schedule(self):
d = self.schedule()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, schedule(run_every=60))
def test_crontab(self):
d = self.crontab()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, crontab())
def test_rrule(self):
d = self.rrule()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(
result,
rrule('MINUTELY', dtstart=datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc), count=1),
)
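# --- Added example (editor's sketch, not part of the original tests) ---
# The round trip the tests above exercise, in one place: RedBeatJSONEncoder
# serialises celery schedule objects and RedBeatJSONDecoder restores them.
def _example_roundtrip():
    payload = json.dumps(crontab(minute='*/5'), cls=RedBeatJSONEncoder)
    assert json.loads(payload, cls=RedBeatJSONDecoder) == crontab(minute='*/5')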
| 26.006369
| 104
| 0.546657
|
from datetime import datetime
import json
from unittest import TestCase
from celery.schedules import schedule, crontab
try:
from celery.utils.timeutils import timezone
except ImportError:
from celery.utils.time import timezone
from redbeat.decoder import RedBeatJSONDecoder, RedBeatJSONEncoder
from redbeat.schedules import rrule
class JSONTestCase(TestCase):
def dumps(self, d):
return json.dumps(d, cls=RedBeatJSONEncoder)
def loads(self, d):
return json.loads(d, cls=RedBeatJSONDecoder)
def datetime(self, **kwargs):
d = {
'__type__': 'datetime',
'year': 2015,
'month': 12,
'day': 30,
'hour': 12,
'minute': 59,
'second': 22,
'microsecond': 333,
}
d.update(kwargs)
return d
def schedule(self, **kwargs):
d = {
'__type__': 'interval',
'every': 60.0,
'relative': False,
}
d.update(kwargs)
return d
def crontab(self, **kwargs):
d = {
'__type__': 'crontab',
'minute': '*',
'hour': '*',
'day_of_week': '*',
'day_of_month': '*',
'month_of_year': '*',
}
d.update(kwargs)
return d
def rrule(self, **kwargs):
d = {
'__type__': 'rrule',
'freq': 5,
'dtstart': 1451480362,
'interval': 1,
'wkst': None,
'count': 1,
'until': None,
'bysetpos': None,
'bymonth': None,
'bymonthday': None,
'byyearday': None,
'byeaster': None,
'byweekno': None,
'byweekday': None,
'byhour': None,
'byminute': None,
'bysecond': None,
}
d.update(kwargs)
return d
class RedBeatJSONEncoderTestCase(JSONTestCase):
def test_datetime(self):
dt = datetime.now()
result = self.dumps(dt)
expected = self.datetime()
for key in (k for k in expected if hasattr(dt, k)):
expected[key] = getattr(dt, key)
self.assertEqual(result, json.dumps(expected))
def test_schedule(self):
s = schedule(run_every=60.0)
result = self.dumps(s)
self.assertEqual(result, json.dumps(self.schedule(every=60.0)))
def test_crontab(self):
c = crontab()
result = self.dumps(c)
self.assertEqual(result, json.dumps(self.crontab()))
def test_rrule(self):
r = rrule('MINUTELY', dtstart=datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc), count=1)
result = self.dumps(r)
self.assertEqual(result, json.dumps(self.rrule()))
def test_rrule_timezone(self):
tz = timezone.get_timezone('US/Eastern')
start1 = datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc)
start2 = start1.astimezone(tz)
r1 = rrule('MINUTELY', dtstart=start1, count=1)
r2 = rrule('MINUTELY', dtstart=start2, count=1)
self.assertEqual(self.dumps(r1), self.dumps(r2))
class RedBeatJSONDecoderTestCase(JSONTestCase):
def test_datetime(self):
d = self.datetime()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, datetime(tzinfo=timezone.utc, **d))
def test_schedule(self):
d = self.schedule()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, schedule(run_every=60))
def test_crontab(self):
d = self.crontab()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(result, crontab())
def test_rrule(self):
d = self.rrule()
result = self.loads(json.dumps(d))
d.pop('__type__')
self.assertEqual(
result,
rrule('MINUTELY', dtstart=datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc), count=1),
)
| true
| true
|
7908b561a7b7a380037443834aedfc664f8a7a7c
| 2,969
|
py
|
Python
|
src/firebaseops.py
|
txsmith/p1-sensor
|
6d9ae0a5d8f0e17a720781c4594453ccd848df30
|
[
"MIT"
] | null | null | null |
src/firebaseops.py
|
txsmith/p1-sensor
|
6d9ae0a5d8f0e17a720781c4594453ccd848df30
|
[
"MIT"
] | null | null | null |
src/firebaseops.py
|
txsmith/p1-sensor
|
6d9ae0a5d8f0e17a720781c4594453ccd848df30
|
[
"MIT"
] | null | null | null |
import logging
import pyrebase
from requests.exceptions import HTTPError
class Node:
def __init__(self, nodeName):
self._nodeName = nodeName
self._next = None
    def child(self, nodeName):
        if self._next is None:
            self._next = Node(nodeName)
        else:
            # recurse down the chain; the original called self._next.next(),
            # a method that does not exist on Node
            self._next.child(nodeName)
        return self
def set(self, data):
        if self._next is None:
self._next = Set(data)
else:
self._next.set(data)
return self
def get(self):
        if self._next is None:
self._next = Get()
else:
self._next.get()
return self
def eval(self, prev):
return self._next.eval(prev.child(self._nodeName))
def __str__(self):
        if self._next is None:
return 'child(' + str(self._nodeName) + ')'
else:
return 'child(' + str(self._nodeName) + ').' + str(self._next)
class Set:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.set(self._data)
def __str__(self):
return 'set(' + str(self._data) + ')'
class Get:
def eval(self, prev):
return prev.get()
def __str__(self):
return 'get()'
class Remove:
def eval(self, prev):
return prev.remove()
def __str__(self):
return 'remove()'
class Push:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.push(self._data)
def __str__(self):
return 'push(' + str(self._data) + ')'
class Update:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.update(self._data)
def __str__(self):
return 'update(' + str(self._data) + ')'
class FirebaseLiveEvaluator:
def __init__(self, config):
logging.info('Initializing Firebase connection...')
self._firebase = pyrebase.initialize_app(config)
self._db = self._firebase.database()
self._pathPrefix = config['firebasePathPrefix']
def eval(self, node):
# logging.debug(node)
if self._pathPrefix:
return node.eval(self._db.child(self._pathPrefix))
else:
return node.eval(self._db)
class FirebaseLoggingEvaluator:
def eval(self, node):
logging.info(node)
class FirebaseExceptionEvaluator:
def __init__(self, config):
logging.info('Initializing Firebase connection...')
self._firebase = pyrebase.initialize_app(config)
self._db = self._firebase.database()
self._pathPrefix = config['firebasePathPrefix']
self._throw = True
def eval(self, node):
if self._throw:
self._throw = False
raise HTTPError("I Broke")
logging.debug(node)
if self._pathPrefix:
return node.eval(self._db.child(self._pathPrefix))
else:
return node.eval(self._db)
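# --- Added example (editor's sketch, not part of the original module) ---
# Node builds a deferred chain of Firebase operations; nothing touches the
# database until an evaluator walks the chain, and __str__ keeps the pending
# operation readable. FirebaseLoggingEvaluator only logs, so this is safe to
# run without credentials.
def _example_node_chain():
    op = Node('sensors').child('p1').set({'watts': 420})
    assert str(op) == "child(sensors).child(p1).set({'watts': 420})"
    FirebaseLoggingEvaluator().eval(op)  # logs the chain; no network I/O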
| 24.336066
| 74
| 0.584035
|
import logging
import pyrebase
from requests.exceptions import HTTPError
class Node:
def __init__(self, nodeName):
self._nodeName = nodeName
self._next = None
    def child(self, nodeName):
        if self._next is None:
            self._next = Node(nodeName)
        else:
            self._next.child(nodeName)
        return self
def set(self, data):
        if self._next is None:
self._next = Set(data)
else:
self._next.set(data)
return self
def get(self):
        if self._next is None:
self._next = Get()
else:
self._next.get()
return self
def eval(self, prev):
return self._next.eval(prev.child(self._nodeName))
def __str__(self):
        if self._next is None:
return 'child(' + str(self._nodeName) + ')'
else:
return 'child(' + str(self._nodeName) + ').' + str(self._next)
class Set:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.set(self._data)
def __str__(self):
return 'set(' + str(self._data) + ')'
class Get:
def eval(self, prev):
return prev.get()
def __str__(self):
return 'get()'
class Remove:
def eval(self, prev):
return prev.remove()
def __str__(self):
return 'remove()'
class Push:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.push(self._data)
def __str__(self):
return 'push(' + str(self._data) + ')'
class Update:
def __init__(self, data):
self._data = data
def eval(self, prev):
return prev.update(self._data)
def __str__(self):
return 'update(' + str(self._data) + ')'
class FirebaseLiveEvaluator:
def __init__(self, config):
logging.info('Initializing Firebase connection...')
self._firebase = pyrebase.initialize_app(config)
self._db = self._firebase.database()
self._pathPrefix = config['firebasePathPrefix']
def eval(self, node):
if self._pathPrefix:
return node.eval(self._db.child(self._pathPrefix))
else:
return node.eval(self._db)
class FirebaseLoggingEvaluator:
def eval(self, node):
logging.info(node)
class FirebaseExceptionEvaluator:
def __init__(self, config):
logging.info('Initializing Firebase connection...')
self._firebase = pyrebase.initialize_app(config)
self._db = self._firebase.database()
self._pathPrefix = config['firebasePathPrefix']
self._throw = True
def eval(self, node):
if self._throw:
self._throw = False
raise HTTPError("I Broke")
logging.debug(node)
if self._pathPrefix:
return node.eval(self._db.child(self._pathPrefix))
else:
return node.eval(self._db)
| true
| true
|
7908b5ecc794b157f3dbd63b63ab3a5f1b181d73
| 29,531
|
py
|
Python
|
discord/utils.py
|
b4skyx/enhanced-discord.py
|
75a23351c4a484a3511c0b653965d229aa26833c
|
[
"MIT"
] | 1,126
|
2021-08-28T12:09:26.000Z
|
2022-03-31T16:37:54.000Z
|
discord/utils.py
|
b4skyx/enhanced-discord.py
|
75a23351c4a484a3511c0b653965d229aa26833c
|
[
"MIT"
] | 89
|
2021-08-28T14:46:11.000Z
|
2022-03-04T11:19:11.000Z
|
discord/utils.py
|
b4skyx/enhanced-discord.py
|
75a23351c4a484a3511c0b653965d229aa26833c
|
[
"MIT"
] | 111
|
2021-08-28T02:04:22.000Z
|
2022-03-05T17:48:31.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import array
import asyncio
import collections.abc
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
TYPE_CHECKING,
)
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
import functools
from inspect import isawaitable as _isawaitable, signature as _signature
from operator import attrgetter
import json
import re
import sys
import types
import warnings
from .errors import InvalidArgument
try:
import orjson
except ModuleNotFoundError:
HAS_ORJSON = False
else:
HAS_ORJSON = True
__all__ = (
"oauth_url",
"snowflake_time",
"time_snowflake",
"find",
"get",
"sleep_until",
"utcnow",
"remove_markdown",
"escape_markdown",
"escape_mentions",
"as_chunks",
"format_dt",
)
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
def __eq__(self, other):
return False
def __bool__(self):
return False
def __repr__(self):
return "..."
MISSING: Any = _MissingSentinel()
class _cached_property:
def __init__(self, function):
self.function = function
self.__doc__ = getattr(function, "__doc__")
def __get__(self, instance, owner):
if instance is None:
return self
value = self.function(instance)
setattr(instance, self.function.__name__, value)
return value
if TYPE_CHECKING:
from functools import cached_property as cached_property
from typing_extensions import ParamSpec
from .permissions import Permissions
from .abc import Snowflake
from .invite import Invite
from .template import Template
class _RequestLike(Protocol):
headers: Mapping[str, Any]
P = ParamSpec("P")
else:
cached_property = _cached_property
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
self.name = name
self.function = function
self.__doc__ = getattr(function, "__doc__")
@overload
def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
...
@overload
def __get__(self, instance: T, owner: Type[T]) -> T_co:
...
def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
if instance is None:
return self
try:
return getattr(instance, self.name)
except AttributeError:
value = self.function(instance)
setattr(instance, self.name, value)
return value
class classproperty(Generic[T_co]):
def __init__(self, fget: Callable[[Any], T_co]) -> None:
self.fget = fget
def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
return self.fget(owner)
def __set__(self, instance, value) -> None:
raise AttributeError("cannot set attribute")
def cached_slot_property(name: str) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
return CachedSlotProperty(name, func)
return decorator
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
"""Read-only proxy of a Sequence."""
def __init__(self, proxied: Sequence[T_co]):
self.__proxied = proxied
def __getitem__(self, idx: int) -> T_co:
return self.__proxied[idx]
def __len__(self) -> int:
return len(self.__proxied)
def __contains__(self, item: Any) -> bool:
return item in self.__proxied
def __iter__(self) -> Iterator[T_co]:
return iter(self.__proxied)
def __reversed__(self) -> Iterator[T_co]:
return reversed(self.__proxied)
def index(self, value: Any, *args, **kwargs) -> int:
return self.__proxied.index(value, *args, **kwargs)
def count(self, value: Any) -> int:
return self.__proxied.count(value)
@overload
def parse_time(timestamp: None) -> None:
...
@overload
def parse_time(timestamp: str) -> datetime.datetime:
...
@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
...
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
if timestamp:
return datetime.datetime.fromisoformat(timestamp)
return None
def copy_doc(original: Callable) -> Callable[[T], T]:
def decorator(overriden: T) -> T:
overriden.__doc__ = original.__doc__
overriden.__signature__ = _signature(original) # type: ignore
return overriden
return decorator
def deprecated(instead: Optional[str] = None) -> Callable[[Callable[P, T]], Callable[P, T]]:
def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
@functools.wraps(func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if instead:
fmt = "{0.__name__} is deprecated, use {1} instead."
else:
fmt = "{0.__name__} is deprecated."
warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return decorated
return actual_decorator
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f"https://discord.com/oauth2/authorize?client_id={client_id}"
url += "&scope=" + "+".join(scopes or ("bot",))
if permissions is not MISSING:
url += f"&permissions={permissions.value}"
if guild is not MISSING:
url += f"&guild_id={guild.id}"
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += "&response_type=code&" + urlencode({"redirect_uri": redirect_uri})
if disable_guild_select:
url += "&disable_guild_select=true"
return url
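# --- Added example (editor's sketch, not part of the original module) ---
# With no optional arguments oauth_url() yields the minimal bot invite URL:
def _example_oauth_url():
    url = oauth_url(1234)
    assert url == "https://discord.com/oauth2/authorize?client_id=1234&scope=bot"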
def snowflake_time(id: int) -> datetime.datetime:
"""
Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake.
"""
timestamp = ((id >> 22) + DISCORD_EPOCH) / 1000
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
"""Returns a numeric snowflake pretending to be created at the given date.
When using as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
When using as the higher end of a range, use ``time_snowflake(high=True) + 1``
to be inclusive, ``high=False`` to be exclusive
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
Whether or not to set the lower 22 bit to high or low.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
discord_millis = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2 ** 22 - 1 if high else 0)
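# --- Added example (editor's sketch, not part of the original module) ---
# The two helpers above are inverses for whole-millisecond datetimes: the
# creation time lives in the top bits of the snowflake.
def _example_snowflake_roundtrip():
    dt = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
    assert snowflake_time(time_snowflake(dt)) == dt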
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
"""A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
This is different from :func:`py:filter` due to the fact it stops the moment it finds
a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through.
"""
for element in seq:
if predicate(element):
return element
return None
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
r"""A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
When multiple attributes are specified, they are checked using
logical AND, not logical OR. Meaning they have to meet every
attribute passed in and not one of them.
To have a nested attribute search (i.e. search by ``x.y``) then
pass in ``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
"""
# global -> local
_all = all
attrget = attrgetter
# Special case the single element call
if len(attrs) == 1:
k, v = attrs.popitem()
pred = attrget(k.replace("__", "."))
for elem in iterable:
if pred(elem) == v:
return elem
return None
converted = [(attrget(attr.replace("__", ".")), value) for attr, value in attrs.items()]
for elem in iterable:
if _all(pred(elem) == value for pred, value in converted):
return elem
return None
def _unique(iterable: Iterable[T]) -> List[T]:
return [x for x in dict.fromkeys(iterable)]
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data: bytes):
if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
return "image/png"
elif data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
return "image/jpeg"
elif data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
return "image/gif"
elif data.startswith(b"RIFF") and data[8:12] == b"WEBP":
return "image/webp"
else:
raise InvalidArgument("Unsupported image type given")
def _bytes_to_base64_data(data: bytes) -> str:
fmt = "data:{mime};base64,{data}"
mime = _get_mime_type_for_image(data)
b64 = b64encode(data).decode("ascii")
return fmt.format(mime=mime, data=b64)
if HAS_ORJSON:
def _to_json(obj: Any) -> str: # type: ignore
return orjson.dumps(obj).decode("utf-8")
_from_json = orjson.loads # type: ignore
else:
def _to_json(obj: Any) -> str:
return json.dumps(obj, separators=(",", ":"), ensure_ascii=True)
_from_json = json.loads
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
async def async_all(gen, *, check=_isawaitable):
for elem in gen:
if check(elem):
elem = await elem
if not elem:
return False
return True
async def sane_wait_for(futures, *, timeout):
ensured = [asyncio.ensure_future(fut) for fut in futures]
done, pending = await asyncio.wait(ensured, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
if len(pending) != 0:
raise asyncio.TimeoutError()
return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
for mro in reversed(cls.__mro__):
try:
yield from mro.__slots__
except AttributeError:
continue
def compute_timedelta(dt: datetime.datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.datetime.now(datetime.timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
The timestamp in which to sleep until. If the datetime is naive then
it is assumed to be local time.
result: Any
If provided is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
def utcnow() -> datetime.datetime:
"""A helper function to return an aware UTC datetime representing the current time.
This should be preferred to :meth:`datetime.datetime.utcnow` since it is an aware
datetime, compared to the naive datetime in the standard library.
.. versionadded:: 2.0
Returns
--------
:class:`datetime.datetime`
The current aware datetime in UTC.
"""
return datetime.datetime.now(datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
"""Icons must be power of 2 within [16, 4096]."""
return not size & (size - 1) and 4096 >= size >= 16
class SnowflakeList(array.array):
"""Internal data storage class to efficiently store a list of snowflakes.
This should have the following characteristics:
- Low memory usage
- O(n) iteration (obviously)
- O(n log n) initial creation if data is unsorted
- O(log n) search and indexing
- O(n) insertion
"""
__slots__ = ()
if TYPE_CHECKING:
def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
...
def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
return array.array.__new__(cls, "Q", data if is_sorted else sorted(data)) # type: ignore
def add(self, element: int) -> None:
i = bisect_left(self, element)
self.insert(i, element)
def get(self, element: int) -> Optional[int]:
i = bisect_left(self, element)
return self[i] if i != len(self) and self[i] == element else None
def has(self, element: int) -> bool:
i = bisect_left(self, element)
return i != len(self) and self[i] == element
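# --- Added example (editor's sketch, not part of the original module) ---
# SnowflakeList keeps its 64-bit IDs sorted, which is what lets add(), get()
# and has() bisect instead of scanning.
def _example_snowflake_list():
    ids = SnowflakeList([30, 10, 20])  # unsorted input is sorted on creation
    ids.add(25)
    assert ids.has(25) and not ids.has(15)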
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
def resolve_invite(invite: Union[Invite, str]) -> str:
"""
Resolves an invite from a :class:`~discord.Invite`, URL or code.
Parameters
-----------
invite: Union[:class:`~discord.Invite`, :class:`str`]
The invite.
Returns
--------
:class:`str`
The invite code.
"""
from .invite import Invite # circular import
if isinstance(invite, Invite):
return invite.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)"
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
def resolve_template(code: Union[Template, str]) -> str:
"""
Resolves a template code from a :class:`~discord.Template`, URL or code.
.. versionadded:: 1.4
Parameters
-----------
code: Union[:class:`~discord.Template`, :class:`str`]
The code.
Returns
--------
:class:`str`
The template code.
"""
from .template import Template # circular import
if isinstance(code, Template):
return code.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)"
m = re.match(rx, code)
if m:
return m.group(1)
return code
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
_MARKDOWN_ESCAPE_COMMON = r"^>(?:>>)?\s|\[.+\]\(.+\)"
_MARKDOWN_ESCAPE_REGEX = re.compile(
fr"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})", re.MULTILINE
)
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
_MARKDOWN_STOCK_REGEX = fr"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
"""A helper function that removes markdown characters.
.. versionadded:: 1.7
.. note::
This function is not markdown aware and may remove meaning from the original text. For example,
if the input contains ``10 * 5`` then it will be converted into ``10 5``.
Parameters
-----------
text: :class:`str`
The text to remove markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters removed.
"""
    def replacement(match):
        groupdict = match.groupdict()
        # a named group that did not participate in the match appears in
        # groupdict() as None rather than as a missing key, so coerce to ''
        return groupdict.get("url") or ""
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
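# --- Added example (editor's sketch, not part of the original module) ---
# remove_markdown() strips the marker characters outright; as the docstring
# warns, it is not markdown-aware.
def _example_remove_markdown():
    assert remove_markdown("**bold** and `code`") == "bold and code"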
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
r"""A helper function that escapes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to escape markdown from.
as_needed: :class:`bool`
Whether to escape the markdown characters as needed. This
means that it does not escape extraneous characters if it's
not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
instead of ``\*\*hello\*\*``. Note however that this can open
you up to some clever syntax abuse. Defaults to ``False``.
ignore_links: :class:`bool`
Whether to leave links alone when escaping markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. This option is not supported with ``as_needed``.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters escaped with a slash.
"""
if not as_needed:
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get("url")
if is_url:
return is_url
return "\\" + groupdict["markdown"]
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
else:
text = re.sub(r"\\", r"\\\\", text)
return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", text)
def escape_mentions(text: str) -> str:
"""A helper function that escapes everyone, here, role, and user mentions.
.. note::
This does not include channel mentions.
.. note::
For more granular control over what mentions should be escaped
within messages, refer to the :class:`~discord.AllowedMentions`
class.
Parameters
-----------
text: :class:`str`
The text to escape mentions from.
Returns
--------
:class:`str`
The text with the mentions removed.
"""
return re.sub(r"@(everyone|here|[!&]?[0-9]{17,20})", "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
max_size: :class:`int`
The maximum chunk size.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError("Chunk sizes must be greater than 0.")
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
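# --- Added example (editor's sketch, not part of the original module) ---
# as_chunks() buffers up to max_size items per chunk; the final chunk holds
# whatever is left over.
def _example_as_chunks():
    assert list(as_chunks(iter(range(5)), 2)) == [[0, 1], [2, 3], [4]]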
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
params = []
literal_cls = type(Literal[0])
for p in parameters:
if isinstance(p, literal_cls):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
none_cls = type(None)
return tuple(p for p in parameters if p is not none_cls) + (none_cls,)
def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, "__args__"):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, "__origin__"):
if PY_310 and tp.__class__ is types.UnionType: # type: ignore
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(
evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
def resolve_annotation(
annotation: Any,
globalns: Dict[str, Any],
localns: Optional[Dict[str, Any]],
cache: Optional[Dict[str, Any]],
) -> Any:
if annotation is None:
return type(None)
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
locals = globalns if localns is None else localns
if cache is None:
cache = {}
return evaluate_annotation(annotation, globalns, locals, cache)
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
"""A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
"""
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
| 28.867058
| 118
| 0.61085
|
from __future__ import annotations
import array
import asyncio
import collections.abc
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
TYPE_CHECKING,
)
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
import functools
from inspect import isawaitable as _isawaitable, signature as _signature
from operator import attrgetter
import json
import re
import sys
import types
import warnings
from .errors import InvalidArgument
try:
import orjson
except ModuleNotFoundError:
HAS_ORJSON = False
else:
HAS_ORJSON = True
__all__ = (
"oauth_url",
"snowflake_time",
"time_snowflake",
"find",
"get",
"sleep_until",
"utcnow",
"remove_markdown",
"escape_markdown",
"escape_mentions",
"as_chunks",
"format_dt",
)
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
def __eq__(self, other):
return False
def __bool__(self):
return False
def __repr__(self):
return "..."
MISSING: Any = _MissingSentinel()
class _cached_property:
def __init__(self, function):
self.function = function
self.__doc__ = getattr(function, "__doc__")
def __get__(self, instance, owner):
if instance is None:
return self
value = self.function(instance)
setattr(instance, self.function.__name__, value)
return value
if TYPE_CHECKING:
from functools import cached_property as cached_property
from typing_extensions import ParamSpec
from .permissions import Permissions
from .abc import Snowflake
from .invite import Invite
from .template import Template
class _RequestLike(Protocol):
headers: Mapping[str, Any]
P = ParamSpec("P")
else:
cached_property = _cached_property
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
self.name = name
self.function = function
self.__doc__ = getattr(function, "__doc__")
@overload
def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
...
@overload
def __get__(self, instance: T, owner: Type[T]) -> T_co:
...
def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
if instance is None:
return self
try:
return getattr(instance, self.name)
except AttributeError:
value = self.function(instance)
setattr(instance, self.name, value)
return value
class classproperty(Generic[T_co]):
def __init__(self, fget: Callable[[Any], T_co]) -> None:
self.fget = fget
def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
return self.fget(owner)
def __set__(self, instance, value) -> None:
raise AttributeError("cannot set attribute")
def cached_slot_property(name: str) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
return CachedSlotProperty(name, func)
return decorator
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
def __init__(self, proxied: Sequence[T_co]):
self.__proxied = proxied
def __getitem__(self, idx: int) -> T_co:
return self.__proxied[idx]
def __len__(self) -> int:
return len(self.__proxied)
def __contains__(self, item: Any) -> bool:
return item in self.__proxied
def __iter__(self) -> Iterator[T_co]:
return iter(self.__proxied)
def __reversed__(self) -> Iterator[T_co]:
return reversed(self.__proxied)
def index(self, value: Any, *args, **kwargs) -> int:
return self.__proxied.index(value, *args, **kwargs)
def count(self, value: Any) -> int:
return self.__proxied.count(value)
@overload
def parse_time(timestamp: None) -> None:
...
@overload
def parse_time(timestamp: str) -> datetime.datetime:
...
@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
...
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
if timestamp:
return datetime.datetime.fromisoformat(timestamp)
return None
def copy_doc(original: Callable) -> Callable[[T], T]:
def decorator(overriden: T) -> T:
overriden.__doc__ = original.__doc__
overriden.__signature__ = _signature(original)
return overriden
return decorator
def deprecated(instead: Optional[str] = None) -> Callable[[Callable[P, T]], Callable[P, T]]:
def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
@functools.wraps(func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
warnings.simplefilter("always", DeprecationWarning)
if instead:
fmt = "{0.__name__} is deprecated, use {1} instead."
else:
fmt = "{0.__name__} is deprecated."
warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning)
warnings.simplefilter("default", DeprecationWarning)
return func(*args, **kwargs)
return decorated
return actual_decorator
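def _deprecated_demo() -> None:
    # Editor's sketch, not original source: applying the decorator above. The
    # wrapped call still returns normally; a DeprecationWarning is emitted with
    # ``stacklevel=3`` so it points at the caller rather than the wrapper.
    @deprecated(instead="new_api")
    def old_api(x: int) -> int:
        return x * 2

    assert old_api(2) == 4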
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
) -> str:
url = f"https://discord.com/oauth2/authorize?client_id={client_id}"
url += "&scope=" + "+".join(scopes or ("bot",))
if permissions is not MISSING:
url += f"&permissions={permissions.value}"
if guild is not MISSING:
url += f"&guild_id={guild.id}"
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += "&response_type=code&" + urlencode({"redirect_uri": redirect_uri})
if disable_guild_select:
url += "&disable_guild_select=true"
return url
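def _oauth_url_demo() -> None:
    # Editor's sketch, not original source: with no optional arguments the
    # invite URL requests only the ``bot`` scope; the other query parameters
    # are appended just when their MISSING sentinels are replaced.
    url = oauth_url(1234567890)
    assert url == "https://discord.com/oauth2/authorize?client_id=1234567890&scope=bot"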
def snowflake_time(id: int) -> datetime.datetime:
timestamp = ((id >> 22) + DISCORD_EPOCH) / 1000
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
discord_millis = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2 ** 22 - 1 if high else 0)
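def _snowflake_roundtrip_demo() -> None:
    # Editor's sketch, not original source: a Discord snowflake stores the
    # millisecond timestamp (relative to DISCORD_EPOCH) in its upper bits,
    # which is why both helpers shift by 22. The conversion round-trips.
    dt = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
    assert snowflake_time(time_snowflake(dt)) == dt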
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
for element in seq:
if predicate(element):
return element
return None
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
_all = all
attrget = attrgetter
if len(attrs) == 1:
k, v = attrs.popitem()
pred = attrget(k.replace("__", "."))
for elem in iterable:
if pred(elem) == v:
return elem
return None
converted = [(attrget(attr.replace("__", ".")), value) for attr, value in attrs.items()]
for elem in iterable:
if _all(pred(elem) == value for pred, value in converted):
return elem
return None
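def _get_demo() -> None:
    # Editor's sketch, not original source: ``get`` turns ``foo__bar`` into the
    # nested lookup ``elem.foo.bar`` via ``operator.attrgetter`` and returns
    # the first element whose attributes all match, or None.
    items = [types.SimpleNamespace(name="a"), types.SimpleNamespace(name="b")]
    assert get(items, name="b") is items[1]
    assert get([], name="b") is None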
def _unique(iterable: Iterable[T]) -> List[T]:
return [x for x in dict.fromkeys(iterable)]
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data: bytes):
if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
return "image/png"
elif data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
return "image/jpeg"
elif data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
return "image/gif"
elif data.startswith(b"RIFF") and data[8:12] == b"WEBP":
return "image/webp"
else:
raise InvalidArgument("Unsupported image type given")
def _bytes_to_base64_data(data: bytes) -> str:
fmt = "data:{mime};base64,{data}"
mime = _get_mime_type_for_image(data)
b64 = b64encode(data).decode("ascii")
return fmt.format(mime=mime, data=b64)
if HAS_ORJSON:
def _to_json(obj: Any) -> str:
return orjson.dumps(obj).decode("utf-8")
_from_json = orjson.loads
else:
def _to_json(obj: Any) -> str:
return json.dumps(obj, separators=(",", ":"), ensure_ascii=True)
_from_json = json.loads
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
async def async_all(gen, *, check=_isawaitable):
for elem in gen:
if check(elem):
elem = await elem
if not elem:
return False
return True
async def sane_wait_for(futures, *, timeout):
ensured = [asyncio.ensure_future(fut) for fut in futures]
done, pending = await asyncio.wait(ensured, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
if len(pending) != 0:
raise asyncio.TimeoutError()
return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
for mro in reversed(cls.__mro__):
try:
yield from mro.__slots__
except AttributeError:
continue
def compute_timedelta(dt: datetime.datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.datetime.now(datetime.timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
def utcnow() -> datetime.datetime:
return datetime.datetime.now(datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
return not size & (size - 1) and 4096 >= size >= 16
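def _valid_icon_size_demo() -> None:
    # Editor's sketch, not original source: ``size & (size - 1)`` clears the
    # lowest set bit, so it is zero exactly for powers of two; the chained
    # comparison then confines sizes to the supported 16..4096 bracket.
    assert valid_icon_size(256)
    assert not valid_icon_size(300)   # not a power of two
    assert not valid_icon_size(8192)  # power of two but out of range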
class SnowflakeList(array.array):
__slots__ = ()
if TYPE_CHECKING:
def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
...
def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
return array.array.__new__(cls, "Q", data if is_sorted else sorted(data))
def add(self, element: int) -> None:
i = bisect_left(self, element)
self.insert(i, element)
def get(self, element: int) -> Optional[int]:
i = bisect_left(self, element)
return self[i] if i != len(self) and self[i] == element else None
def has(self, element: int) -> bool:
i = bisect_left(self, element)
return i != len(self) and self[i] == element
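def _snowflake_list_demo() -> None:
    # Editor's sketch, not original source: the array is kept sorted (unsigned
    # 64-bit "Q" storage), so add/get/has all run in O(log n) via bisect_left.
    ids = SnowflakeList([3, 1, 2])
    ids.add(5)
    assert ids.has(2) and not ids.has(4)
    assert ids.get(5) == 5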
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
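def _string_width_demo() -> None:
    # Editor's sketch, not original source: pure-ASCII strings short-circuit to
    # their length; East Asian Wide/Fullwidth/Ambiguous ("WFA") characters are
    # counted as two columns, which keeps monospace tables aligned.
    assert _string_width("abc") == 3
    assert _string_width("あい") == 4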
def resolve_invite(invite: Union[Invite, str]) -> str:
from .invite import Invite
if isinstance(invite, Invite):
return invite.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)"
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
def resolve_template(code: Union[Template, str]) -> str:
from .template import Template
if isinstance(code, Template):
return code.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)"
m = re.match(rx, code)
if m:
return m.group(1)
return code
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
_MARKDOWN_ESCAPE_COMMON = r"^>(?:>>)?\s|\[.+\]\(.+\)"
_MARKDOWN_ESCAPE_REGEX = re.compile(
fr"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})", re.MULTILINE
)
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
_MARKDOWN_STOCK_REGEX = fr"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
def replacement(match):
groupdict = match.groupdict()
return groupdict.get("url", "")
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
if not as_needed:
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get("url")
if is_url:
return is_url
return "\\" + groupdict["markdown"]
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
else:
text = re.sub(r"\\", r"\\\\", text)
return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", text)
def escape_mentions(text: str) -> str:
return re.sub(r"@(everyone|here|[!&]?[0-9]{17,20})", "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
if max_size <= 0:
raise ValueError("Chunk sizes must be greater than 0.")
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
params = []
literal_cls = type(Literal[0])
for p in parameters:
if isinstance(p, literal_cls):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
none_cls = type(None)
return tuple(p for p in parameters if p is not none_cls) + (none_cls,)
def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, "__args__"):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, "__origin__"):
if PY_310 and tp.__class__ is types.UnionType: # type: ignore
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(
evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
def resolve_annotation(
annotation: Any,
globalns: Dict[str, Any],
localns: Optional[Dict[str, Any]],
cache: Optional[Dict[str, Any]],
) -> Any:
if annotation is None:
return type(None)
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
locals = globalns if localns is None else localns
if cache is None:
cache = {}
return evaluate_annotation(annotation, globalns, locals, cache)
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
| true
| true
|
7908b605ad945b6b9a393ebe29c3f6c6c4c027fc
| 12,833
|
py
|
Python
|
Community/AssetManagement/lumeta_workflow_page.py
|
npatellumeta/gateway-workflows
|
c0800181aaece295e734e151c457ce5d7245ca6f
|
[
"Apache-2.0"
] | null | null | null |
Community/AssetManagement/lumeta_workflow_page.py
|
npatellumeta/gateway-workflows
|
c0800181aaece295e734e151c457ce5d7245ca6f
|
[
"Apache-2.0"
] | null | null | null |
Community/AssetManagement/lumeta_workflow_page.py
|
npatellumeta/gateway-workflows
|
c0800181aaece295e734e151c457ce5d7245ca6f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 BlueCat Networks. All rights reserved.
import ipaddress
from flask import request, g, abort, jsonify
from bluecat.api_exception import PortalException, APIException
from bluecat import route, util
from main_app import app
# application config
# Define global variable to hold handle to API object
api = None
#
# GET, PUT or POST
#
@route(app, '/lumeta/getnetworklist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def get_networks_get_networks_page():
# are we authenticated?
g.user.logger.info('SUCCESS')
configurations = None
configurations_json = []
if g.user:
configurations = g.user.get_api().get_configurations()
for c in configurations:
            print(c)
configuration_json = {"id": c.get_id(), "name": c.get_name()}
configurations_json.append(configuration_json)
return jsonify(configurations_json)
@route(app, '/lumeta/getiplist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def getiplist_getiplist_page():
# are we authenticated?
g.user.logger.info('SUCCESS')
networks = []
# Return object that contains all the networks (and eventually all ip addresses)
# list of all properties objects
ip_addresses = []
# If name is given, use get_configuration(name)
if g.user:
configurations = g.user.get_api().get_configurations()
for c in configurations:
print(c)
configuration_json = {"id": c.get_id(), "name": c.get_name()}
# FIXME - need code to get network list from configuration id. Is there a call to get children_of_types
# (['IP4Block', 'IP4Network', 'IP6Block', 'IP6Network'
# use get_by_object_types(*, ['IP4Block', 'IP4Network', 'IP6Block', 'IP6Network']) - returns flat list
# We might want to request IP4Network, IP6Network
# FIXME - extract below code in a function and call it for IP4Block and IP6Block
try:
for nw in c.get_children_of_type('IP4Block'):
print(nw)
# get all blocks and networks for block
for n in g.user.get_api().get_by_object_types(nw.get_property('CIDR'),
['IP4Network', 'IP4Block', 'IP6Network', 'IP6Block']):
if '6' in n.get_type():
networks.append({'network_id': n.get_id(), 'display_text': n.get_properties()['prefix']})
ip_addresses.extend(calculate_block_stats(n, c.get_id(), c.get_name()))
else:
networks.append({'network_id': n.get_id(), 'display_text': n.get_properties()['CIDR']})
ip_addresses.extend(calculate_block_stats(n, c.get_id(), c.get_name()))
except Exception as e:
                app.logger.error('get_subnets: ' + str(e))
return jsonify(ip_addresses)
def calculate_network_stats(bam_network, config_id, config_name):
if bam_network.get_type() == 'IP4Network':
network_address = bam_network.get_property('CIDR')
network = ipaddress.ip_network(network_address)
else:
network_address = bam_network.get_property('prefix')
network = ipaddress.ip_network(network_address)
ip_addresses = []
ip_data = {}
if bam_network.get_type() == 'IP4Network':
# run below for IP4Address, IP6Address - properties will be populated as well
for n in bam_network.get_children_of_type('IP4Address'):
# Sometimes below list contains all ip addresses and sometimes only one for gateway address
# Look through n.get_properties() and add them to ip_data
ip_data = {}
ip_data.update({'ip_address': n.get_address()})
ip_data.update({'properties': n.get_properties()})
ip_data.update({'config_id': config_id})
ip_data.update({'config_name': config_name})
ip_data.update({'id': n.get_id()})
ip_addresses.append(ip_data)
next_address = bam_network.get_next_available_ip4_address()
else:
for n in bam_network.get_children_of_type('IP6Address'):
ip_data = {}
ip_data.update({'ip_address': n.get_address()})
ip_data.update({'properties': n.get_properties()})
ip_data.update({'config_id': config_id})
ip_data.update({'config_name': config_name})
ip_data.update({'id': n.get_id()})
ip_addresses.append(ip_data)
#return network_data
return ip_addresses
def calculate_block_stats(bam_block, config_id, config_name):
if bam_block.get_type() == 'IP6Block':
block_address = bam_block.get_property('prefix')
block = ipaddress.ip_network(block_address)
else:
block_address = bam_block.get_property('CIDR')
# block = ipaddress.ip_network(block_address, config_id, config_name)
block = ipaddress.ip_network(block_address)
block_data = {}
block_data_list = []
if bam_block.get_type() == 'IP4Block':
for network in bam_block.get_ip4_networks():
return_data = calculate_network_stats(network, config_id, config_name)
# This constructs adding network as key with all values that were returned from calculate network stats
block_data_list.extend(return_data)
for found_block in bam_block.get_ip4_blocks():
return_data = calculate_block_stats(found_block, config_id, config_name)
block_data_list.extend(return_data)
next_address = bam_block.get_next_available_ip4_address()
if next_address != '':
block_data.update({'next_available_address': next_address})
try:
next_available = bam_block.get_next_available_ip4_network(256, auto_create=False)
block_data.update({'next_available_network': next_available})
except APIException as e:
# Nothing to do here since we aren't adding anything to the object
next_available = ''
elif bam_block.get_type() == 'IP6Block':
        for network in bam_block.get_ip6_networks():
            return_data = calculate_network_stats(network, config_id, config_name)
            block_data_list.extend(return_data)
        for found_block in bam_block.get_ip6_blocks():
            return_data = calculate_block_stats(found_block, config_id, config_name)
            block_data_list.extend(return_data)
else:
next_available = ''
return block_data_list
# to tag address, add_ip4 - get back IP4Address object. Call object.link_entity(entity id of the tag)
#
# GET, PUT or POST
@route(app, '/lumeta/addiplist', methods=['GET', 'PUT', 'POST'])
# @util.rest_workflow_permission_required('addiplist_page')
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def addiplist_addiplist_page():
# are we authenticated?
g.user.logger.info('SUCCESS')
rdata_arr = request.get_json()
stats = {}
global api
for rdata in rdata_arr:
config_name = rdata["config_name"]
add_network = rdata["add_network_block"]
device_list = rdata["deviceList"]
added_ips = 0
dup_ips = 0
# Get API object up front and use it going forward. That way, auth key doesn't expire on us
# when we are midway in processing
api = g.user.get_api()
print(add_network)
print(device_list)
config = api.get_configuration(config_name)
for device in device_list:
print(device["ip"])
(added_ip, dup_ip, ip) = add_device(device, config, add_network)
added_ips += added_ip
dup_ips += dup_ip
# Add tag if ip was added
if added_ip == 1:
add_tag(ip)
stats.update({config_name: {"added_ips": added_ips, "dup_ips": dup_ips}})
return jsonify(stats)
def add_device(device, config, add_network):
# Algorithm to add ip to BAM
# check if block exists for this ip address.
try:
ip = device["ip"]
mac = ''
mac = device["mac"]
family = device["family"]
blk_data = None
dup_ip = 0
added_ip = 0
ip_obj = None
if family == '4':
blk_data = config.get_ip_range_by_ip('IP4Block', ip)
else:
blk_data = config.get_ip_range_by_ip('IP6Block', ip)
# if block exists, check for network
network_data = None
if family == '4':
network_data = config.get_ip_range_by_ip('IP4Network', ip)
else:
network_data = config.get_ip_range_by_ip('IP6Network', ip)
# If Block and Network exists, add ip address
# currently, assigning ip address is throwing API exception:Server raised fault: "Duplicate of another item"
# Need to see how we can catch it
if blk_data is not None and network_data is not None:
# Add ip address
ip_obj = assign_ip(network_data, ip, mac, family)
added_ip += 1
# If no block exists and add_network is set to true, create Block with /32, create Network with /32 and then
# create ip with /32
except PortalException as e:
# No block address containing input ip address exists. Check the flag and create one
if add_network:
try:
# Add Block, then network and finally add ip
# Below line is returning BAMException - IPv4 Blocks cannot be in size of /31 and /32
# So, at this point, if there is no container, do not add ip address
# config.add_ip4_block_by_cidr(ip)
if blk_data is None:
# add /30 for addressblock
block_network = ipaddress.ip_network(ip + '/30', strict=False)
config.add_ip4_block_by_cidr(block_network.exploded)
blk_data = config.get_ip_range_by_ip('IP4Block', ip)
if blk_data is not None:
# create network in block
blk_data.add_ip4_network(ip + '/32')
# create ip under above created network
network_data = config.get_ip_range_by_ip('IP4Network', ip)
if network_data is not None:
# Add ip address
ip_obj = assign_ip(network_data, ip, mac, family)
added_ip += 1
except APIException as ex:
if "Duplicate" in ex.get_message():
dup_ip += 1
# else:
# Seeing intermittent error while adding address block, so had to stop logging error
# app.loggererror('add_ip: ' + ex.message)
except APIException as ex:
# when ip address already exists, it returns BAMException with message 'Server raised fault: "Duplicate of another item"'
# "Duplicate" in ex.get_message()
if "Duplicate" in ex.get_message():
dup_ip += 1
else:
# TODO - how to log info message and not error?
            app.logger.error('add_ip: ' + ex.get_message())
return (added_ip, dup_ip, ip_obj)
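def _block_cidr_sketch():
    # Editor's illustrative sketch, not part of this workflow: how the /30
    # container created in add_device is derived. strict=False lets ipaddress
    # round the host address down to the enclosing network boundary instead of
    # raising ValueError for host bits being set.
    block = ipaddress.ip_network('10.1.2.9/30', strict=False)
    assert block.exploded == '10.1.2.8/30'
    return block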
def assign_ip(network_data, ip, mac, family):
    if mac != '':
if family == '4':
ip = network_data.assign_ip4_address(ip, mac, '', 'MAKE_DHCP_RESERVED')
else:
ip = network_data.assign_ip6_address(ip, mac, '', 'MAKE_DHCP_RESERVED')
else:
if family == '4':
ip = network_data.assign_ip4_address(ip, '', '', 'MAKE_STATIC')
else:
ip = network_data.assign_ip6_address(ip, '', '', 'MAKE_STATIC')
return ip
def add_tag(ip):
tag_group = None
tag = None
try:
tag_group = api.get_tag_group_by_name("Lumeta")
# If tag group exists, chances are that tag exists as well, but just in case if it doesn't
tag = tag_group.get_tag_by_name("Discovered Device")
except PortalException as e:
if tag_group is None:
# Tag group does not exist, create one
tag_group = api.add_tag_group("Lumeta")
if tag is None:
# Get tag group object. above API to add tag group is only returning object id instead of entire object
# Calling add_tag on it is throwing exception 'int' object has no attribute 'add_tag'
tag_group = api.get_tag_group_by_name("Lumeta")
# Create Tag under Lumeta
tag = tag_group.add_tag("Discovered Device")
try:
# assign tag to ip
ip.link_entity(tag)
except APIException as ex:
print(ex.get_message())
| 41.13141
| 129
| 0.623003
|
import ipaddress
from flask import request, g, abort, jsonify
from bluecat.api_exception import PortalException, APIException
from bluecat import route, util
from main_app import app
api = None
@route(app, '/lumeta/getnetworklist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def get_networks_get_networks_page():
g.user.logger.info('SUCCESS')
configurations = None
configurations_json = []
if g.user:
configurations = g.user.get_api().get_configurations()
for c in configurations:
            print(c)
configuration_json = {"id": c.get_id(), "name": c.get_name()}
configurations_json.append(configuration_json)
return jsonify(configurations_json)
@route(app, '/lumeta/getiplist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def getiplist_getiplist_page():
g.user.logger.info('SUCCESS')
networks = []
ip_addresses = []
if g.user:
configurations = g.user.get_api().get_configurations()
for c in configurations:
print(c)
configuration_json = {"id": c.get_id(), "name": c.get_name()}
try:
for nw in c.get_children_of_type('IP4Block'):
print(nw)
for n in g.user.get_api().get_by_object_types(nw.get_property('CIDR'),
['IP4Network', 'IP4Block', 'IP6Network', 'IP6Block']):
if '6' in n.get_type():
networks.append({'network_id': n.get_id(), 'display_text': n.get_properties()['prefix']})
ip_addresses.extend(calculate_block_stats(n, c.get_id(), c.get_name()))
else:
networks.append({'network_id': n.get_id(), 'display_text': n.get_properties()['CIDR']})
ip_addresses.extend(calculate_block_stats(n, c.get_id(), c.get_name()))
except Exception as e:
                app.logger.error('get_subnets: ' + str(e))
return jsonify(ip_addresses)
def calculate_network_stats(bam_network, config_id, config_name):
if bam_network.get_type() == 'IP4Network':
network_address = bam_network.get_property('CIDR')
network = ipaddress.ip_network(network_address)
else:
network_address = bam_network.get_property('prefix')
network = ipaddress.ip_network(network_address)
ip_addresses = []
ip_data = {}
if bam_network.get_type() == 'IP4Network':
for n in bam_network.get_children_of_type('IP4Address'):
ip_data = {}
ip_data.update({'ip_address': n.get_address()})
ip_data.update({'properties': n.get_properties()})
ip_data.update({'config_id': config_id})
ip_data.update({'config_name': config_name})
ip_data.update({'id': n.get_id()})
ip_addresses.append(ip_data)
next_address = bam_network.get_next_available_ip4_address()
else:
for n in bam_network.get_children_of_type('IP6Address'):
ip_data = {}
ip_data.update({'ip_address': n.get_address()})
ip_data.update({'properties': n.get_properties()})
ip_data.update({'config_id': config_id})
ip_data.update({'config_name': config_name})
ip_data.update({'id': n.get_id()})
ip_addresses.append(ip_data)
return ip_addresses
def calculate_block_stats(bam_block, config_id, config_name):
if bam_block.get_type() == 'IP6Block':
block_address = bam_block.get_property('prefix')
block = ipaddress.ip_network(block_address)
else:
block_address = bam_block.get_property('CIDR')
block = ipaddress.ip_network(block_address)
block_data = {}
block_data_list = []
if bam_block.get_type() == 'IP4Block':
for network in bam_block.get_ip4_networks():
return_data = calculate_network_stats(network, config_id, config_name)
block_data_list.extend(return_data)
for found_block in bam_block.get_ip4_blocks():
return_data = calculate_block_stats(found_block, config_id, config_name)
block_data_list.extend(return_data)
next_address = bam_block.get_next_available_ip4_address()
if next_address != '':
block_data.update({'next_available_address': next_address})
try:
next_available = bam_block.get_next_available_ip4_network(256, auto_create=False)
block_data.update({'next_available_network': next_available})
except APIException as e:
next_available = ''
elif bam_block.get_type() == 'IP6Block':
for network in bam_block.get_ip6_networks():
            return_data = calculate_network_stats(network, config_id, config_name)
            block_data_list.extend(return_data)
        for found_block in bam_block.get_ip6_blocks():
            return_data = calculate_block_stats(found_block, config_id, config_name)
            block_data_list.extend(return_data)
else:
next_available = ''
return block_data_list
# to tag address, add_ip4 - get back IP4Address object. Call object.link_entity(entity id of the tag)
#
# GET, PUT or POST
@route(app, '/lumeta/addiplist', methods=['GET', 'PUT', 'POST'])
# @util.rest_workflow_permission_required('addiplist_page')
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def addiplist_addiplist_page():
# are we authenticated?
g.user.logger.info('SUCCESS')
rdata_arr = request.get_json()
stats = {}
global api
for rdata in rdata_arr:
config_name = rdata["config_name"]
add_network = rdata["add_network_block"]
device_list = rdata["deviceList"]
added_ips = 0
dup_ips = 0
# Get API object up front and use it going forward. That way, auth key doesn't expire on us
api = g.user.get_api()
print(add_network)
print(device_list)
config = api.get_configuration(config_name)
for device in device_list:
print(device["ip"])
(added_ip, dup_ip, ip) = add_device(device, config, add_network)
added_ips += added_ip
dup_ips += dup_ip
if added_ip == 1:
add_tag(ip)
stats.update({config_name: {"added_ips": added_ips, "dup_ips": dup_ips}})
return jsonify(stats)
def add_device(device, config, add_network):
try:
ip = device["ip"]
mac = ''
mac = device["mac"]
family = device["family"]
blk_data = None
dup_ip = 0
added_ip = 0
ip_obj = None
if family == '4':
blk_data = config.get_ip_range_by_ip('IP4Block', ip)
else:
blk_data = config.get_ip_range_by_ip('IP6Block', ip)
network_data = None
if family == '4':
network_data = config.get_ip_range_by_ip('IP4Network', ip)
else:
network_data = config.get_ip_range_by_ip('IP6Network', ip)
if blk_data is not None and network_data is not None:
ip_obj = assign_ip(network_data, ip, mac, family)
added_ip += 1
except PortalException as e:
if add_network:
try:
if blk_data is None:
block_network = ipaddress.ip_network(ip + '/30', strict=False)
config.add_ip4_block_by_cidr(block_network.exploded)
blk_data = config.get_ip_range_by_ip('IP4Block', ip)
if blk_data is not None:
blk_data.add_ip4_network(ip + '/32')
network_data = config.get_ip_range_by_ip('IP4Network', ip)
if network_data is not None:
ip_obj = assign_ip(network_data, ip, mac, family)
added_ip += 1
except APIException as ex:
if "Duplicate" in ex.get_message():
dup_ip += 1
except APIException as ex:
if "Duplicate" in ex.get_message():
dup_ip += 1
else:
            app.logger.error('add_ip: ' + ex.get_message())
return (added_ip, dup_ip, ip_obj)
def assign_ip(network_data, ip, mac, family):
    if mac != '':
if family == '4':
ip = network_data.assign_ip4_address(ip, mac, '', 'MAKE_DHCP_RESERVED')
else:
ip = network_data.assign_ip6_address(ip, mac, '', 'MAKE_DHCP_RESERVED')
else:
if family == '4':
ip = network_data.assign_ip4_address(ip, '', '', 'MAKE_STATIC')
else:
ip = network_data.assign_ip6_address(ip, '', '', 'MAKE_STATIC')
return ip
def add_tag(ip):
tag_group = None
tag = None
try:
tag_group = api.get_tag_group_by_name("Lumeta")
tag = tag_group.get_tag_by_name("Discovered Device")
except PortalException as e:
if tag_group is None:
# Tag group does not exist, create one
tag_group = api.add_tag_group("Lumeta")
if tag is None:
# Get tag group object. above API to add tag group is only returning object id instead of entire object
# Calling add_tag on it is throwing exception 'int' object has no attribute 'add_tag'
tag_group = api.get_tag_group_by_name("Lumeta")
# Create Tag under Lumeta
tag = tag_group.add_tag("Discovered Device")
try:
# assign tag to ip
ip.link_entity(tag)
except APIException as ex:
print(ex.get_message())
| true
| true
|
7908b660b7e7a7290576ce10a318d7140ce3f0d3
| 1,849
|
py
|
Python
|
pypbbot/affairs/builtin.py
|
PHIKN1GHT/pypbbot_archived
|
8ab70830509c43b0babc53c9972d0a73481bdaa2
|
[
"MIT"
] | null | null | null |
pypbbot/affairs/builtin.py
|
PHIKN1GHT/pypbbot_archived
|
8ab70830509c43b0babc53c9972d0a73481bdaa2
|
[
"MIT"
] | null | null | null |
pypbbot/affairs/builtin.py
|
PHIKN1GHT/pypbbot_archived
|
8ab70830509c43b0babc53c9972d0a73481bdaa2
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import typing
if typing.TYPE_CHECKING:
from typing import Optional, Union, Any, Dict
from pypbbot.driver import AffairDriver
from pypbbot.typing import Event
from pypbbot.utils import Clips
from pypbbot.protocol import GroupMessageEvent, PrivateMessageEvent
from enum import Enum
import asyncio
from pypbbot.logging import logger
from pypbbot.utils import sendBackClipsTo
__all__ = ['HandlerPriority', 'BaseAffair', 'ChatAffair']
class HandlerPriority(Enum):
SYSTEM = 0 # SHOULD NOT USED BY PLUGINS
VERY_HIGH = 1
HIGH = 2
NORMAL = 3
LOW = 4
VERY_LOW = 5
def __lt__(self, other: object) -> bool:
if not isinstance(other, HandlerPriority):
return NotImplemented
return self.value < other.value
class BaseAffair:
def __init__(self, driver: AffairDriver, event: Event) -> None:
logger.debug(
'A new affair has been created for event [{}]'.format(type(event)))
self.event: Optional[Event] = event
self.driver: AffairDriver = driver
self.states: Dict[str, Any] = {}
self.finished: bool = False
return
class ChatAffair(BaseAffair):
def __init__(self, driver: AffairDriver, event: Union[GroupMessageEvent, PrivateMessageEvent], sender_id: int) -> None:
self.event: Union[GroupMessageEvent, PrivateMessageEvent] = event
self.driver: AffairDriver = driver
self.receiver_id: int = event.self_id
self.sender_id: int = sender_id
self.raw_message: str = event.raw_message
return
async def send(self, clips: Union[Clips, str, int, float]) -> Any:
return await sendBackClipsTo(self.event, clips)
def sendAndWait(self, clips: Union[Clips, str, int, float]) -> Any:
return asyncio.run(self.send(clips))
| 31.87931
| 123
| 0.685776
|
from __future__ import annotations
import typing
if typing.TYPE_CHECKING:
from typing import Optional, Union, Any, Dict
from pypbbot.driver import AffairDriver
from pypbbot.typing import Event
from pypbbot.utils import Clips
from pypbbot.protocol import GroupMessageEvent, PrivateMessageEvent
from enum import Enum
import asyncio
from pypbbot.logging import logger
from pypbbot.utils import sendBackClipsTo
__all__ = ['HandlerPriority', 'BaseAffair', 'ChatAffair']
class HandlerPriority(Enum):
SYSTEM = 0
VERY_HIGH = 1
HIGH = 2
NORMAL = 3
LOW = 4
VERY_LOW = 5
def __lt__(self, other: object) -> bool:
if not isinstance(other, HandlerPriority):
return NotImplemented
return self.value < other.value
class BaseAffair:
def __init__(self, driver: AffairDriver, event: Event) -> None:
logger.debug(
'A new affair has been created for event [{}]'.format(type(event)))
self.event: Optional[Event] = event
self.driver: AffairDriver = driver
self.states: Dict[str, Any] = {}
self.finished: bool = False
return
class ChatAffair(BaseAffair):
def __init__(self, driver: AffairDriver, event: Union[GroupMessageEvent, PrivateMessageEvent], sender_id: int) -> None:
self.event: Union[GroupMessageEvent, PrivateMessageEvent] = event
self.driver: AffairDriver = driver
self.receiver_id: int = event.self_id
self.sender_id: int = sender_id
self.raw_message: str = event.raw_message
return
async def send(self, clips: Union[Clips, str, int, float]) -> Any:
return await sendBackClipsTo(self.event, clips)
def sendAndWait(self, clips: Union[Clips, str, int, float]) -> Any:
return asyncio.run(self.send(clips))
| true
| true
|
7908b6624e084b51ff962a274290a758fa4a1469
| 18,069
|
py
|
Python
|
odoo-13.0/addons/account/tests/account_test_savepoint.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/account/tests/account_test_savepoint.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/account/tests/account_test_savepoint.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from odoo import fields
from odoo.tests.common import Form, SavepointCase
from odoo.tests import tagged
from contextlib import contextmanager
from unittest.mock import patch
import datetime
@tagged('post_install', '-at_install')
class AccountTestInvoicingCommon(SavepointCase):
@classmethod
def copy_account(cls, account):
suffix_nb = 1
while True:
new_code = '%s (%s)' % (account.code, suffix_nb)
if account.search_count([('company_id', '=', account.company_id.id), ('code', '=', new_code)]):
suffix_nb += 1
else:
return account.copy(default={'code': new_code})
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass()
if chart_template_ref:
chart_template = cls.env.ref(chart_template_ref)
else:
chart_template = cls.env.ref('l10n_generic_coa.configurable_chart_template', raise_if_not_found=False)
if not chart_template:
cls.tearDownClass()
# skipTest raises exception
cls.skipTest(cls, "Accounting Tests skipped because the user's company has no chart of accounts.")
# Create user.
user = cls.env['res.users'].create({
'name': 'Because I am accountman!',
'login': 'accountman',
'groups_id': [(6, 0, cls.env.user.groups_id.ids), (4, cls.env.ref('account.group_account_user').id)],
})
user.partner_id.email = 'accountman@test.com'
# Shadow the current environment/cursor with one having the report user.
# This is mandatory to test access rights.
cls.env = cls.env(user=user)
cls.cr = cls.env.cr
cls.company_data_2 = cls.setup_company_data('company_2_data', chart_template)
cls.company_data = cls.setup_company_data('company_1_data', chart_template)
user.write({
'company_ids': [(6, 0, (cls.company_data['company'] + cls.company_data_2['company']).ids)],
'company_id': cls.company_data['company'].id,
})
cls.currency_data = cls.setup_multi_currency_data()
# ==== Taxes ====
cls.tax_sale_a = cls.company_data['default_tax_sale']
cls.tax_sale_b = cls.company_data['default_tax_sale'].copy()
cls.tax_purchase_a = cls.company_data['default_tax_purchase']
cls.tax_purchase_b = cls.company_data['default_tax_purchase'].copy()
cls.tax_armageddon = cls.setup_armageddon_tax('complex_tax', cls.company_data)
# ==== Products ====
cls.product_a = cls.env['product.product'].create({
'name': 'product_a',
'uom_id': cls.env.ref('uom.product_uom_unit').id,
'lst_price': 1000.0,
'standard_price': 800.0,
'property_account_income_id': cls.company_data['default_account_revenue'].id,
'property_account_expense_id': cls.company_data['default_account_expense'].id,
'taxes_id': [(6, 0, cls.tax_sale_a.ids)],
'supplier_taxes_id': [(6, 0, cls.tax_purchase_a.ids)],
})
cls.product_b = cls.env['product.product'].create({
'name': 'product_b',
'uom_id': cls.env.ref('uom.product_uom_dozen').id,
'lst_price': 200.0,
'standard_price': 160.0,
'property_account_income_id': cls.copy_account(cls.company_data['default_account_revenue']).id,
'property_account_expense_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'taxes_id': [(6, 0, (cls.tax_sale_a + cls.tax_sale_b).ids)],
'supplier_taxes_id': [(6, 0, (cls.tax_purchase_a + cls.tax_purchase_b).ids)],
})
# ==== Fiscal positions ====
cls.fiscal_pos_a = cls.env['account.fiscal.position'].create({
'name': 'fiscal_pos_a',
'tax_ids': [
(0, None, {
'tax_src_id': cls.tax_sale_a.id,
'tax_dest_id': cls.tax_sale_b.id,
}),
(0, None, {
'tax_src_id': cls.tax_purchase_a.id,
'tax_dest_id': cls.tax_purchase_b.id,
}),
],
'account_ids': [
(0, None, {
'account_src_id': cls.product_a.property_account_income_id.id,
'account_dest_id': cls.product_b.property_account_income_id.id,
}),
(0, None, {
'account_src_id': cls.product_a.property_account_expense_id.id,
'account_dest_id': cls.product_b.property_account_expense_id.id,
}),
],
})
# ==== Payment terms ====
cls.pay_terms_a = cls.env.ref('account.account_payment_term_immediate')
cls.pay_terms_b = cls.env['account.payment.term'].create({
'name': '30% Advance End of Following Month',
'note': 'Payment terms: 30% Advance End of Following Month',
'line_ids': [
(0, 0, {
'value': 'percent',
'value_amount': 30.0,
'sequence': 400,
'days': 0,
'option': 'day_after_invoice_date',
}),
(0, 0, {
'value': 'balance',
'value_amount': 0.0,
'sequence': 500,
'days': 31,
'option': 'day_following_month',
}),
],
})
# ==== Partners ====
cls.partner_a = cls.env['res.partner'].create({
'name': 'partner_a',
'property_payment_term_id': cls.pay_terms_a.id,
'property_supplier_payment_term_id': cls.pay_terms_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].id,
'property_account_payable_id': cls.company_data['default_account_payable'].id,
'company_id': False,
})
cls.partner_b = cls.env['res.partner'].create({
'name': 'partner_b',
'property_payment_term_id': cls.pay_terms_b.id,
'property_supplier_payment_term_id': cls.pay_terms_b.id,
'property_account_position_id': cls.fiscal_pos_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].copy().id,
'property_account_payable_id': cls.company_data['default_account_payable'].copy().id,
'company_id': False,
})
# ==== Cash rounding ====
cls.cash_rounding_a = cls.env['account.cash.rounding'].create({
'name': 'add_invoice_line',
'rounding': 0.05,
'strategy': 'add_invoice_line',
'account_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'rounding_method': 'UP',
})
cls.cash_rounding_b = cls.env['account.cash.rounding'].create({
'name': 'biggest_tax',
'rounding': 0.05,
'strategy': 'biggest_tax',
'rounding_method': 'DOWN',
})
@classmethod
def setup_company_data(cls, company_name, chart_template, **kwargs):
        ''' Create a new company having the name passed as parameter.
        A chart of accounts will be installed to this company: the same as the current company one.
        The current user will get access to this company.

        :param company_name: The name of the company.
        :return: A dictionary containing all relevant accounting data for testing.
        '''
def search_account(company, chart_template, field_name, domain):
template_code = chart_template[field_name].code
domain = [('company_id', '=', company.id)] + domain
account = None
if template_code:
account = cls.env['account.account'].search(domain + [('code', '=like', template_code + '%')], limit=1)
if not account:
account = cls.env['account.account'].search(domain, limit=1)
return account
currency = chart_template.currency_id
company = cls.env['res.company'].create({
'name': company_name,
'currency_id': currency.id,
**kwargs,
})
cls.env.user.company_ids |= company
chart_template.try_loading(company=company)
# The currency could be different after the installation of the chart template.
company.write({'currency_id': kwargs.get('currency_id', currency.id)})
return {
'company': company,
'currency': company.currency_id,
'default_account_revenue': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)
], limit=1),
'default_account_expense': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)
], limit=1),
'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [
('user_type_id.type', '=', 'receivable')
]),
'default_account_payable': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id.type', '=', 'payable')
], limit=1),
'default_account_assets': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)
], limit=1),
'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_journal_misc': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'general')
], limit=1),
'default_journal_sale': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'sale')
], limit=1),
'default_journal_purchase': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'purchase')
], limit=1),
'default_journal_bank': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'bank')
], limit=1),
'default_journal_cash': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'cash')
], limit=1),
'default_tax_sale': company.account_sale_tax_id,
'default_tax_purchase': company.account_purchase_tax_id,
}
@classmethod
def setup_multi_currency_data(cls, default_values={}, rate2016=3.0, rate2017=2.0):
foreign_currency = cls.env['res.currency'].create({
'name': 'Gold Coin',
'symbol': '☺',
'rounding': 0.001,
'position': 'after',
'currency_unit_label': 'Gold',
'currency_subunit_label': 'Silver',
**default_values,
})
rate1 = cls.env['res.currency.rate'].create({
'name': '2016-01-01',
'rate': rate2016,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
rate2 = cls.env['res.currency.rate'].create({
'name': '2017-01-01',
'rate': rate2017,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
return {
'currency': foreign_currency,
'rates': rate1 + rate2,
}
@classmethod
def setup_armageddon_tax(cls, tax_name, company_data):
return cls.env['account.tax'].create({
'name': '%s (group)' % tax_name,
'amount_type': 'group',
'amount': 0.0,
'children_tax_ids': [
(0, 0, {
'name': '%s (child 1)' % tax_name,
'amount_type': 'percent',
'amount': 20.0,
'price_include': True,
'include_base_amount': True,
'tax_exigibility': 'on_invoice',
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
}),
(0, 0, {
'name': '%s (child 2)' % tax_name,
'amount_type': 'percent',
'amount': 10.0,
'tax_exigibility': 'on_payment',
'cash_basis_transition_account_id': company_data['default_account_tax_sale'].copy().id,
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
}),
],
})
@classmethod
def init_invoice(cls, move_type, partner=None, invoice_date=None):
move_form = Form(cls.env['account.move'].with_context(default_type=move_type))
move_form.invoice_date = invoice_date or fields.Date.from_string('2019-01-01')
move_form.partner_id = partner or cls.partner_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_b
return move_form.save()
def assertInvoiceValues(self, move, expected_lines_values, expected_move_values):
def sort_lines(lines):
return lines.sorted(lambda line: (line.exclude_from_invoice_tab, not bool(line.tax_line_id), line.name or '', line.balance))
self.assertRecordValues(sort_lines(move.line_ids.sorted()), expected_lines_values)
self.assertRecordValues(sort_lines(move.invoice_line_ids.sorted()), expected_lines_values[:len(move.invoice_line_ids)])
self.assertRecordValues(move, [expected_move_values])
@contextmanager
def mocked_today(self, forced_today):
        ''' Helper to easily mock the "today" date inside a python "with" statement.

        :param forced_today: The forced "today" date as a str, date or datetime object.
        :return: A context manager to be used like 'with self.mocked_today(<today>):'.
        '''
if isinstance(forced_today, str):
forced_today_date = fields.Date.from_string(forced_today)
forced_today_datetime = fields.Datetime.from_string(forced_today)
elif isinstance(forced_today, datetime.datetime):
forced_today_datetime = forced_today
forced_today_date = forced_today_datetime.date()
else:
forced_today_date = forced_today
forced_today_datetime = datetime.datetime.combine(forced_today_date, datetime.time())
def today(*args, **kwargs):
return forced_today_date
with patch.object(fields.Date, 'today', today):
with patch.object(fields.Date, 'context_today', today):
with patch.object(fields.Datetime, 'now', return_value=forced_today_datetime):
yield
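    def _mocked_today_demo(self):
        # Editor's sketch, not original source: inside the context manager both
        # fields.Date.today() and fields.Date.context_today() report the forced
        # date, so date-dependent business logic can be tested deterministically.
        with self.mocked_today('2019-01-01'):
            self.assertEqual(fields.Date.today(), fields.Date.from_string('2019-01-01'))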
class AccountingSavepointCase(AccountTestInvoicingCommon):
# Ensure the backward-compatibility before saas-13.2.
pass
| 44.178484
| 136
| 0.532293
|
from odoo import fields
from odoo.tests.common import Form, SavepointCase
from odoo.tests import tagged
from contextlib import contextmanager
from unittest.mock import patch
import datetime
@tagged('post_install', '-at_install')
class AccountTestInvoicingCommon(SavepointCase):
@classmethod
def copy_account(cls, account):
suffix_nb = 1
while True:
new_code = '%s (%s)' % (account.code, suffix_nb)
if account.search_count([('company_id', '=', account.company_id.id), ('code', '=', new_code)]):
suffix_nb += 1
else:
return account.copy(default={'code': new_code})
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass()
if chart_template_ref:
chart_template = cls.env.ref(chart_template_ref)
else:
chart_template = cls.env.ref('l10n_generic_coa.configurable_chart_template', raise_if_not_found=False)
if not chart_template:
cls.tearDownClass()
cls.skipTest(cls, "Accounting Tests skipped because the user's company has no chart of accounts.")
# Create user.
user = cls.env['res.users'].create({
'name': 'Because I am accountman!',
'login': 'accountman',
'groups_id': [(6, 0, cls.env.user.groups_id.ids), (4, cls.env.ref('account.group_account_user').id)],
})
user.partner_id.email = 'accountman@test.com'
# Shadow the current environment/cursor with one having the report user.
# This is mandatory to test access rights.
cls.env = cls.env(user=user)
cls.cr = cls.env.cr
cls.company_data_2 = cls.setup_company_data('company_2_data', chart_template)
cls.company_data = cls.setup_company_data('company_1_data', chart_template)
user.write({
'company_ids': [(6, 0, (cls.company_data['company'] + cls.company_data_2['company']).ids)],
'company_id': cls.company_data['company'].id,
})
cls.currency_data = cls.setup_multi_currency_data()
# ==== Taxes ====
cls.tax_sale_a = cls.company_data['default_tax_sale']
cls.tax_sale_b = cls.company_data['default_tax_sale'].copy()
cls.tax_purchase_a = cls.company_data['default_tax_purchase']
cls.tax_purchase_b = cls.company_data['default_tax_purchase'].copy()
cls.tax_armageddon = cls.setup_armageddon_tax('complex_tax', cls.company_data)
# ==== Products ====
cls.product_a = cls.env['product.product'].create({
'name': 'product_a',
'uom_id': cls.env.ref('uom.product_uom_unit').id,
'lst_price': 1000.0,
'standard_price': 800.0,
'property_account_income_id': cls.company_data['default_account_revenue'].id,
'property_account_expense_id': cls.company_data['default_account_expense'].id,
'taxes_id': [(6, 0, cls.tax_sale_a.ids)],
'supplier_taxes_id': [(6, 0, cls.tax_purchase_a.ids)],
})
cls.product_b = cls.env['product.product'].create({
'name': 'product_b',
'uom_id': cls.env.ref('uom.product_uom_dozen').id,
'lst_price': 200.0,
'standard_price': 160.0,
'property_account_income_id': cls.copy_account(cls.company_data['default_account_revenue']).id,
'property_account_expense_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'taxes_id': [(6, 0, (cls.tax_sale_a + cls.tax_sale_b).ids)],
'supplier_taxes_id': [(6, 0, (cls.tax_purchase_a + cls.tax_purchase_b).ids)],
})
# ==== Fiscal positions ====
cls.fiscal_pos_a = cls.env['account.fiscal.position'].create({
'name': 'fiscal_pos_a',
'tax_ids': [
(0, None, {
'tax_src_id': cls.tax_sale_a.id,
'tax_dest_id': cls.tax_sale_b.id,
}),
(0, None, {
'tax_src_id': cls.tax_purchase_a.id,
'tax_dest_id': cls.tax_purchase_b.id,
}),
],
'account_ids': [
(0, None, {
'account_src_id': cls.product_a.property_account_income_id.id,
'account_dest_id': cls.product_b.property_account_income_id.id,
}),
(0, None, {
'account_src_id': cls.product_a.property_account_expense_id.id,
'account_dest_id': cls.product_b.property_account_expense_id.id,
}),
],
})
# ==== Payment terms ====
cls.pay_terms_a = cls.env.ref('account.account_payment_term_immediate')
cls.pay_terms_b = cls.env['account.payment.term'].create({
'name': '30% Advance End of Following Month',
'note': 'Payment terms: 30% Advance End of Following Month',
'line_ids': [
(0, 0, {
'value': 'percent',
'value_amount': 30.0,
'sequence': 400,
'days': 0,
'option': 'day_after_invoice_date',
}),
(0, 0, {
'value': 'balance',
'value_amount': 0.0,
'sequence': 500,
'days': 31,
'option': 'day_following_month',
}),
],
})
# ==== Partners ====
cls.partner_a = cls.env['res.partner'].create({
'name': 'partner_a',
'property_payment_term_id': cls.pay_terms_a.id,
'property_supplier_payment_term_id': cls.pay_terms_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].id,
'property_account_payable_id': cls.company_data['default_account_payable'].id,
'company_id': False,
})
cls.partner_b = cls.env['res.partner'].create({
'name': 'partner_b',
'property_payment_term_id': cls.pay_terms_b.id,
'property_supplier_payment_term_id': cls.pay_terms_b.id,
'property_account_position_id': cls.fiscal_pos_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].copy().id,
'property_account_payable_id': cls.company_data['default_account_payable'].copy().id,
'company_id': False,
})
# ==== Cash rounding ====
cls.cash_rounding_a = cls.env['account.cash.rounding'].create({
'name': 'add_invoice_line',
'rounding': 0.05,
'strategy': 'add_invoice_line',
'account_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'rounding_method': 'UP',
})
cls.cash_rounding_b = cls.env['account.cash.rounding'].create({
'name': 'biggest_tax',
'rounding': 0.05,
'strategy': 'biggest_tax',
'rounding_method': 'DOWN',
})
@classmethod
def setup_company_data(cls, company_name, chart_template, **kwargs):
def search_account(company, chart_template, field_name, domain):
template_code = chart_template[field_name].code
domain = [('company_id', '=', company.id)] + domain
account = None
if template_code:
account = cls.env['account.account'].search(domain + [('code', '=like', template_code + '%')], limit=1)
if not account:
account = cls.env['account.account'].search(domain, limit=1)
return account
currency = chart_template.currency_id
company = cls.env['res.company'].create({
'name': company_name,
'currency_id': currency.id,
**kwargs,
})
cls.env.user.company_ids |= company
chart_template.try_loading(company=company)
# The currency could be different after the installation of the chart template.
company.write({'currency_id': kwargs.get('currency_id', currency.id)})
return {
'company': company,
'currency': company.currency_id,
'default_account_revenue': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)
], limit=1),
'default_account_expense': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)
], limit=1),
'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [
('user_type_id.type', '=', 'receivable')
]),
'default_account_payable': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id.type', '=', 'payable')
], limit=1),
'default_account_assets': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)
], limit=1),
'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_journal_misc': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'general')
], limit=1),
'default_journal_sale': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'sale')
], limit=1),
'default_journal_purchase': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'purchase')
], limit=1),
'default_journal_bank': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'bank')
], limit=1),
'default_journal_cash': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'cash')
], limit=1),
'default_tax_sale': company.account_sale_tax_id,
'default_tax_purchase': company.account_purchase_tax_id,
}
@classmethod
def setup_multi_currency_data(cls, default_values={}, rate2016=3.0, rate2017=2.0):
foreign_currency = cls.env['res.currency'].create({
'name': 'Gold Coin',
'symbol': '☺',
'rounding': 0.001,
'position': 'after',
'currency_unit_label': 'Gold',
'currency_subunit_label': 'Silver',
**default_values,
})
rate1 = cls.env['res.currency.rate'].create({
'name': '2016-01-01',
'rate': rate2016,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
rate2 = cls.env['res.currency.rate'].create({
'name': '2017-01-01',
'rate': rate2017,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
return {
'currency': foreign_currency,
'rates': rate1 + rate2,
}
@classmethod
def setup_armageddon_tax(cls, tax_name, company_data):
return cls.env['account.tax'].create({
'name': '%s (group)' % tax_name,
'amount_type': 'group',
'amount': 0.0,
'children_tax_ids': [
(0, 0, {
'name': '%s (child 1)' % tax_name,
'amount_type': 'percent',
'amount': 20.0,
'price_include': True,
'include_base_amount': True,
'tax_exigibility': 'on_invoice',
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
}),
(0, 0, {
'name': '%s (child 2)' % tax_name,
'amount_type': 'percent',
'amount': 10.0,
'tax_exigibility': 'on_payment',
'cash_basis_transition_account_id': company_data['default_account_tax_sale'].copy().id,
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
}),
],
})
@classmethod
def init_invoice(cls, move_type, partner=None, invoice_date=None):
move_form = Form(cls.env['account.move'].with_context(default_type=move_type))
move_form.invoice_date = invoice_date or fields.Date.from_string('2019-01-01')
move_form.partner_id = partner or cls.partner_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_b
return move_form.save()
def assertInvoiceValues(self, move, expected_lines_values, expected_move_values):
def sort_lines(lines):
return lines.sorted(lambda line: (line.exclude_from_invoice_tab, not bool(line.tax_line_id), line.name or '', line.balance))
self.assertRecordValues(sort_lines(move.line_ids.sorted()), expected_lines_values)
self.assertRecordValues(sort_lines(move.invoice_line_ids.sorted()), expected_lines_values[:len(move.invoice_line_ids)])
self.assertRecordValues(move, [expected_move_values])
@contextmanager
def mocked_today(self, forced_today):
if isinstance(forced_today, str):
forced_today_date = fields.Date.from_string(forced_today)
forced_today_datetime = fields.Datetime.from_string(forced_today)
elif isinstance(forced_today, datetime.datetime):
forced_today_datetime = forced_today
forced_today_date = forced_today_datetime.date()
else:
forced_today_date = forced_today
forced_today_datetime = datetime.datetime.combine(forced_today_date, datetime.time())
def today(*args, **kwargs):
return forced_today_date
with patch.object(fields.Date, 'today', today):
with patch.object(fields.Date, 'context_today', today):
with patch.object(fields.Datetime, 'now', return_value=forced_today_datetime):
yield
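# Hedged usage sketch for the context manager above (names come from this
# class; the chosen date is illustrative only):
# with self.mocked_today('2019-06-15'):
#     self.assertEqual(fields.Date.today(), fields.Date.from_string('2019-06-15'))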
class AccountingSavepointCase(AccountTestInvoicingCommon):
# Ensure the backward-compatibility before saas-13.2.
pass
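# Hedged usage sketch: a hypothetical test case built on the common setup
# above; the assertion follows from init_invoice setting the partner on the
# form, not from anything else in this file.
# class TestInvoiceTotals(AccountTestInvoicingCommon):
#     def test_out_invoice_partner(self):
#         invoice = self.init_invoice('out_invoice', partner=self.partner_a)
#         self.assertEqual(invoice.partner_id, self.partner_a)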
| true
| true
|
7908b7c4069057a87abf6c324be528370b648b1a
| 25,168
|
py
|
Python
|
diofant/polys/numberfields.py
|
diofant/diofant
|
0677d240eb5de697f851c6c844fefc8039754edc
|
[
"BSD-3-Clause"
] | 57
|
2016-09-13T23:16:26.000Z
|
2022-03-29T06:45:51.000Z
|
diofant/polys/numberfields.py
|
diofant/diofant
|
0677d240eb5de697f851c6c844fefc8039754edc
|
[
"BSD-3-Clause"
] | 402
|
2016-05-11T11:11:47.000Z
|
2022-03-31T14:27:02.000Z
|
diofant/polys/numberfields.py
|
diofant/diofant
|
0677d240eb5de697f851c6c844fefc8039754edc
|
[
"BSD-3-Clause"
] | 20
|
2016-05-11T08:17:37.000Z
|
2021-09-10T09:15:51.000Z
|
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
"""
Return a factor having root ``v``.
It is assumed that one of the factors has root ``v``.
"""
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
"""
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms whose surds squared are coprime to ``g``;
then it takes the field norm to eliminate ``sqrt(g)``.
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
"""
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
# p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
# there are no surds
return p
surds = [z for y, z in a]
for i, si in enumerate(surds): # pragma: no branch
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
"""
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
"""
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
# eliminate the square roots
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
# by construction `p` has root `pn`
# the minimal polynomial is the factor vanishing in x = pn
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
"""
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
"""
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
# mp1a = mp1.subs({x: x - y})
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
# if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
# r = mp2(x - a), so that `r` is irreducible
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
"""Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
"""Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
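# Hedged doctest-style illustrations for the two helpers above (expected
# outputs are hand-computed from the stated identities, not file-verified):
# >>> _invertx(x**2 - 2*x - 1, x)
# -x**2 - 2*x + 1
# >>> _muly(x**2 - 2, x, y)
# x**2 - 2*y**2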
def _minpoly_pow(ex, pw, x, dom):
"""
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
"""
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
"""Returns ``minimal_polynomial(Add(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
"""Returns ``minimal_polynomial(Mul(*a), dom)``."""
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
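# Hedged illustrations for the two helpers above (private API; the outputs
# are the classical minimal polynomials of sqrt(2) + sqrt(3) and of
# sqrt(2)*sqrt(3) = sqrt(6), stated as assumptions):
# >>> _minpoly_add(x, QQ, sqrt(2), sqrt(3))
# x**4 - 10*x**2 + 1
# >>> _minpoly_mul(x, QQ, sqrt(2), sqrt(3))
# x**2 - 6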
def _minpoly_sin(ex, x):
"""
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
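# Hedged example: sin(pi/7) does not auto-evaluate, so it exercises the
# odd-prime-denominator branch; the classical result (from sin(7*a) = 0) is
# >>> minimal_polynomial(sin(pi/7))(x)
# 64*x**6 - 112*x**4 + 56*x**2 - 7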
def _minpoly_cos(ex, x):
"""
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
"""
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
# for a = pi*p/q, cos(q*a) = T_q(cos(a)) = (-1)**p
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
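# Hedged example for the c.numerator == 2 branch above; the classical
# minimal polynomial of cos(2*pi/7) is
# >>> minimal_polynomial(cos(2*pi/7))(x)
# 8*x**3 + 4*x**2 - 4*x - 1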
def _minpoly_tan(ex, x):
"""Returns the minimal polynomial of ``tan(ex)``."""
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
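# Hedged example; the classical minimal polynomial of tan(pi/5) is
# >>> minimal_polynomial(tan(pi/5))(x)
# x**4 - 10*x**2 + 5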
def _minpoly_exp(ex, x):
"""Returns the minimal polynomial of ``exp(ex)``."""
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
# x**(2*q) = product(factors)
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
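# Hedged example matching the q == 8 branch above: E**(I*pi/8) is a
# primitive 16th root of unity, so its minimal polynomial is the
# cyclotomic polynomial x**8 + 1.
# >>> minimal_polynomial(E**(I*pi/8))(x)
# x**8 + 1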
def _minpoly_rootof(ex, x):
"""Returns the minimal polynomial of a ``RootOf`` object."""
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
"""
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
If ``compose``, the minimal polynomials of the subexpressions
of ``ex`` are computed, then the arithmetic operations on them are
performed using the resultant and factorization. If ``groebner``,
a bottom-up algorithm using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
"""
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
'polynomial')
ex = sympify(ex)
if ex.is_number:
# not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
"""
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
"""
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1] # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
"""Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
"""
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
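# Hedged sketch: for extension = [sqrt(2), sqrt(3)] a primitive element is
# sqrt(2) + sqrt(3) (coefficients [1, 1]), whose minimal polynomial is
# x**4 - 10*x**2 + 1; g is that polynomial and H holds the coordinates of
# sqrt(2) and sqrt(3) in the power basis of the primitive element.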
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm."""
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256): # pragma: no branch
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1] # basis[:-1] elements are linearly independent
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization."""
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields."""
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
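# Hedged usage sketch: QQ(sqrt(2)) embeds into QQ(sqrt(2) + sqrt(3)), so a
# coefficient list is returned, while the reverse call fails the degree
# check (m % n != 0) and returns None:
# >>> field_isomorphism(QQ.algebraic_field(sqrt(2)),
# ...                   QQ.algebraic_field(sqrt(2) + sqrt(3))) is not None
# True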
| 30.618005
| 88
| 0.54458
|
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
if isinstance(factors[0], tuple):
factors = [f[0] for f in factors]
if len(factors) == 1:
return factors[0]
points = {x: v}
symbols = dom.symbols if hasattr(dom, 'symbols') else []
t = QQ(1, 10)
for n in range(bound**len(symbols)):
prec1 = 10
n_temp = n
for s in symbols:
points[s] = n_temp % bound
n_temp = n_temp // bound
while True:
candidates = []
eps = t**(prec1 // 2)
for f in factors:
if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
candidates.append(f)
if candidates:
factors = candidates
if len(factors) == 1:
return factors[0]
if prec1 > prec:
break
prec1 *= 2
raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
def is_sqrt(expr):
return expr.is_Pow and expr.exp == Rational(1, 2)
p = p.doit()
a = []
for y in p.args:
if not y.is_Mul:
if is_sqrt(y):
a.append((Integer(1), y**2))
elif y.is_Atom:
a.append((y, Integer(1)))
else:
raise NotImplementedError
else:
sifted = sift(y.args, is_sqrt)
a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
a.sort(key=lambda z: z[1])
if a[-1][1] == 1:
return p
surds = [z for y, z in a]
for i, si in enumerate(surds):
if si != 1:
break
_, b1, _ = _split_gcd(*surds[i:])
a1 = []
a2 = []
for y, z in a:
if z in b1:
a1.append(y*sqrt(z))
else:
a2.append(y*sqrt(z))
p1 = Add(*a1)
p2 = Add(*a2)
return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
p = sympify(p)
n = sympify(n)
assert n.is_Integer and n > 1 and _is_sum_surds(p)
pn = root(p, n)
p -= x
while 1:
p1 = _separate_sq(p)
if p1 is p:
p = p1.subs({x: x**n})
break
else:
p = p1
factors = factor_list(p)[1]
return _choose_factor(factors, x, pn)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
y = Dummy(str(x))
if mp1 is None:
mp1 = _minpoly_compose(ex1, x, dom)
if mp2 is None:
mp2 = _minpoly_compose(ex2, y, dom)
else:
mp2 = mp2.subs({x: y})
if op is Add:
(p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
r = p1.compose(p2)
mp1a = r.as_expr()
elif op is Mul:
mp1a = _muly(mp1, x, y)
else:
raise NotImplementedError('option not available')
r = resultant(mp1a, mp2, gens=[y, x])
deg1 = degree(mp1, x)
deg2 = degree(mp2, y)
if op is Mul and deg1 == 1 or deg2 == 1:
return r
r = r.as_poly(x, domain=dom)
_, factors = r.factor_list()
res = _choose_factor(factors, x, op(ex1, ex2), dom)
return res.as_expr()
def _invertx(p, x):
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _muly(p, x, y):
(p1,) = parallel_poly_from_expr((p,), x)[0]
n = degree(p1)
a = [c * x**i * y**(n - i) for (i,), c in p1.terms()]
return Add(*a)
def _minpoly_pow(ex, pw, x, dom):
pw = sympify(pw)
mp = _minpoly_compose(ex, x, dom)
if not pw.is_rational:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
if pw < 0:
if mp == x:
raise ZeroDivisionError(f'{ex} is zero')
mp = _invertx(mp, x)
if pw == -1:
return mp
pw = -pw
ex = 1/ex
y = Dummy(str(x))
mp = mp.subs({x: y})
n, d = pw.as_numer_denom()
res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
_, factors = res.factor_list()
res = _choose_factor(factors, x, ex**pw, dom)
return res.as_expr()
def _minpoly_add(x, dom, *a):
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
p = a[0] + a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Add, p, px, x, dom, mp1=mp)
p = p + px
return mp
def _minpoly_mul(x, dom, *a):
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
p = a[0] * a[1]
for px in a[2:]:
mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
p = p * px
return mp
def _minpoly_sin(ex, x):
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
n = c.denominator
q = sympify(n)
if q.is_prime:
# for a = pi*p/q with q odd prime, using chebyshevt
# write sin(q*a) = mp(sin(a))*sin(a);
# the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
a = chebyshevt_poly(n, polys=True).all_coeffs()
return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
if c.numerator == 1:
if q == 9:
return 64*x**6 - 96*x**4 + 36*x**2 - 3
if n % 2 == 1:
# for a = pi*p/q with q odd, use
# sin(q*a) = 0 to see that the minimal polynomial must be
# a factor of chebyshevt_poly(n)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a)
_, factors = factor_list(r)
res = _choose_factor(factors, x, ex)
return res
expr = sqrt((1 - cos(2*c*pi))/2)
return _minpoly_compose(expr, x, QQ)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
c, a = ex.args[0].as_coeff_Mul()
if a is pi:
if c.numerator == 1:
if c.denominator == 7:
return 8*x**3 - 4*x**2 - 4*x + 1
elif c.denominator == 9:
return 8*x**3 - 6*x - 1
elif c.numerator == 2:
q = sympify(c.denominator)
if q.is_prime:
s = _minpoly_sin(ex, x)
return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
n = int(c.denominator)
a = chebyshevt_poly(n, polys=True).all_coeffs()
a = [x**(n - i)*a[n - i] for i in range(n + 1)]
r = Add(*a) - (-1)**c.numerator
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
c, a = ex.args[0].as_coeff_Mul()
if a is pi and c.is_Rational:
c *= 2
n = c.denominator
a = n if c.numerator % 2 == 0 else 1
terms = []
for k in range((c.numerator + 1) % 2, n + 1, 2):
terms.append(a*x**k)
a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
r = Add(*terms)
_, factors = factor_list(r)
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
c, a = ex.exp.as_coeff_Mul()
q = sympify(c.denominator)
if a == I*pi:
if c.numerator in (1, -1):
if q == 3:
return x**2 - x + 1
if q == 4:
return x**4 + 1
if q == 6:
return x**4 - x**2 + 1
if q == 8:
return x**8 + 1
if q == 9:
return x**6 - x**3 + 1
if q == 10:
return x**8 - x**6 + x**4 - x**2 + 1
if q.is_prime:
s = 0
for i in range(q):
s += (-x)**i
return s
factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
return _choose_factor(factors, x, ex)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_rootof(ex, x):
domain = ex.poly.domain
if domain.is_IntegerRing:
return ex.poly(x)
else:
return ex.poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
if ex.is_Rational:
return ex.denominator*x - ex.numerator
if ex is I:
return x**2 + 1
if ex is GoldenRatio:
return x**2 - x - 1
if ex == exp_polar(0):
return x - 1
if hasattr(dom, 'symbols') and ex in dom.symbols:
return x - ex
if dom.is_RationalField and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.denominator for _, y in r1]
lcmdens = functools.reduce(lcm, dens, 1)
nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1)(x)
# use the fact that in Diofant canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.denominator*x**lcmdens - ex2.numerator
ex2 = Mul(*[bx**ex for bx, ex in r1])
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
elif ex.is_Pow:
if ex.base is E:
res = _minpoly_exp(ex, x)
else:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif isinstance(ex, sin):
res = _minpoly_sin(ex, x)
elif isinstance(ex, cos):
res = _minpoly_cos(ex, x)
elif isinstance(ex, tan):
res = _minpoly_tan(ex, x)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
res = _minpoly_rootof(ex, x)
elif isinstance(ex, conjugate):
res = _minpoly_compose(ex.args[0], x, dom)
elif isinstance(ex, Abs):
res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
elif isinstance(ex, re):
res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
elif isinstance(ex, im):
res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
else:
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
if method is None:
method = query('minpoly_method')
_minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
try:
_minpoly = _minpoly_methods[method]
except KeyError:
raise ValueError(f"'{method}' is not a valid algorithm for computing minimal "
'polynomial')
ex = sympify(ex)
if ex.is_number:
ex = _mexpand(ex, recursive=True)
x = Dummy('x')
domain = args.get('domain',
QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
result = _minpoly(ex, x, domain)
_, factors = factor_list(result, x, domain=domain)
result = _choose_factor(factors, x, ex, dom=domain)
result = result.primitive()[1]
return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols = {}, {}
def update_mapping(ex, exp, base=None):
if ex in mapping:
return symbols[ex]
a = next(generator)
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_expr(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is I:
return update_mapping(ex, 2, 1)
elif ex is GoldenRatio:
return bottom_up_scan(ex.expand(func=True))
elif ex.is_Rational:
return ex
elif ex.is_Symbol:
return ex
elif ex.is_Add or ex.is_Mul:
return ex.func(*[bottom_up_scan(g) for g in ex.args])
elif ex.is_Pow:
if ex.exp.is_Rational:
base, exp = ex.base, ex.exp
if exp.is_nonnegative:
if exp.is_noninteger:
base, exp = base**exp.numerator, Rational(1, exp.denominator)
base = bottom_up_scan(base)
else:
bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
base, exp = update_mapping(1/base, bmp), -exp
return update_mapping(ex, exp.denominator, -base**exp.numerator)
elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
if ex.poly.domain.is_IntegerRing:
return update_mapping(ex, ex.poly)
else:
return update_mapping(ex, ex.poly.sqf_norm()[-1])
elif isinstance(ex, conjugate):
return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
method='groebner'))
elif isinstance(ex, Abs):
return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
elif isinstance(ex, re):
return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
elif isinstance(ex, im):
return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
if ex.is_Pow and ex.exp.is_negative:
n, d = Integer(1), bottom_up_scan(1/ex)
else:
n, d = bottom_up_scan(ex), Integer(1)
F = [d*x - n] + list(mapping.values())
G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
return G[-1]
def primitive_element(extension, **args):
if not extension:
raise ValueError("can't compute primitive element for empty extension")
extension = list(uniq(extension))
x = Dummy('x')
domain = args.get('domain', QQ)
F = [minimal_polynomial(e, domain=domain) for e in extension]
Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
*H, g = groebner(F + [f], *(Y + [x]), domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
break # G is not a triangular set
else:
H[i] = H[i].set_domain(domain)
else:
g = g.eject(*Y).set_domain(domain)
break
else:
if len(F) == 1:
g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
else: # pragma: no cover
raise RuntimeError('run out of coefficient configurations')
_, factors = factor_list(g, domain=domain)
t = sum(c*e for c, e in zip(coeffs, extension))
g = _choose_factor(factors, x, t, dom=domain)
H = [h.rem(g).rep.all_coeffs() for h in H]
_, g = PurePoly(g).clear_denoms(convert=True)
if g.LC() != 1:
for d in divisors(g.LC())[1:]: # pragma: no branch
new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
_, new_g = new_g.monic().clear_denoms(convert=True)
if new_g.LC() == 1:
g = new_g
H = [[c/d**n for n, c in enumerate(h)] for h in H]
coeffs = [c*d for c in coeffs]
break
return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
x = f.gen
g = b.minpoly.replace(x)
m = g.degree()
a, b = a.ext, b.ext
for n in mpmath.libmp.libintmath.giant_steps(32, 256):
with mpmath.workdps(n):
A, B = lambdify((), [a, b], 'mpmath')()
basis = [B**i for i in range(m)] + [A]
coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
if coeffs:
assert coeffs[-1]
h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
if f.compose(h).rem(g).is_zero:
return h.rep.all_coeffs()
else:
break
def field_isomorphism_factor(a, b):
p = a.minpoly.set_domain(b)
_, factors = p.factor_list()
for f, _ in factors:
if f.degree() == 1:
root = -f.rep[(0,)]/f.rep[(1,)]
if (a.ext - b.to_expr(root)).evalf(chop=True) == 0:
return root.rep.all_coeffs()
def field_isomorphism(a, b, **args):
if not all(isinstance(_, AlgebraicField) for _ in (a, b)):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if a == b:
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if a.domain == b.domain:
if m % n:
return
elif a.domain.is_RationalField:
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
k = m // n
for p, q in factorint(da).items():
if q % 2 and db % (p**k):
return
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
| true
| true
|
7908b8000df007a8b6e108b0cdc6294cd6f99470
| 1,158
|
py
|
Python
|
.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/feedback_on_exception_test_4/test.py
|
ariawahyuw/Coffee-Machine
|
eafb5943aebed35124bff8e7989b6129c6a5b906
|
[
"Apache-2.0"
] | null | null | null |
.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/feedback_on_exception_test_4/test.py
|
ariawahyuw/Coffee-Machine
|
eafb5943aebed35124bff8e7989b6129c6a5b906
|
[
"Apache-2.0"
] | 1
|
2022-02-10T13:32:31.000Z
|
2022-02-10T13:32:31.000Z
|
.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/feedback_on_exception_test_4/test.py
|
ariawahyuw/Coffee-Machine
|
eafb5943aebed35124bff8e7989b6129c6a5b906
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import textwrap
from typing import Any, List
from hstest.check_result import CheckResult
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
class FeedbackOnExceptionTest4(StageTest):
def generate(self) -> List[TestCase]:
return [
TestCase(feedback_on_exception={
ZeroDivisionError: 'Do not divide by zero!',
AttributeError: 'Attribute Error raised!',
Exception: 'Base ex raised'
})
]
def check(self, reply: str, attach: Any) -> CheckResult:
return CheckResult(True, '')
class Test(unittest.TestCase):
def test(self):
status, feedback = FeedbackOnExceptionTest4(
'tests.outcomes.feedback_on_exception_test_4.program'
).run_tests()
self.assertEqual(textwrap.dedent('''\
Exception in test #1
Base ex raised
Traceback (most recent call last):
File "program.py", line 1, in <module>
raise Exception()
Exception'''), feedback)
self.assertEqual(status, -1)
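# Note: the module under test (tests.outcomes.feedback_on_exception_test_4
# .program) is implied by the expected traceback above to be the single line
# raise Exception()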
| 27.571429
| 65
| 0.606218
|
import unittest
import textwrap
from typing import Any, List
from hstest.check_result import CheckResult
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
class FeedbackOnExceptionTest4(StageTest):
def generate(self) -> List[TestCase]:
return [
TestCase(feedback_on_exception={
ZeroDivisionError: 'Do not divide by zero!',
AttributeError: 'Attribute Error raised!',
Exception: 'Base ex raised'
})
]
def check(self, reply: str, attach: Any) -> CheckResult:
return CheckResult(True, '')
class Test(unittest.TestCase):
def test(self):
status, feedback = FeedbackOnExceptionTest4(
'tests.outcomes.feedback_on_exception_test_4.program'
).run_tests()
self.assertEqual(textwrap.dedent('''\
Exception in test #1
Base ex raised
Traceback (most recent call last):
File "program.py", line 1, in <module>
raise Exception()
Exception'''), feedback)
self.assertEqual(status, -1)
| true
| true
|
7908b85deac4d7cb6a9b5644c1a18390db2bba6a
| 4,067
|
py
|
Python
|
setup.py
|
cglazner/pyro
|
f6a690e55c13cbef789d231b6c8ea71b22bd0bbb
|
[
"MIT"
] | 1
|
2021-06-17T13:47:40.000Z
|
2021-06-17T13:47:40.000Z
|
setup.py
|
cll27/pyro
|
8279d69225ecc8ff07ba65aa2a9101720c926e86
|
[
"MIT"
] | null | null | null |
setup.py
|
cll27/pyro
|
8279d69225ecc8ff07ba65aa2a9101720c926e86
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import os
import subprocess
import sys
from setuptools import find_packages, setup
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
VERSION = """
# This file is auto-generated with the version information during setup.py installation.
__version__ = '{}'
"""
# Find pyro version.
for line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):
if line.startswith('version_prefix = '):
version = line.strip().split()[2][1:-1]
# Append current commit sha to version
commit_sha = ''
try:
commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],
cwd=PROJECT_PATH).decode('ascii').strip()
except OSError:
pass
# Write version to _version.py
if commit_sha:
version += '+{}'.format(commit_sha)
with open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:
f.write(VERSION.format(version))
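# For illustration (hypothetical values): with version_prefix = '0.2.1' and
# HEAD at commit abc1234, the generated pyro/_version.py would contain
# __version__ = '0.2.1+abc1234'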
# Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl
# When releasing on pypi, make sure pandoc is on your system:
# $ brew install pandoc # OS X
# $ sudo apt-get install pandoc # Ubuntu Linux
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError) as e:
sys.stderr.write('Failed to convert README.md to rst:\n {}\n'.format(e))
sys.stderr.flush()
long_description = open('README.md').read()
# Remove badges since they will always be obsolete.
blacklist = ['Build Status', 'Latest Version', 'Documentation Status',
'travis-ci.org', 'pypi.python.org', 'pyro-ppl.readthedocs.io']
long_description = '\n'.join(
[line for line in long_description.split('\n') if not any(patt in line for patt in blacklist)])
# examples/tutorials
EXTRAS_REQUIRE = [
'jupyter>=1.0.0',
'matplotlib>=1.3',
'observations>=0.1.4',
'pillow',
'torchvision',
'visdom>=0.1.4',
'pandas',
'wget',
]
if sys.version_info[0] == 2:
EXTRAS_REQUIRE.append('functools32')
setup(
name='pyro-ppl',
version=version,
description='A Python library for probabilistic modeling and inference',
long_description=long_description,
packages=find_packages(include=['pyro', 'pyro.*']),
url='http://pyro.ai',
author='Uber AI Labs',
author_email='pyro@uber.com',
install_requires=[
# if you add any additional libraries, please also
# add them to `docs/requirements.txt`
'contextlib2',
'graphviz>=0.8',
'networkx>=2.2',
'numpy>=1.7',
'opt_einsum>=2.2.0',
'six>=1.10.0',
'torch==0.4.0',
'tqdm>=4.25',
],
extras_require={
'extras': EXTRAS_REQUIRE,
'test': EXTRAS_REQUIRE + [
'nbval',
'pytest==3.7',
'pytest-cov',
'scipy>=0.19.0',
'ipython<=6.5.0', # https://github.com/jupyter/jupyter_console/issues/158
],
'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],
'dev': EXTRAS_REQUIRE + [
'flake8',
'isort',
'nbformat',
'nbsphinx>=0.3.2',
'nbstripout',
'nbval',
'pypandoc',
'pytest==3.7',
'pytest-xdist',
'ipython<=6.5.0', # https://github.com/jupyter/jupyter_console/issues/158
'scipy>=0.19.0',
'sphinx',
'sphinx_rtd_theme',
'yapf',
],
},
tests_require=['flake8', 'pytest==3.7'],
keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',
license='MIT License',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
# yapf
)
| 31.284615
| 99
| 0.601672
|
from __future__ import absolute_import, division, print_function
import os
import subprocess
import sys
from setuptools import find_packages, setup
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
VERSION = """
# This file is auto-generated with the version information during setup.py installation.
__version__ = '{}'
"""
for line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):
if line.startswith('version_prefix = '):
version = line.strip().split()[2][1:-1]
commit_sha = ''
try:
commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],
cwd=PROJECT_PATH).decode('ascii').strip()
except OSError:
pass
if commit_sha:
version += '+{}'.format(commit_sha)
with open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:
f.write(VERSION.format(version))
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError) as e:
sys.stderr.write('Failed to convert README.md to rst:\n {}\n'.format(e))
sys.stderr.flush()
long_description = open('README.md').read()
blacklist = ['Build Status', 'Latest Version', 'Documentation Status',
'travis-ci.org', 'pypi.python.org', 'pyro-ppl.readthedocs.io']
long_description = '\n'.join(
[line for line in long_description.split('\n') if not any(patt in line for patt in blacklist)])
EXTRAS_REQUIRE = [
'jupyter>=1.0.0',
'matplotlib>=1.3',
'observations>=0.1.4',
'pillow',
'torchvision',
'visdom>=0.1.4',
'pandas',
'wget',
]
if sys.version_info[0] == 2:
EXTRAS_REQUIRE.append('functools32')
setup(
name='pyro-ppl',
version=version,
description='A Python library for probabilistic modeling and inference',
long_description=long_description,
packages=find_packages(include=['pyro', 'pyro.*']),
url='http://pyro.ai',
author='Uber AI Labs',
author_email='pyro@uber.com',
install_requires=[
'contextlib2',
'graphviz>=0.8',
'networkx>=2.2',
'numpy>=1.7',
'opt_einsum>=2.2.0',
'six>=1.10.0',
'torch==0.4.0',
'tqdm>=4.25',
],
extras_require={
'extras': EXTRAS_REQUIRE,
'test': EXTRAS_REQUIRE + [
'nbval',
'pytest==3.7',
'pytest-cov',
'scipy>=0.19.0',
'ipython<=6.5.0',
],
'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],
'dev': EXTRAS_REQUIRE + [
'flake8',
'isort',
'nbformat',
'nbsphinx>=0.3.2',
'nbstripout',
'nbval',
'pypandoc',
'pytest==3.7',
'pytest-xdist',
'ipython<=6.5.0',
'scipy>=0.19.0',
'sphinx',
'sphinx_rtd_theme',
'yapf',
],
},
tests_require=['flake8', 'pytest==3.7'],
keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',
license='MIT License',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
)
| true
| true
|
7908bc71001894c0e07b02b3cbd50c62d3c4f09d
| 926
|
py
|
Python
|
runners/scenario_runner.py
|
cgeller/WorldOnRails
|
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
|
[
"MIT"
] | 108
|
2021-05-04T02:13:04.000Z
|
2022-03-24T02:11:55.000Z
|
runners/scenario_runner.py
|
cgeller/WorldOnRails
|
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
|
[
"MIT"
] | 45
|
2021-05-10T13:32:51.000Z
|
2022-03-23T07:23:19.000Z
|
runners/scenario_runner.py
|
cgeller/WorldOnRails
|
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
|
[
"MIT"
] | 22
|
2021-05-04T16:38:17.000Z
|
2022-03-25T16:40:00.000Z
|
import ray
from copy import deepcopy
from leaderboard.leaderboard_evaluator import LeaderboardEvaluator
from leaderboard.utils.statistics_manager import StatisticsManager
@ray.remote(num_cpus=1./8, num_gpus=1./4, max_restarts=100, max_task_retries=-1)
class ScenarioRunner():
def __init__(self, args, scenario_class, scenario, route, checkpoint='simulation_results.json', town=None, port=1000, tm_port=1002, debug=False):
args = deepcopy(args)
# Inject args
args.scenario_class = scenario_class
args.town = town
args.port = port
args.trafficManagerPort = tm_port
args.scenarios = scenario
args.routes = route
args.debug = debug
args.checkpoint = checkpoint
args.record = ''
self.runner = LeaderboardEvaluator(args, StatisticsManager())
self.args = args
def run(self):
return self.runner.run(self.args)
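# Hedged usage sketch (standard Ray actor API; the argument values are
# illustrative, not taken from this file):
# runner = ScenarioRunner.remote(args, scenario_class, scenario, route,
#                                port=2000, tm_port=2002)
# result = ray.get(runner.run.remote())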
| 34.296296
| 149
| 0.695464
|
import ray
from copy import deepcopy
from leaderboard.leaderboard_evaluator import LeaderboardEvaluator
from leaderboard.utils.statistics_manager import StatisticsManager
@ray.remote(num_cpus=1./8, num_gpus=1./4, max_restarts=100, max_task_retries=-1)
class ScenarioRunner():
def __init__(self, args, scenario_class, scenario, route, checkpoint='simulation_results.json', town=None, port=1000, tm_port=1002, debug=False):
args = deepcopy(args)
args.scenario_class = scenario_class
args.town = town
args.port = port
args.trafficManagerPort = tm_port
args.scenarios = scenario
args.routes = route
args.debug = debug
args.checkpoint = checkpoint
args.record = ''
self.runner = LeaderboardEvaluator(args, StatisticsManager())
self.args = args
def run(self):
return self.runner.run(self.args)
| true
| true
|
7908bc8a3329f40cac7ca19d569c48fa7a4ffa38
| 18,537
|
py
|
Python
|
models/official/amoeba_net/amoeba_net.py
|
priumoraes/tpu
|
c7fbe70f00956e802c23c9e831d7482613968fa7
|
[
"Apache-2.0"
] | 5
|
2019-03-04T02:24:19.000Z
|
2020-12-17T16:04:22.000Z
|
models/official/amoeba_net/amoeba_net.py
|
priumoraes/tpu
|
c7fbe70f00956e802c23c9e831d7482613968fa7
|
[
"Apache-2.0"
] | 1
|
2019-08-20T04:44:50.000Z
|
2019-08-20T04:44:50.000Z
|
models/official/amoeba_net/amoeba_net.py
|
priumoraes/tpu
|
c7fbe70f00956e802c23c9e831d7482613968fa7
|
[
"Apache-2.0"
] | 2
|
2019-02-28T12:22:39.000Z
|
2020-01-07T06:05:54.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""TensorFlow AmoebaNet Example.
GCP Run Example
python amoeba_net.py --data_dir=gs://cloud-tpu-datasets/imagenet-data --model_dir=gs://cloud-tpu-ckpts/models/ameoba_net_x/ \
--drop_connect_keep_prob=1.0 --cell_name=evol_net_x --num_cells=12 --reduction_size=256 --image_size=299 --num_epochs=48 \
--train_batch_size=256 --num_epochs_per_eval=4.0 --lr_decay_value=0.89 --lr_num_epochs_per_decay=1 --alsologtostderr \
--tpu=huangyp-tpu-0
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import itertools
import math
import os
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import numpy as np
from PIL import Image
import tensorflow as tf
import amoeba_net_model as model_lib
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the GCE zone from metadata.')
# General Parameters
flags.DEFINE_integer(
'num_shards', 8,
'Number of shards (TPU cores).')
flags.DEFINE_integer(
'distributed_group_size', 1,
    help='Size of the distributed batch norm group. '
    'Default is normalization over local examples only. '
    'When set to a value greater than 1, it will enable '
    'a distributed batch norm. To enable a global batch norm, '
    'set distributed_group_size to FLAGS.num_shards.')
flags.DEFINE_bool(
'use_tpu', True,
'Use TPUs rather than CPU or GPU.')
flags.DEFINE_string(
'data_dir', '',
'Directory where input data is stored')
flags.DEFINE_string(
'model_dir', None,
'Directory where model output is stored')
flags.DEFINE_string(
'export_dir', None,
'The directory where the exported SavedModel will be stored.')
flags.DEFINE_bool(
'export_to_tpu', False,
help='Whether to export additional metagraph with "serve, tpu" tags'
' in addition to "serve" only metagraph.')
flags.DEFINE_integer(
'iterations_per_loop', 500,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer(
'train_batch_size', 256,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
'eval_batch_size', 256,
'Global (not per-shard) batch size for evaluation')
flags.DEFINE_float(
'num_epochs', 48.,
    'Number of epochs to use for training.')
flags.DEFINE_float(
'num_epochs_per_eval', 1.,
'Number of training epochs to run between evaluations.')
flags.DEFINE_string(
'mode', 'train_and_eval',
'Mode to run: train, eval, train_and_eval, or predict')
flags.DEFINE_integer(
'save_checkpoints_steps', None,
'Interval (in steps) at which the model data '
'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
'enable_hostcall', True,
    'Whether to run the host_call, which is executed every training step and'
    ' is generally used for generating training summaries (train loss,'
    ' learning rate, etc.). When --enable_hostcall=True, there could'
    ' be a performance drop if the host_call function is slow and cannot'
    ' keep up with the TPU-side computation.')
# Model specific parameters
flags.DEFINE_bool('use_aux_head', True, 'Include aux head or not.')
flags.DEFINE_float(
'aux_scaling', 0.4, 'Scaling factor of aux_head')
flags.DEFINE_float(
'batch_norm_decay', 0.9, 'Batch norm decay.')
flags.DEFINE_float(
'batch_norm_epsilon', 1e-5, 'Batch norm epsilon.')
flags.DEFINE_float(
'dense_dropout_keep_prob', None, 'Dense dropout keep probability.')
flags.DEFINE_float(
'drop_connect_keep_prob', 1.0, 'Drop connect keep probability.')
flags.DEFINE_string(
'drop_connect_version', None, 'Drop connect version.')
flags.DEFINE_string(
'cell_name', 'amoeba_net_d', 'Which network to run.')
flags.DEFINE_integer(
'num_cells', 12, 'Total number of cells.')
flags.DEFINE_integer(
'reduction_size', 256, 'Default cell reduction size.')
flags.DEFINE_integer(
'stem_reduction_size', 32, 'Stem filter size.')
flags.DEFINE_float(
'weight_decay', 4e-05, 'Weight decay for slim model.')
flags.DEFINE_integer(
'num_label_classes', 1001, 'The number of classes that images fit into.')
# Training hyper-parameters
flags.DEFINE_float(
'lr', 0.64, 'Learning rate.')
flags.DEFINE_string(
'optimizer', 'rmsprop',
'Optimizer (one of sgd, rmsprop, momentum)')
flags.DEFINE_float(
'moving_average_decay', 0.9999,
    'Moving average decay rate.')
flags.DEFINE_float(
'lr_decay_value', 0.9,
'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
'lr_num_epochs_per_decay', 1,
'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_string(
'lr_decay_method', 'exponential',
'Method of decay: exponential, cosine, constant, stepwise')
flags.DEFINE_float(
'lr_warmup_epochs', 3.0,
'Learning rate increased from zero linearly to lr for the first '
'lr_warmup_epochs.')
flags.DEFINE_float('gradient_clipping_by_global_norm', 0,
'gradient_clipping_by_global_norm')
flags.DEFINE_integer(
    'image_size', 299, 'Size of image; height and width are assumed equal.')
flags.DEFINE_integer(
'num_train_images', 1281167, 'The number of images in the training set.')
flags.DEFINE_integer(
'num_eval_images', 50000, 'The number of images in the evaluation set.')
flags.DEFINE_bool(
'use_bp16', True, 'If True, use bfloat16 for activations')
flags.DEFINE_integer(
'eval_timeout', 60*60*24,
'Maximum seconds between checkpoints before evaluation terminates.')
# Inference configuration.
flags.DEFINE_bool(
    'inference_with_all_cores', True, 'Whether to round-robin '
    'among all cores visible to the host for TPU inference.')
flags.DEFINE_bool(
'add_warmup_requests', True,
    'Whether to add warmup requests into the export saved model dir, '
    'especially for TPU inference.')
flags.DEFINE_string('model_name', 'amoeba_net',
'Serving model name used for the model server.')
flags.DEFINE_multi_integer(
'inference_batch_sizes', [8],
'Known inference batch sizes used to warm up for each core.')
FLAGS = flags.FLAGS
def build_run_config():
"""Return RunConfig for TPU estimator."""
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
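  # In eval mode, size the TPU loop so a single loop covers one full pass
  # over the evaluation set.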
eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size
iterations_per_loop = (eval_steps if FLAGS.mode == 'eval'
else FLAGS.iterations_per_loop)
save_checkpoints_steps = FLAGS.save_checkpoints_steps or iterations_per_loop
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
keep_checkpoint_max=None,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_shards,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
))
return run_config
def build_image_serving_input_receiver_fn(shape,
dtype=tf.float32):
"""Returns a input_receiver_fn for raw images during serving."""
def _preprocess_image(encoded_image):
"""Preprocess a single raw image."""
image = tf.image.decode_image(encoded_image, channels=shape[-1])
image.set_shape(shape)
return tf.cast(image, dtype)
def serving_input_receiver_fn():
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(
features=images, receiver_tensors=image_bytes_list)
return serving_input_receiver_fn
def _encode_image(image_array, fmt='PNG'):
"""encodes an (numpy) image array to string.
Args:
image_array: (numpy) image array
fmt: image format to use
Returns:
encoded image string
"""
pil_image = Image.fromarray(image_array)
image_io = io.BytesIO()
pil_image.save(image_io, format=fmt)
return image_io.getvalue()
def write_warmup_requests(savedmodel_dir,
model_name,
image_size,
batch_sizes=None,
num_requests=8):
"""Writes warmup requests for inference into a tfrecord file.
Args:
savedmodel_dir: string, the file to the exported model folder.
model_name: string, a model name used inside the model server.
image_size: int, size of image, assuming image height and width.
batch_sizes: list, a list of batch sizes to create different input requests.
num_requests: int, number of requests per batch size.
Raises:
ValueError: if batch_sizes is not a valid integer list.
"""
if not isinstance(batch_sizes, list) or not batch_sizes:
raise ValueError('batch sizes should be a valid non-empty list.')
extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra')
tf.gfile.MkDir(extra_assets_dir)
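  # TensorFlow Serving picks up warmup data from
  # assets.extra/tf_serving_warmup_requests inside the SavedModel directory.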
with tf.python_io.TFRecordWriter(
os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')) as writer:
for batch_size in batch_sizes:
for _ in range(num_requests):
request = predict_pb2.PredictRequest()
image = np.uint8(np.random.rand(image_size, image_size, 3) * 255)
request.inputs['input'].CopyFrom(
tf.make_tensor_proto(
[_encode_image(image)] * batch_size, shape=[batch_size]))
request.model_spec.name = model_name
request.model_spec.signature_name = 'serving_default'
log = prediction_log_pb2.PredictionLog(
predict_log=prediction_log_pb2.PredictLog(request=request))
writer.write(log.SerializeToString())
# TODO(ereal): simplify this.
def override_with_flags(hparams):
"""Overrides parameters with flag values."""
override_flag_names = [
'aux_scaling',
'train_batch_size',
'batch_norm_decay',
'batch_norm_epsilon',
'dense_dropout_keep_prob',
'drop_connect_keep_prob',
'drop_connect_version',
'eval_batch_size',
'gradient_clipping_by_global_norm',
'lr',
'lr_decay_method',
'lr_decay_value',
'lr_num_epochs_per_decay',
'moving_average_decay',
'image_size',
'num_cells',
'reduction_size',
'stem_reduction_size',
'num_epochs',
'num_epochs_per_eval',
'optimizer',
'enable_hostcall',
'use_aux_head',
'use_bp16',
'use_tpu',
'lr_warmup_epochs',
'weight_decay',
'num_shards',
'distributed_group_size',
'num_train_images',
'num_eval_images',
'num_label_classes',
]
for flag_name in override_flag_names:
flag_value = getattr(FLAGS, flag_name, 'INVALID')
if flag_value == 'INVALID':
tf.logging.fatal('Unknown flag %s.' % str(flag_name))
if flag_value is not None:
_set_or_add_hparam(hparams, flag_name, flag_value)
def build_hparams():
"""Build tf.Hparams for training Amoeba Net."""
hparams = model_lib.build_hparams(FLAGS.cell_name)
override_with_flags(hparams)
return hparams
def _terminate_eval():
tf.logging.info('Timeout passed with no new checkpoints ... terminating eval')
return True
def _get_next_checkpoint():
return tf.contrib.training.checkpoints_iterator(
FLAGS.model_dir,
timeout=FLAGS.eval_timeout,
timeout_fn=_terminate_eval)
def _set_or_add_hparam(hparams, name, value):
if getattr(hparams, name, None) is None:
hparams.add_hparam(name, value)
else:
hparams.set_hparam(name, value)
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def main(_):
mode = FLAGS.mode
data_dir = FLAGS.data_dir
model_dir = FLAGS.model_dir
hparams = build_hparams()
  estimator_params = {}
train_steps_per_epoch = int(
math.ceil(hparams.num_train_images / float(hparams.train_batch_size)))
eval_steps = hparams.num_eval_images // hparams.eval_batch_size
eval_batch_size = (None if mode == 'train' else
hparams.eval_batch_size)
model = model_lib.AmoebaNetEstimatorModel(hparams, model_dir)
if hparams.use_tpu:
run_config = build_run_config()
image_classifier = tf.contrib.tpu.TPUEstimator(
model_fn=model.model_fn,
use_tpu=True,
config=run_config,
        params=estimator_params,
predict_batch_size=eval_batch_size,
train_batch_size=hparams.train_batch_size,
eval_batch_size=eval_batch_size,
export_to_tpu=FLAGS.export_to_tpu,
experimental_exported_model_uses_all_cores=FLAGS
.inference_with_all_cores)
else:
save_checkpoints_steps = (FLAGS.save_checkpoints_steps or
FLAGS.iterations_per_loop)
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps)
image_classifier = tf.estimator.Estimator(
model_fn=model.model_fn,
config=run_config,
        params=estimator_params)
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = model_lib.InputPipeline(
is_training=True, data_dir=data_dir, hparams=hparams)
imagenet_eval = model_lib.InputPipeline(
is_training=False, data_dir=data_dir, hparams=hparams)
if hparams.moving_average_decay < 1:
eval_hooks = [model_lib.LoadEMAHook(model_dir,
hparams.moving_average_decay)]
else:
eval_hooks = []
if mode == 'eval':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting to evaluate.')
try:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
hooks=eval_hooks,
checkpoint_path=checkpoint)
tf.logging.info('Evaluation results: %s' % eval_results)
except tf.errors.NotFoundError:
# skip checkpoint if it gets deleted prior to evaluation
        tf.logging.info('Checkpoint %s no longer exists ... skipping', checkpoint)
elif mode == 'train_and_eval':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
tf.logging.info('Starting training at step=%d.' % current_step)
train_steps_per_eval = int(
hparams.num_epochs_per_eval * train_steps_per_epoch)
# Final Evaluation if training is finished.
if current_step >= hparams.num_epochs * train_steps_per_epoch:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
while current_step < hparams.num_epochs * train_steps_per_epoch:
image_classifier.train(
input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
current_step += train_steps_per_eval
tf.logging.info('Starting evaluation at step=%d.' % current_step)
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
elif mode == 'predict':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting prediction ...')
time_hook = model_lib.SessionTimingHook()
eval_hooks.append(time_hook)
result_iter = image_classifier.predict(
input_fn=imagenet_eval.input_fn,
hooks=eval_hooks,
checkpoint_path=checkpoint,
yield_single_examples=False)
results = list(itertools.islice(result_iter, eval_steps))
tf.logging.info('Inference speed = {} images per second.'.format(
time_hook.compute_speed(len(results) * eval_batch_size)))
elif mode == 'train':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
total_step = int(hparams.num_epochs * train_steps_per_epoch)
if current_step < total_step:
tf.logging.info('Starting training ...')
image_classifier.train(
input_fn=imagenet_train.input_fn,
steps=total_step-current_step)
else:
tf.logging.info('Mode not found.')
if FLAGS.export_dir is not None:
tf.logging.info('Starting exporting saved model ...')
serving_shape = [hparams.image_size, hparams.image_size, 3]
export_path = image_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=build_image_serving_input_receiver_fn(
serving_shape),
as_text=True)
if FLAGS.add_warmup_requests:
write_warmup_requests(
export_path,
FLAGS.model_name,
hparams.image_size,
batch_sizes=FLAGS.inference_batch_sizes)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| 35.579655
| 125
| 0.710579
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import itertools
import math
import os
from absl import app
from absl import flags
import absl.logging as _logging
import numpy as np
from PIL import Image
import tensorflow as tf
import amoeba_net_model as model_lib
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the GCE zone from metadata.')
flags.DEFINE_integer(
'num_shards', 8,
'Number of shards (TPU cores).')
flags.DEFINE_integer(
'distributed_group_size', 1,
    help='Size of the distributed batch norm group. '
    'Default is normalization over local examples only. '
    'When set to a value greater than 1, it will enable '
    'a distributed batch norm. To enable a global batch norm, '
    'set distributed_group_size to FLAGS.num_shards.')
flags.DEFINE_bool(
'use_tpu', True,
'Use TPUs rather than CPU or GPU.')
flags.DEFINE_string(
'data_dir', '',
'Directory where input data is stored')
flags.DEFINE_string(
'model_dir', None,
'Directory where model output is stored')
flags.DEFINE_string(
'export_dir', None,
'The directory where the exported SavedModel will be stored.')
flags.DEFINE_bool(
'export_to_tpu', False,
help='Whether to export additional metagraph with "serve, tpu" tags'
' in addition to "serve" only metagraph.')
flags.DEFINE_integer(
'iterations_per_loop', 500,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer(
'train_batch_size', 256,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
'eval_batch_size', 256,
'Global (not per-shard) batch size for evaluation')
flags.DEFINE_float(
'num_epochs', 48.,
    'Number of epochs to use for training.')
flags.DEFINE_float(
'num_epochs_per_eval', 1.,
'Number of training epochs to run between evaluations.')
flags.DEFINE_string(
'mode', 'train_and_eval',
'Mode to run: train, eval, train_and_eval, or predict')
flags.DEFINE_integer(
'save_checkpoints_steps', None,
'Interval (in steps) at which the model data '
'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
'enable_hostcall', True,
    'Whether to run the host_call, which is executed every training step and'
    ' is generally used for generating training summaries (train loss,'
    ' learning rate, etc.). When --enable_hostcall=True, there could'
    ' be a performance drop if the host_call function is slow and cannot'
    ' keep up with the TPU-side computation.')
flags.DEFINE_bool('use_aux_head', True, 'Include aux head or not.')
flags.DEFINE_float(
'aux_scaling', 0.4, 'Scaling factor of aux_head')
flags.DEFINE_float(
'batch_norm_decay', 0.9, 'Batch norm decay.')
flags.DEFINE_float(
'batch_norm_epsilon', 1e-5, 'Batch norm epsilon.')
flags.DEFINE_float(
'dense_dropout_keep_prob', None, 'Dense dropout keep probability.')
flags.DEFINE_float(
'drop_connect_keep_prob', 1.0, 'Drop connect keep probability.')
flags.DEFINE_string(
'drop_connect_version', None, 'Drop connect version.')
flags.DEFINE_string(
'cell_name', 'amoeba_net_d', 'Which network to run.')
flags.DEFINE_integer(
'num_cells', 12, 'Total number of cells.')
flags.DEFINE_integer(
'reduction_size', 256, 'Default cell reduction size.')
flags.DEFINE_integer(
'stem_reduction_size', 32, 'Stem filter size.')
flags.DEFINE_float(
'weight_decay', 4e-05, 'Weight decay for slim model.')
flags.DEFINE_integer(
'num_label_classes', 1001, 'The number of classes that images fit into.')
flags.DEFINE_float(
'lr', 0.64, 'Learning rate.')
flags.DEFINE_string(
'optimizer', 'rmsprop',
'Optimizer (one of sgd, rmsprop, momentum)')
flags.DEFINE_float(
'moving_average_decay', 0.9999,
    'Moving average decay rate.')
flags.DEFINE_float(
'lr_decay_value', 0.9,
'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
'lr_num_epochs_per_decay', 1,
'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_string(
'lr_decay_method', 'exponential',
'Method of decay: exponential, cosine, constant, stepwise')
flags.DEFINE_float(
'lr_warmup_epochs', 3.0,
'Learning rate increased from zero linearly to lr for the first '
'lr_warmup_epochs.')
flags.DEFINE_float('gradient_clipping_by_global_norm', 0,
'gradient_clipping_by_global_norm')
flags.DEFINE_integer(
    'image_size', 299, 'Size of image; height and width are assumed equal.')
flags.DEFINE_integer(
'num_train_images', 1281167, 'The number of images in the training set.')
flags.DEFINE_integer(
'num_eval_images', 50000, 'The number of images in the evaluation set.')
flags.DEFINE_bool(
'use_bp16', True, 'If True, use bfloat16 for activations')
flags.DEFINE_integer(
'eval_timeout', 60*60*24,
'Maximum seconds between checkpoints before evaluation terminates.')
flags.DEFINE_bool(
    'inference_with_all_cores', True, 'Whether to round-robin '
    'among all cores visible to the host for TPU inference.')
flags.DEFINE_bool(
'add_warmup_requests', True,
    'Whether to add warmup requests into the export saved model dir, '
    'especially for TPU inference.')
flags.DEFINE_string('model_name', 'amoeba_net',
'Serving model name used for the model server.')
flags.DEFINE_multi_integer(
'inference_batch_sizes', [8],
'Known inference batch sizes used to warm up for each core.')
FLAGS = flags.FLAGS
def build_run_config():
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size
iterations_per_loop = (eval_steps if FLAGS.mode == 'eval'
else FLAGS.iterations_per_loop)
save_checkpoints_steps = FLAGS.save_checkpoints_steps or iterations_per_loop
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
keep_checkpoint_max=None,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_shards,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
))
return run_config
def build_image_serving_input_receiver_fn(shape,
dtype=tf.float32):
def _preprocess_image(encoded_image):
image = tf.image.decode_image(encoded_image, channels=shape[-1])
image.set_shape(shape)
return tf.cast(image, dtype)
def serving_input_receiver_fn():
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(
features=images, receiver_tensors=image_bytes_list)
return serving_input_receiver_fn
def _encode_image(image_array, fmt='PNG'):
pil_image = Image.fromarray(image_array)
image_io = io.BytesIO()
pil_image.save(image_io, format=fmt)
return image_io.getvalue()
def write_warmup_requests(savedmodel_dir,
model_name,
image_size,
batch_sizes=None,
num_requests=8):
if not isinstance(batch_sizes, list) or not batch_sizes:
raise ValueError('batch sizes should be a valid non-empty list.')
extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra')
tf.gfile.MkDir(extra_assets_dir)
with tf.python_io.TFRecordWriter(
os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')) as writer:
for batch_size in batch_sizes:
for _ in range(num_requests):
request = predict_pb2.PredictRequest()
image = np.uint8(np.random.rand(image_size, image_size, 3) * 255)
request.inputs['input'].CopyFrom(
tf.make_tensor_proto(
[_encode_image(image)] * batch_size, shape=[batch_size]))
request.model_spec.name = model_name
request.model_spec.signature_name = 'serving_default'
log = prediction_log_pb2.PredictionLog(
predict_log=prediction_log_pb2.PredictLog(request=request))
writer.write(log.SerializeToString())
def override_with_flags(hparams):
override_flag_names = [
'aux_scaling',
'train_batch_size',
'batch_norm_decay',
'batch_norm_epsilon',
'dense_dropout_keep_prob',
'drop_connect_keep_prob',
'drop_connect_version',
'eval_batch_size',
'gradient_clipping_by_global_norm',
'lr',
'lr_decay_method',
'lr_decay_value',
'lr_num_epochs_per_decay',
'moving_average_decay',
'image_size',
'num_cells',
'reduction_size',
'stem_reduction_size',
'num_epochs',
'num_epochs_per_eval',
'optimizer',
'enable_hostcall',
'use_aux_head',
'use_bp16',
'use_tpu',
'lr_warmup_epochs',
'weight_decay',
'num_shards',
'distributed_group_size',
'num_train_images',
'num_eval_images',
'num_label_classes',
]
for flag_name in override_flag_names:
flag_value = getattr(FLAGS, flag_name, 'INVALID')
if flag_value == 'INVALID':
tf.logging.fatal('Unknown flag %s.' % str(flag_name))
if flag_value is not None:
_set_or_add_hparam(hparams, flag_name, flag_value)
def build_hparams():
hparams = model_lib.build_hparams(FLAGS.cell_name)
override_with_flags(hparams)
return hparams
def _terminate_eval():
tf.logging.info('Timeout passed with no new checkpoints ... terminating eval')
return True
def _get_next_checkpoint():
return tf.contrib.training.checkpoints_iterator(
FLAGS.model_dir,
timeout=FLAGS.eval_timeout,
timeout_fn=_terminate_eval)
def _set_or_add_hparam(hparams, name, value):
if getattr(hparams, name, None) is None:
hparams.add_hparam(name, value)
else:
hparams.set_hparam(name, value)
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
except:
return 0
def main(_):
mode = FLAGS.mode
data_dir = FLAGS.data_dir
model_dir = FLAGS.model_dir
hparams = build_hparams()
  estimator_params = {}
train_steps_per_epoch = int(
math.ceil(hparams.num_train_images / float(hparams.train_batch_size)))
eval_steps = hparams.num_eval_images // hparams.eval_batch_size
eval_batch_size = (None if mode == 'train' else
hparams.eval_batch_size)
model = model_lib.AmoebaNetEstimatorModel(hparams, model_dir)
if hparams.use_tpu:
run_config = build_run_config()
image_classifier = tf.contrib.tpu.TPUEstimator(
model_fn=model.model_fn,
use_tpu=True,
config=run_config,
        params=estimator_params,
predict_batch_size=eval_batch_size,
train_batch_size=hparams.train_batch_size,
eval_batch_size=eval_batch_size,
export_to_tpu=FLAGS.export_to_tpu,
experimental_exported_model_uses_all_cores=FLAGS
.inference_with_all_cores)
else:
save_checkpoints_steps = (FLAGS.save_checkpoints_steps or
FLAGS.iterations_per_loop)
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps)
image_classifier = tf.estimator.Estimator(
model_fn=model.model_fn,
config=run_config,
        params=estimator_params)
imagenet_train = model_lib.InputPipeline(
is_training=True, data_dir=data_dir, hparams=hparams)
imagenet_eval = model_lib.InputPipeline(
is_training=False, data_dir=data_dir, hparams=hparams)
if hparams.moving_average_decay < 1:
eval_hooks = [model_lib.LoadEMAHook(model_dir,
hparams.moving_average_decay)]
else:
eval_hooks = []
if mode == 'eval':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting to evaluate.')
try:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
hooks=eval_hooks,
checkpoint_path=checkpoint)
tf.logging.info('Evaluation results: %s' % eval_results)
except tf.errors.NotFoundError:
        tf.logging.info('Checkpoint %s no longer exists ... skipping', checkpoint)
elif mode == 'train_and_eval':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
tf.logging.info('Starting training at step=%d.' % current_step)
train_steps_per_eval = int(
hparams.num_epochs_per_eval * train_steps_per_epoch)
if current_step >= hparams.num_epochs * train_steps_per_epoch:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
while current_step < hparams.num_epochs * train_steps_per_epoch:
image_classifier.train(
input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
current_step += train_steps_per_eval
tf.logging.info('Starting evaluation at step=%d.' % current_step)
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
elif mode == 'predict':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting prediction ...')
time_hook = model_lib.SessionTimingHook()
eval_hooks.append(time_hook)
result_iter = image_classifier.predict(
input_fn=imagenet_eval.input_fn,
hooks=eval_hooks,
checkpoint_path=checkpoint,
yield_single_examples=False)
results = list(itertools.islice(result_iter, eval_steps))
tf.logging.info('Inference speed = {} images per second.'.format(
time_hook.compute_speed(len(results) * eval_batch_size)))
elif mode == 'train':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
total_step = int(hparams.num_epochs * train_steps_per_epoch)
if current_step < total_step:
tf.logging.info('Starting training ...')
image_classifier.train(
input_fn=imagenet_train.input_fn,
steps=total_step-current_step)
else:
tf.logging.info('Mode not found.')
if FLAGS.export_dir is not None:
tf.logging.info('Starting exporting saved model ...')
serving_shape = [hparams.image_size, hparams.image_size, 3]
export_path = image_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=build_image_serving_input_receiver_fn(
serving_shape),
as_text=True)
if FLAGS.add_warmup_requests:
write_warmup_requests(
export_path,
FLAGS.model_name,
hparams.image_size,
batch_sizes=FLAGS.inference_batch_sizes)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| true
| true
|
7908bccf46311ce8d6596059d053fe7388c9c69c
| 3,880
|
py
|
Python
|
research/cognitive_mapping_and_planning/datasets/factory.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | 1
|
2021-05-17T01:42:29.000Z
|
2021-05-17T01:42:29.000Z
|
research/cognitive_mapping_and_planning/datasets/factory.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
research/cognitive_mapping_and_planning/datasets/factory.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Wrapper for selecting the navigation environment that we want to train and
test on.
"""
import os
import glob
import logging
from research.cognitive_mapping_and_planning.render import swiftshader_renderer as renderer
from research.cognitive_mapping_and_planning.src import file_utils as fu
from research.cognitive_mapping_and_planning.src import utils as utils
def get_dataset(dataset_name):
dataset = None
if dataset_name == 'sbpd':
dataset = StanfordBuildingParserDataset(dataset_name)
else:
        logging.fatal('Unknown dataset name: %s (expected sbpd)', dataset_name)
return dataset
class Loader():
def get_data_dir(self):
pass
def get_meta_data(self, file_name, data_dir=None):
if data_dir is None:
data_dir = self.get_data_dir()
full_file_name = os.path.join(data_dir, 'meta', file_name)
assert (fu.exists(full_file_name)), \
'{:s} does not exist'.format(full_file_name)
ext = os.path.splitext(full_file_name)[1]
ls = None
if ext == '.txt':
ls = []
with fu.fopen(full_file_name, 'r') as f:
for l in f:
ls.append(l.rstrip())
elif ext == '.pkl':
ls = utils.load_variables(full_file_name)
return ls
def load_building(self, name, data_dir=None):
if data_dir is None:
data_dir = self.get_data_dir()
out = {'name': name, 'data_dir': data_dir,
'room_dimension_file': os.path.join(data_dir, 'room-dimension',
name + '.pkl'),
'class_map_folder': os.path.join(data_dir, 'class-maps')}
return out
def load_building_meshes(self, building):
dir_name = os.path.join(building['data_dir'], 'mesh', building['name'])
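        # Assumes the building's mesh directory contains exactly one .obj file.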
mesh_file_name = glob.glob1(dir_name, '*.obj')[0]
mesh_file_name_full = os.path.join(dir_name, mesh_file_name)
logging.error('Loading building from obj file: %s', mesh_file_name_full)
shape = renderer.Shape(mesh_file_name_full, load_materials=True,
name_prefix=building['name'] + '_')
return [shape]
class StanfordBuildingParserDataset(Loader):
def __init__(self, ver):
self.ver = ver
self.data_dir = None
def get_data_dir(self):
if self.data_dir is None:
self.data_dir = 'data/stanford_building_parser_dataset/'
return self.data_dir
def get_benchmark_sets(self):
return self._get_benchmark_sets()
def get_split(self, split_name):
if self.ver == 'sbpd':
return self._get_split(split_name)
else:
logging.fatal('Unknown version.')
@staticmethod
def _get_benchmark_sets():
sets = ['train1', 'val', 'test']
return sets
@staticmethod
def _get_split(split_name):
train = ['area1', 'area5a', 'area5b', 'area6']
train1 = ['area1']
val = ['area3']
test = ['area4']
sets = {'train': train, 'train1': train1, 'val': val, 'test': test,
'all': sorted(list(set(train + val + test)))}
return sets[split_name]
| 34.954955
| 91
| 0.620619
|
import os
import glob
import logging
from research.cognitive_mapping_and_planning.render import swiftshader_renderer as renderer
from research.cognitive_mapping_and_planning.src import file_utils as fu
from research.cognitive_mapping_and_planning.src import utils as utils
def get_dataset(dataset_name):
dataset = None
if dataset_name == 'sbpd':
dataset = StanfordBuildingParserDataset(dataset_name)
else:
        logging.fatal('Unknown dataset name: %s (expected sbpd)', dataset_name)
return dataset
class Loader():
def get_data_dir(self):
pass
def get_meta_data(self, file_name, data_dir=None):
if data_dir is None:
data_dir = self.get_data_dir()
full_file_name = os.path.join(data_dir, 'meta', file_name)
assert (fu.exists(full_file_name)), \
'{:s} does not exist'.format(full_file_name)
ext = os.path.splitext(full_file_name)[1]
ls = None
if ext == '.txt':
ls = []
with fu.fopen(full_file_name, 'r') as f:
for l in f:
ls.append(l.rstrip())
elif ext == '.pkl':
ls = utils.load_variables(full_file_name)
return ls
def load_building(self, name, data_dir=None):
if data_dir is None:
data_dir = self.get_data_dir()
out = {'name': name, 'data_dir': data_dir,
'room_dimension_file': os.path.join(data_dir, 'room-dimension',
name + '.pkl'),
'class_map_folder': os.path.join(data_dir, 'class-maps')}
return out
def load_building_meshes(self, building):
dir_name = os.path.join(building['data_dir'], 'mesh', building['name'])
mesh_file_name = glob.glob1(dir_name, '*.obj')[0]
mesh_file_name_full = os.path.join(dir_name, mesh_file_name)
logging.error('Loading building from obj file: %s', mesh_file_name_full)
shape = renderer.Shape(mesh_file_name_full, load_materials=True,
name_prefix=building['name'] + '_')
return [shape]
class StanfordBuildingParserDataset(Loader):
def __init__(self, ver):
self.ver = ver
self.data_dir = None
def get_data_dir(self):
if self.data_dir is None:
self.data_dir = 'data/stanford_building_parser_dataset/'
return self.data_dir
def get_benchmark_sets(self):
return self._get_benchmark_sets()
def get_split(self, split_name):
if self.ver == 'sbpd':
return self._get_split(split_name)
else:
logging.fatal('Unknown version.')
@staticmethod
def _get_benchmark_sets():
sets = ['train1', 'val', 'test']
return sets
@staticmethod
def _get_split(split_name):
train = ['area1', 'area5a', 'area5b', 'area6']
train1 = ['area1']
val = ['area3']
test = ['area4']
sets = {'train': train, 'train1': train1, 'val': val, 'test': test,
'all': sorted(list(set(train + val + test)))}
return sets[split_name]
| true
| true
|
7908bd45052c18dbdf5a736bf51d3796cd76a240
| 3,011
|
py
|
Python
|
Explosion.py
|
P3D-Space-Tech-Demo/Section2SpaceflightDocking
|
d47c18f6e53a92564130f0fa1b70d72cfb1f6229
|
[
"BSD-3-Clause"
] | null | null | null |
Explosion.py
|
P3D-Space-Tech-Demo/Section2SpaceflightDocking
|
d47c18f6e53a92564130f0fa1b70d72cfb1f6229
|
[
"BSD-3-Clause"
] | 1
|
2021-05-19T22:50:41.000Z
|
2021-06-02T03:00:04.000Z
|
Explosion.py
|
P3D-Space-Tech-Demo/Section2SpaceflightDocking
|
d47c18f6e53a92564130f0fa1b70d72cfb1f6229
|
[
"BSD-3-Clause"
] | null | null | null |
from panda3d.core import CardMaker, Shader, Vec3, Vec2, NodePath, ColorBlendAttrib
from Section2SpaceflightDocking.Common import Common
import random
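# Note: globalClock (used below) is not imported here; Panda3D's ShowBase
# injects it into Python's builtins at startup.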
class Explosion():
cardMaker = None
@staticmethod
def getCard():
if Explosion.cardMaker is None:
Explosion.cardMaker = CardMaker("explosion maker")
Explosion.cardMaker.setFrame(-1, 1, -1, 1)
explosionCard = NodePath(Explosion.cardMaker.generate())
return explosionCard
def __init__(self, size, shaderName, shaderInputs, inputTextureName, randomVal1, randomVal2):
self.explosionCard = Explosion.getCard()
self.explosionCard.setScale(size)
self.explosionCard.setBin("unsorted", 1)
self.explosionCard.setDepthWrite(False)
self.explosionCard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne))
self.explosionCard.setBillboardPointEye()
shader = Shader.load(Shader.SL_GLSL,
"../Section2SpaceflightDocking/Shaders/{0}Vertex.glsl".format(shaderName),
"../Section2SpaceflightDocking/Shaders/{0}Fragment.glsl".format(shaderName))
self.explosionCard.setShader(shader)
for inputName, inputValue in shaderInputs.items():
self.explosionCard.setShaderInput(inputName, inputValue)
self.explosionCard.setShaderInput("sourceTex1", Common.framework.showBase.loader.loadTexture("../Section2SpaceflightDocking/Shaders/{0}1.png".format(inputTextureName)))
self.explosionCard.setShaderInput("sourceTex2", Common.framework.showBase.loader.loadTexture("../Section2SpaceflightDocking/Shaders/{0}2.png".format(inputTextureName)))
self.explosionCard.setShaderInput("randomisation1", randomVal1)
self.explosionCard.setShaderInput("randomisation2", randomVal2)
self.calcFullDuration(shaderInputs)
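        # Start far enough in the past that isAlive() reports False until
        # activate() is called.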
self.startTime = -1000
self.explosionCard.setShaderInput("startTime", self.startTime)
self.velocity = Vec3(0, 0, 0)
def calcFullDuration(self, shaderInputs):
self.duration = 0
if "duration" in shaderInputs:
self.duration += shaderInputs["duration"]
if "starDuration" in shaderInputs:
self.duration += shaderInputs["starDuration"]
def activate(self, velocity, pos):
self.startTime = globalClock.getRealTime()
self.explosionCard.setShaderInput("startTime", self.startTime)
self.velocity = velocity
self.explosionCard.reparentTo(Common.framework.showBase.render)
self.explosionCard.setPos(pos)
def update(self, dt):
self.explosionCard.setPos(self.explosionCard.getPos() + self.velocity*dt)
def isAlive(self):
return (globalClock.getRealTime() - self.startTime) < (self.duration)
def cleanup(self):
if self.explosionCard is not None:
self.explosionCard.removeNode()
self.explosionCard = None
| 41.246575
| 176
| 0.698107
|
from panda3d.core import CardMaker, Shader, Vec3, Vec2, NodePath, ColorBlendAttrib
from Section2SpaceflightDocking.Common import Common
import random
class Explosion():
cardMaker = None
@staticmethod
def getCard():
if Explosion.cardMaker is None:
Explosion.cardMaker = CardMaker("explosion maker")
Explosion.cardMaker.setFrame(-1, 1, -1, 1)
explosionCard = NodePath(Explosion.cardMaker.generate())
return explosionCard
def __init__(self, size, shaderName, shaderInputs, inputTextureName, randomVal1, randomVal2):
self.explosionCard = Explosion.getCard()
self.explosionCard.setScale(size)
self.explosionCard.setBin("unsorted", 1)
self.explosionCard.setDepthWrite(False)
self.explosionCard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne))
self.explosionCard.setBillboardPointEye()
shader = Shader.load(Shader.SL_GLSL,
"../Section2SpaceflightDocking/Shaders/{0}Vertex.glsl".format(shaderName),
"../Section2SpaceflightDocking/Shaders/{0}Fragment.glsl".format(shaderName))
self.explosionCard.setShader(shader)
for inputName, inputValue in shaderInputs.items():
self.explosionCard.setShaderInput(inputName, inputValue)
self.explosionCard.setShaderInput("sourceTex1", Common.framework.showBase.loader.loadTexture("../Section2SpaceflightDocking/Shaders/{0}1.png".format(inputTextureName)))
self.explosionCard.setShaderInput("sourceTex2", Common.framework.showBase.loader.loadTexture("../Section2SpaceflightDocking/Shaders/{0}2.png".format(inputTextureName)))
self.explosionCard.setShaderInput("randomisation1", randomVal1)
self.explosionCard.setShaderInput("randomisation2", randomVal2)
self.calcFullDuration(shaderInputs)
self.startTime = -1000
self.explosionCard.setShaderInput("startTime", self.startTime)
self.velocity = Vec3(0, 0, 0)
def calcFullDuration(self, shaderInputs):
self.duration = 0
if "duration" in shaderInputs:
self.duration += shaderInputs["duration"]
if "starDuration" in shaderInputs:
self.duration += shaderInputs["starDuration"]
def activate(self, velocity, pos):
self.startTime = globalClock.getRealTime()
self.explosionCard.setShaderInput("startTime", self.startTime)
self.velocity = velocity
self.explosionCard.reparentTo(Common.framework.showBase.render)
self.explosionCard.setPos(pos)
def update(self, dt):
self.explosionCard.setPos(self.explosionCard.getPos() + self.velocity*dt)
def isAlive(self):
return (globalClock.getRealTime() - self.startTime) < (self.duration)
def cleanup(self):
if self.explosionCard is not None:
self.explosionCard.removeNode()
self.explosionCard = None
| true
| true
|
7908bdaabbc44f574748fef5f0a7c7b097a203cb
| 4,452
|
py
|
Python
|
ocd_backend/items/nabeeldbank.py
|
fransward/open-cultuur-data
|
38db2476ad0c5c1328315d418ae92d6abe3a5f0b
|
[
"CC-BY-4.0"
] | 1
|
2019-02-07T14:32:29.000Z
|
2019-02-07T14:32:29.000Z
|
ocd_backend/items/nabeeldbank.py
|
fransward/open-cultuur-data
|
38db2476ad0c5c1328315d418ae92d6abe3a5f0b
|
[
"CC-BY-4.0"
] | null | null | null |
ocd_backend/items/nabeeldbank.py
|
fransward/open-cultuur-data
|
38db2476ad0c5c1328315d418ae92d6abe3a5f0b
|
[
"CC-BY-4.0"
] | null | null | null |
import re
from datetime import datetime
from ocd_backend.items import BaseItem
class NationaalArchiefBeeldbankItem(BaseItem):
R_IMG_RES = re.compile(r'http://.+/thumb/(?P<width>\d+)x(?P<height>\d+)/.+$')
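    # Thumbnail URLs embed their resolution as .../thumb/<width>x<height>/...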
def _get_text_or_none(self, xpath_expression):
node = self.original_item.find(xpath_expression, namespaces=self.original_item.nsmap)
if node is not None and node.text is not None:
return unicode(node.text)
return None
def _get_all_text(self, xpath_expression):
nodes = self.original_item.findall(xpath_expression, namespaces=self.original_item.nsmap)
texts = []
for node in nodes:
if node.text is not None:
texts.append(unicode(node.text))
return texts
def get_original_object_id(self):
return self._get_text_or_none('.//item/guid').split('/')[-1]
def get_original_object_urls(self):
link = self._get_text_or_none('.//item/link')
if link:
return {'html': link}
return {}
def get_rights(self):
return u'Creative Commons Attribution-ShareAlike'
def get_collection(self):
return u'Beeldbank Nationaal Archief'
def get_combined_index_data(self):
combined_index_data = {}
title = self._get_text_or_none('.//item/title')
if title:
            title = title.replace('\n', ' ').replace('  ', ' ')
combined_index_data['title'] = title
description = self._get_text_or_none('.//item/description')
if description:
            description = description.replace('\n', ' ').replace('  ', ' ')
# Only include the description if it differs from the title
if description != title:
combined_index_data['description'] = description
date = self._get_text_or_none('.//item/dc:date')
if date:
            combined_index_data['date'] = datetime.strptime(
                date, '%Y-%m-%dT%H:%M:%SZ')
combined_index_data['date_granularity'] = 14
creators = self.original_item.findall('.//dc:creator',
namespaces=self.original_item.nsmap)
if creators is not None:
authors = []
for author in creators:
# Don't add the author if it's unknown to the source ('[onbekend]')
if author.text == '[onbekend]':
continue
authors.append(unicode(author.text))
combined_index_data['authors'] = authors
picture_versions = self.original_item.findall('.//item/ese:isShownBy',
namespaces=self.original_item.nsmap)
if picture_versions is not None:
combined_index_data['media_urls'] = []
for picture_version in picture_versions:
url = picture_version.text
resolution = self.R_IMG_RES.match(url)
combined_index_data['media_urls'].append({
'original_url': url,
'content_type': 'image/jpeg',
'width': int(resolution.group('width')),
'height': int(resolution.group('height'))
})
return combined_index_data
def get_index_data(self):
return {}
def get_all_text(self):
text_items = []
title = self._get_text_or_none('.//item/title')
if title:
            title = title.replace('\n', ' ').replace('  ', ' ')
text_items.append(title)
description = self._get_text_or_none('.//item/description')
if description:
            description = description.replace('\n', ' ').replace('  ', ' ')
# Only include the description if it differs from the title
if description != title:
text_items.append(description)
text_items += self._get_all_text('.//item/dc:subject')
text_items += self._get_all_text('.//item/dc:creator')
text_items += self._get_all_text('.//item/dc:coverage')
text_items += self._get_all_text('.//item/dc:type')
text_items += self._get_all_text('.//item/dc:identifier')
text_items += self._get_all_text('.//item/ese:provider')
text_items.append(self._get_text_or_none('.//memorix:MEMORIX/field[@name="Annotatie"]/value'))
return u' '.join([ti for ti in text_items if ti is not None])
| 35.616
| 102
| 0.591195
|
import re
from datetime import datetime
from ocd_backend.items import BaseItem
class NationaalArchiefBeeldbankItem(BaseItem):
R_IMG_RES = re.compile(r'http://.+/thumb/(?P<width>\d+)x(?P<height>\d+)/.+$')
def _get_text_or_none(self, xpath_expression):
node = self.original_item.find(xpath_expression, namespaces=self.original_item.nsmap)
if node is not None and node.text is not None:
return unicode(node.text)
return None
def _get_all_text(self, xpath_expression):
nodes = self.original_item.findall(xpath_expression, namespaces=self.original_item.nsmap)
texts = []
for node in nodes:
if node.text is not None:
texts.append(unicode(node.text))
return texts
def get_original_object_id(self):
return self._get_text_or_none('.//item/guid').split('/')[-1]
def get_original_object_urls(self):
link = self._get_text_or_none('.//item/link')
if link:
return {'html': link}
return {}
def get_rights(self):
return u'Creative Commons Attribution-ShareAlike'
def get_collection(self):
return u'Beeldbank Nationaal Archief'
def get_combined_index_data(self):
combined_index_data = {}
title = self._get_text_or_none('.//item/title')
if title:
            title = title.replace('\n', ' ').replace('  ', ' ')
combined_index_data['title'] = title
description = self._get_text_or_none('.//item/description')
if description:
            description = description.replace('\n', ' ').replace('  ', ' ')
if description != title:
combined_index_data['description'] = description
date = self._get_text_or_none('.//item/dc:date')
if date:
            combined_index_data['date'] = datetime.strptime(
                date, '%Y-%m-%dT%H:%M:%SZ')
combined_index_data['date_granularity'] = 14
creators = self.original_item.findall('.//dc:creator',
namespaces=self.original_item.nsmap)
if creators is not None:
authors = []
for author in creators:
if author.text == '[onbekend]':
continue
authors.append(unicode(author.text))
combined_index_data['authors'] = authors
picture_versions = self.original_item.findall('.//item/ese:isShownBy',
namespaces=self.original_item.nsmap)
if picture_versions is not None:
combined_index_data['media_urls'] = []
for picture_version in picture_versions:
url = picture_version.text
resolution = self.R_IMG_RES.match(url)
combined_index_data['media_urls'].append({
'original_url': url,
'content_type': 'image/jpeg',
'width': int(resolution.group('width')),
'height': int(resolution.group('height'))
})
return combined_index_data
def get_index_data(self):
return {}
def get_all_text(self):
text_items = []
title = self._get_text_or_none('.//item/title')
if title:
            title = title.replace('\n', ' ').replace('  ', ' ')
text_items.append(title)
description = self._get_text_or_none('.//item/description')
if description:
            description = description.replace('\n', ' ').replace('  ', ' ')
if description != title:
text_items.append(description)
text_items += self._get_all_text('.//item/dc:subject')
text_items += self._get_all_text('.//item/dc:creator')
text_items += self._get_all_text('.//item/dc:coverage')
text_items += self._get_all_text('.//item/dc:type')
text_items += self._get_all_text('.//item/dc:identifier')
text_items += self._get_all_text('.//item/ese:provider')
text_items.append(self._get_text_or_none('.//memorix:MEMORIX/field[@name="Annotatie"]/value'))
return u' '.join([ti for ti in text_items if ti is not None])
| true
| true
|
7908bdf8227c02f4c6de44c83bc393fb992675d5
| 1,507
|
py
|
Python
|
model-optimizer/extensions/middle/UselessMerge.py
|
undeadinu/dldt
|
fbc7a4a710c24def8ab199926a7da90a0394b87d
|
[
"Apache-2.0"
] | 1
|
2019-03-22T06:35:55.000Z
|
2019-03-22T06:35:55.000Z
|
model-optimizer/extensions/middle/UselessMerge.py
|
undeadinu/dldt
|
fbc7a4a710c24def8ab199926a7da90a0394b87d
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/middle/UselessMerge.py
|
undeadinu/dldt
|
fbc7a4a710c24def8ab199926a7da90a0394b87d
|
[
"Apache-2.0"
] | 1
|
2019-06-11T06:20:42.000Z
|
2019-06-11T06:20:42.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import networkx as nx
from extensions.middle.ConstSwitchResolver import ConstSwitchEraser
from mo.graph.graph import erase_node
from mo.middle.replacement import MiddleReplacementPattern
class UselessMergeEraser(MiddleReplacementPattern):
enabled = True
def run_after(self):
return [ConstSwitchEraser]
def pattern(self):
return dict(
nodes=[('merge', dict(kind='op', op='Merge')),
('merge_data', dict(kind='data'))],
edges=[('merge', 'merge_data')]
)
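    # A Merge node with at most one input is an identity, so the op and its
    # output data node can be erased without changing the graph's semantics.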
def replace_pattern(self, graph: nx.MultiDiGraph, match: dict):
if len(graph.in_edges(match['merge'].id)) <= 1:
erase_node(match['merge'])
erase_node(match['merge_data'])
log.info("Useles Merge op and data nodes was deleted op='{}' data='{}'"
"".format(match['merge'].id, match['merge_data'].id))
| 33.488889
| 83
| 0.68215
|
import logging as log
import networkx as nx
from extensions.middle.ConstSwitchResolver import ConstSwitchEraser
from mo.graph.graph import erase_node
from mo.middle.replacement import MiddleReplacementPattern
class UselessMergeEraser(MiddleReplacementPattern):
enabled = True
def run_after(self):
return [ConstSwitchEraser]
def pattern(self):
return dict(
nodes=[('merge', dict(kind='op', op='Merge')),
('merge_data', dict(kind='data'))],
edges=[('merge', 'merge_data')]
)
def replace_pattern(self, graph: nx.MultiDiGraph, match: dict):
if len(graph.in_edges(match['merge'].id)) <= 1:
erase_node(match['merge'])
erase_node(match['merge_data'])
log.info("Useles Merge op and data nodes was deleted op='{}' data='{}'"
"".format(match['merge'].id, match['merge_data'].id))
| true
| true
|
7908be606af3c926cc44177dfe378baed611cd0e
| 968
|
py
|
Python
|
factorial-trailing-digits/factorial_trailing_digits.py
|
fatihcansu/kripton
|
e680d9fd24a632167f5a8ac71924ef636dcd567c
|
[
"Unlicense"
] | 13
|
2021-01-24T20:03:35.000Z
|
2022-03-15T00:49:10.000Z
|
factorial-trailing-digits/factorial_trailing_digits.py
|
fatihcansu/kripton
|
e680d9fd24a632167f5a8ac71924ef636dcd567c
|
[
"Unlicense"
] | null | null | null |
factorial-trailing-digits/factorial_trailing_digits.py
|
fatihcansu/kripton
|
e680d9fd24a632167f5a8ac71924ef636dcd567c
|
[
"Unlicense"
] | 8
|
2021-01-18T21:10:27.000Z
|
2021-03-27T11:31:17.000Z
|
#!/usr/bin/python3
import time
def count_5s(number):
counter = 0
while (number % 5 == 0):
counter += 1
number /= 5
return counter
def last_5_digits(number):
number = number % (10 ** 5)
return number
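# factorial(n) returns n! / 10**z (z = number of trailing zeros) modulo 10**5:
# each factor of 5 is cancelled against a borrowed factor of 2, so trailing
# zeros never enter the running product; surplus 2s are multiplied back in
# at the end, which keeps the last five digits of the zero-stripped value exact.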
def factorial(number):
borrowed_2s = 0
product = 1
for i in range(1, number+1):
if i % 2 == 0:
i = int(i/2)
borrowed_2s += 1
num_5s = count_5s(i)
if num_5s:
i = int(i/(5 ** num_5s))
borrowed_2s -= num_5s
product = last_5_digits(product * i)
product *= (2 ** borrowed_2s)
return product
def main(number):
return last_5_digits(
factorial(number)
)
if __name__ == '__main__':
n = 2560000
start_time = time.time()
result = main(n)
print(
"For {n}, took {time:.2f} seconds to find: {result}".format(
**{'n': n, 'time': time.time() - start_time, 'result': result})
)
| 19.36
| 79
| 0.528926
|
import time
def count_5s(number):
counter = 0
while (number % 5 == 0):
counter += 1
number /= 5
return counter
def last_5_digits(number):
number = number % (10 ** 5)
return number
def factorial(number):
borrowed_2s = 0
product = 1
for i in range(1, number+1):
if i % 2 == 0:
i = int(i/2)
borrowed_2s += 1
num_5s = count_5s(i)
if num_5s:
i = int(i/(5 ** num_5s))
borrowed_2s -= num_5s
product = last_5_digits(product * i)
product *= (2 ** borrowed_2s)
return product
def main(number):
return last_5_digits(
factorial(number)
)
if __name__ == '__main__':
n = 2560000
start_time = time.time()
result = main(n)
print(
"For {n}, took {time:.2f} seconds to find: {result}".format(
**{'n': n, 'time': time.time() - start_time, 'result': result})
)
| true
| true
|
7908beb61d6899428df3e5cc3721d73773615b6c
| 3,650
|
py
|
Python
|
homeassistant/components/openweathermap/weather.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 6
|
2017-08-02T19:26:39.000Z
|
2020-03-14T22:47:41.000Z
|
homeassistant/components/openweathermap/weather.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 60
|
2020-08-03T07:32:56.000Z
|
2022-03-31T06:02:07.000Z
|
homeassistant/components/openweathermap/weather.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Support for the OpenWeatherMap (OWM) service."""
from homeassistant.components.weather import WeatherEntity
from homeassistant.const import TEMP_CELSIUS
from .const import (
ATTR_API_CONDITION,
ATTR_API_FORECAST,
ATTR_API_HUMIDITY,
ATTR_API_PRESSURE,
ATTR_API_TEMPERATURE,
ATTR_API_WIND_BEARING,
ATTR_API_WIND_SPEED,
ATTRIBUTION,
DOMAIN,
ENTRY_NAME,
ENTRY_WEATHER_COORDINATOR,
)
from .weather_update_coordinator import WeatherUpdateCoordinator
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up OpenWeatherMap weather entity based on a config entry."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
name = domain_data[ENTRY_NAME]
weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR]
unique_id = f"{config_entry.unique_id}"
owm_weather = OpenWeatherMapWeather(name, unique_id, weather_coordinator)
async_add_entities([owm_weather], False)
class OpenWeatherMapWeather(WeatherEntity):
"""Implementation of an OpenWeatherMap sensor."""
def __init__(
self,
name,
unique_id,
weather_coordinator: WeatherUpdateCoordinator,
):
"""Initialize the sensor."""
self._name = name
self._unique_id = unique_id
self._weather_coordinator = weather_coordinator
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def condition(self):
"""Return the current condition."""
return self._weather_coordinator.data[ATTR_API_CONDITION]
@property
def temperature(self):
"""Return the temperature."""
return self._weather_coordinator.data[ATTR_API_TEMPERATURE]
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
return self._weather_coordinator.data[ATTR_API_PRESSURE]
@property
def humidity(self):
"""Return the humidity."""
return self._weather_coordinator.data[ATTR_API_HUMIDITY]
@property
def wind_speed(self):
"""Return the wind speed."""
wind_speed = self._weather_coordinator.data[ATTR_API_WIND_SPEED]
if self.hass.config.units.name == "imperial":
return round(wind_speed * 2.24, 2)
return round(wind_speed * 3.6, 2)
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self._weather_coordinator.data[ATTR_API_WIND_BEARING]
@property
def forecast(self):
"""Return the forecast array."""
return self._weather_coordinator.data[ATTR_API_FORECAST]
@property
def available(self):
"""Return True if entity is available."""
return self._weather_coordinator.last_update_success
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self._weather_coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Get the latest data from OWM and updates the states."""
await self._weather_coordinator.async_request_refresh()
| 29.435484
| 83
| 0.682466
|
from homeassistant.components.weather import WeatherEntity
from homeassistant.const import TEMP_CELSIUS
from .const import (
ATTR_API_CONDITION,
ATTR_API_FORECAST,
ATTR_API_HUMIDITY,
ATTR_API_PRESSURE,
ATTR_API_TEMPERATURE,
ATTR_API_WIND_BEARING,
ATTR_API_WIND_SPEED,
ATTRIBUTION,
DOMAIN,
ENTRY_NAME,
ENTRY_WEATHER_COORDINATOR,
)
from .weather_update_coordinator import WeatherUpdateCoordinator
async def async_setup_entry(hass, config_entry, async_add_entities):
domain_data = hass.data[DOMAIN][config_entry.entry_id]
name = domain_data[ENTRY_NAME]
weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR]
unique_id = f"{config_entry.unique_id}"
owm_weather = OpenWeatherMapWeather(name, unique_id, weather_coordinator)
async_add_entities([owm_weather], False)
class OpenWeatherMapWeather(WeatherEntity):
def __init__(
self,
name,
unique_id,
weather_coordinator: WeatherUpdateCoordinator,
):
self._name = name
self._unique_id = unique_id
self._weather_coordinator = weather_coordinator
@property
def name(self):
return self._name
@property
def unique_id(self):
return self._unique_id
@property
def should_poll(self):
return False
@property
def attribution(self):
return ATTRIBUTION
@property
def condition(self):
return self._weather_coordinator.data[ATTR_API_CONDITION]
@property
def temperature(self):
return self._weather_coordinator.data[ATTR_API_TEMPERATURE]
@property
def temperature_unit(self):
return TEMP_CELSIUS
@property
def pressure(self):
return self._weather_coordinator.data[ATTR_API_PRESSURE]
@property
def humidity(self):
return self._weather_coordinator.data[ATTR_API_HUMIDITY]
@property
def wind_speed(self):
wind_speed = self._weather_coordinator.data[ATTR_API_WIND_SPEED]
if self.hass.config.units.name == "imperial":
return round(wind_speed * 2.24, 2)
return round(wind_speed * 3.6, 2)
@property
def wind_bearing(self):
return self._weather_coordinator.data[ATTR_API_WIND_BEARING]
@property
def forecast(self):
return self._weather_coordinator.data[ATTR_API_FORECAST]
@property
def available(self):
return self._weather_coordinator.last_update_success
async def async_added_to_hass(self):
self.async_on_remove(
self._weather_coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
await self._weather_coordinator.async_request_refresh()
| true
| true
|
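A small conversion sketch, on the assumption that the coordinator reports wind speed in metres per second: 3.6 is the exact m/s-to-km/h factor, while the 2.24 used in wind_speed above rounds the exact m/s-to-mph factor of about 2.23694. The helper below is illustrative, not part of the component's API:
def wind_speed_display(mps, imperial=False):
    # convert a metres-per-second reading for display
    factor = 2.23694 if imperial else 3.6
    return round(mps * factor, 2)

print(wind_speed_display(10))        # 36.0 km/h
print(wind_speed_display(10, True))  # 22.37 mph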
7908bf65e9b0e527a64b8fcbb431d3b737eee5bc
| 183
|
py
|
Python
|
pysperf/solver_library.py
|
ZedongPeng/pysperf
|
9d8536c56aee8508ffa142369b1ab7e3d88baaac
|
[
"BSD-2-Clause"
] | null | null | null |
pysperf/solver_library.py
|
ZedongPeng/pysperf
|
9d8536c56aee8508ffa142369b1ab7e3d88baaac
|
[
"BSD-2-Clause"
] | null | null | null |
pysperf/solver_library.py
|
ZedongPeng/pysperf
|
9d8536c56aee8508ffa142369b1ab7e3d88baaac
|
[
"BSD-2-Clause"
] | 2
|
2020-05-21T22:15:51.000Z
|
2020-06-02T23:02:08.000Z
|
"""
This file imports `__all__` from the solvers directory, thus populating the solver registry.
"""
from pysperf.solvers import *
from .config import solvers
__all__ = ['solvers']
| 20.333333
| 92
| 0.748634
|
from pysperf.solvers import *
from .config import solvers
__all__ = ['solvers']
| true
| true
|
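A minimal sketch of the import-time registration idiom the file above depends on (importing the modules fills a shared registry as a side effect); the decorator and dict here are illustrative stand-ins, not pysperf's actual interfaces:
solvers = {}

def register(cls):
    # a module that defines a decorated class registers it on import
    solvers[cls.__name__] = cls
    return cls

@register
class ExampleSolver:
    pass

print(sorted(solvers))  # ['ExampleSolver']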
7908c07a4c1962513a9720bd6148b210668899a2
| 15,601
|
py
|
Python
|
mechlib/amech_io/parser/run.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | null | null | null |
mechlib/amech_io/parser/run.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | null | null | null |
mechlib/amech_io/parser/run.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | 8
|
2019-12-18T20:09:46.000Z
|
2020-11-14T16:37:28.000Z
|
""" Parses the `run.dat` input file for MechDriver that specifices all
of calculations to run for a given session of the code.
Specifcally, looks for and parses several subsections:
(1) `input` block: various input
(2) `pes` block: idxs denoting what PESs in mech file to run
(3) `spc` block: idxs denoting what species in .csv file to run
(4) `els tasks` block: set of tasks for ESDriver
(5) `therm tasks` block: set of tasks for ThermDriver
(6) `ktp tasks` block: set of tasks for kTPDriver
(7) `trans tasks` block: set of tasks for TransDriver
(8) `proc tasks` block: set of tasks for ProcDriver
Function parses the strings and converts them into formatted dictionaries
that are passed to the sub-drivers of the code:
ESDriver, ThermoDriver, kTPDriver, TransDriver, ProcDriver
These dictionaries are built in three stages:
(1) filled with user-specified options
(2) default values not defined by the user are added, and
(3) checked that all keywords and values are supported by the code.
"""
import sys
import automol
import ioformat
from mechlib.amech_io.printer import error_message
from mechlib.amech_io.parser._keywrd import defaults_from_val_dct
from mechlib.amech_io.parser._keywrd import defaults_from_key_val_dcts
from mechlib.amech_io.parser._keywrd import check_dct1
from mechlib.amech_io.parser._keywrd import check_thy_lvls
# DICTIONARIES OF DEFAULTS #
# Run Keywords
RUN_INP_REQ = [
'inp_mech', 'out_mech', 'inp_spc', 'out_spc', 'run_prefix', 'save_prefix']
RUN_INP_VAL_DCT = {
'inp_mech': ((str,), ('chemkin'), 'chemkin'),
'inp_spc': ((str,), ('csv',), 'csv'),
'out_mech': ((str,), ('chemkin'), 'chemkin'),
'out_spc': ((str,), ('csv',), 'csv'),
'print_mech': ((bool,), (True, False), False),
'print_debug': ((bool,), (True, False), False),
'run_prefix': ((str,), (), None),
'save_prefix': ((str,), (), None)
}
# HANDLE TASK KEYS
# Commonly useful task keyword lists
BASE = ('runlvl', 'inplvl', 'retryfail', 'overwrite')
MREF = ('var_splvl1', 'var_splvl2', 'var_scnlvl')
TRANS = ('bath', 'njobs', 'nsamp', 'conf')
PRNT = ('geolvl', 'proplvl', 'cnf_range', 'sort')
# Supported object types for task (useful if task requestes 'all')
SUPP_OBJS = ('spc', 'ts')
# Determines what objects and keywords are allowed for tasks for ES,Trans,Print
# Need way to set required tsks
# Tasks: (allowed obj, allowed_keywords)
TSK_KEY_DCT = {
# Electronic Structure Driver Tasks
'init_geom': (('spc',), BASE),
'find_ts': (('spc', 'ts'), BASE + MREF + ('nobarrier',)), # 're_id')),
'conf_pucker': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_samp': (('spc', 'ts'), BASE + ('cnf_range', 'sort', 'resave',)),
'conf_energy': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_grad': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_hess': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_vpt2': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_prop': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_opt': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'hr_scan': (('spc', 'ts'), BASE + ('tors_model', 'resamp_min',
'cnf_range', 'sort',)),
'hr_grad': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
'hr_hess': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
'hr_energy': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
'hr_vpt2': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
'hr_reopt': (('spc', 'ts'), BASE + ('tors_model', 'hrthresh',
'cnf_range', 'sort',)),
'tau_samp': (('spc', 'ts'), BASE + ('resave',)),
'tau_energy': (('spc', 'ts'), BASE),
'tau_grad': (('spc', 'ts'), BASE),
'tau_hess': (('spc', 'ts'), BASE + ('hessmax',)),
'rpath_scan': (('ts',), BASE + ('rxncoord',)),
'rpath_energy': (('ts',), BASE + ('rxncoord',)),
'rpath_grad': (('ts',), BASE + ('rxncoord',)),
'rpath_hess': (('ts',), BASE + ('rxncoord',)),
# Transport Driver Tasks
'onedmin': (('spc',), (BASE + TRANS)),
'write_transport': (('spc',), (BASE + TRANS)),
# Process Driver Tasks
'freqs': (('spc', 'ts', 'vdw'), PRNT + ('scale',)),
'energy': (('spc', 'ts'), PRNT),
'geo': (('spc', 'ts'), PRNT),
'molden': (('spc', 'ts'), PRNT),
'zmatrix': (('spc', 'ts'), PRNT),
'torsions': (('spc', 'ts'), PRNT),
'enthalpy': (('spc', 'ts'), PRNT),
'pf': (('spc', 'ts'), PRNT),
'messpf_inp': (('spc', 'ts'), PRNT),
'coeffs': (('spc', 'ts'), ()),
# KTP/Therm
'write_mess': ((), ('kin_model', 'spc_model', 'overwrite',
'use_well_extension', 'float_precision',
'cnf_range', 'sort')),
'run_mess': ((), ('kin_model', 'spc_model', 'nprocs',
'cnf_range', 'sort')),
'run_fits': ((), ('kin_model', 'cnf_range', 'sort')),
}
# tsk: (object types, (allowed values), default) # use functions for weird
# maybe the required checks use if None given?
TSK_VAL_DCT = {
# Common
'runlvl': ((str,), (), None),
'inplvl': ((str,), (), None),
'var_splvl1': ((str,), (), None),
'var_splvl2': ((str,), (), None),
'var_scnlvl': ((str,), (), None),
'resave': ((bool,), (True, False), False),
'retryfail': ((bool,), (True, False), True),
'overwrite': ((bool,), (True, False), False),
# ES
'cnf_range': ((str,), (), 'min'), # change to econfs, nconfs
'sort': ((str,), (), None),
'hessmax': ((int,), (), 1000),
'tors_model': ((str,),
('1dhr', '1dhrf', '1dhrfa', 'mdhr', 'mdhrv'), '1dhr'),
'resamp_min': ((bool,), (True, False), False),
'hrthresh': ((float,), (), -0.2),
'potthresh': ((float,), (), 0.3),
'rxncoord': ((str,), ('irc', 'auto'), 'auto'),
'nobarrier': ((str,), ('pst', 'rpvtst', 'vrctst'), None),
're_id': ((bool,), (True, False), False),
# Trans
'njobs': ((int,), (), 1),
'nsamp': ((int,), (), 1),
'conf': ((str,), ('sphere', 'min'), 'sphere'),
# Proc
'geolvl': ((str,), (), None),
'proplvl': ((str,), (), None),
'nconfs': ((str,), (), 'min'),
'econfs': ((str,), (), 'min'),
'scale': ((str,), (), None),
# KTP/Therm
'kin_model': ((str,), (), None),
'spc_model': ((str,), (), None),
'nprocs': ((int,), (), 10),
'use_well_extension': ((bool,), (), False),
'linked_pes': ((tuple,), (), None),
'float_precision': ((str,), ('double', 'quadruple'), 'double'),
}
# Have nconfs and econfs keywords and combine them to figure out which to use?
# INPUT PARSERS #
# Input Section
def input_dictionary(run_str):
""" Parses the `input` block and builds a
dictionary of keywords and their corresponding values.
:param run_str: input string of the run.dat block
:type run_str: str
:rtype: dict[str: obj]
"""
# Read the input block
inp_block = ioformat.ptt.end_block(run_str, 'input', footer='input')
inp_dct = ioformat.ptt.keyword_dct_from_block(inp_block)
# Add defaults to the dictionary
inp_dct = automol.util.dict_.right_update(
defaults_from_val_dct(RUN_INP_VAL_DCT), inp_dct)
# Check the dictionary
check_dct1(inp_dct, RUN_INP_VAL_DCT, RUN_INP_REQ, 'Run-Input')
return inp_dct
# Chemistry objects
def chem_idxs(run_str):
""" Parses the `pes` block of the run.dat file and
builds a dictionary of the PESs and corresponding channels the
user wishes to run.
Parses the `spc` block of the run.dat file and
builds a dictionary of the species the
user wishes to run.
May break if an idx specification spans two lines of the string.
:param run_str: string of the run.dat input file
:type run_str: str
:returns: ({pes_idx: list of channel_idxs}, {1: list of species idxs})
:rtype: dict[str: tuple]
"""
# PES idxs to run
pes_block = ioformat.ptt.end_block(run_str, 'pes', footer='pes')
if pes_block is not None:
_pes_idxs = {}
for line in pes_block.strip().splitlines():
[pes_nums, chn_nums] = line.split(':')
_pes_nums = ioformat.ptt.idx_lst_from_line(pes_nums)
_chn_nums = ioformat.ptt.idx_lst_from_line(chn_nums)
for idx in _pes_nums:
_pes_idxs.update({idx-1: tuple(val-1 for val in _chn_nums)})
else:
_pes_idxs = None
# SPC idxs to run
spc_block = ioformat.ptt.end_block(run_str, 'spc', footer='spc')
if spc_block is not None:
_idxs = ()
for line in spc_block.splitlines():
_idxs += ioformat.ptt.idx_lst_from_line(line)
_spc_idxs = {1: tuple(val-1 for val in _idxs)}
else:
_spc_idxs = None
# Kill code if no idxs given
if _pes_idxs is None and _spc_idxs is None:
error_message('No pes or spc section given in run.dat file. Quitting')
sys.exit()
return _pes_idxs, _spc_idxs
# Driver Task Lists
def extract_task(tsk, tsk_lst):
""" Searches for a task in the task lst and if found:
the corresponding keywords and values will be returned
Function only works if task is present in the list one time.
:param tsk: task to extract information for
:type tsk: str
:param tsk_lst: list of tasks to run for some driver
:type tsk_lst: tuple(tuple(str/dict))
:rtype: tuple(str/dict)
"""
tsk_inf = None
for _tsk_inf in tsk_lst:
if any(x == tsk for x in _tsk_inf): # just looks in all pars
tsk_inf = _tsk_inf
break
return tsk_inf
def tasks(run_str, thy_dct):
""" runstr
"""
# Read the blocks and build the user-determined task lists
es_block = ioformat.ptt.end_block(run_str, 'els', footer='els')
trans_block = ioformat.ptt.end_block(run_str, 'trans', footer='trans')
therm_block = ioformat.ptt.end_block(run_str, 'thermo', footer='thermo')
ktp_block = ioformat.ptt.end_block(run_str, 'ktp', footer='ktp')
proc_block = ioformat.ptt.end_block(run_str, 'proc', footer='proc')
# print('els\n', es_block)
# print('therm\n', therm_block)
# print('trans\n', trans_block)
# print('proc\n', proc_block)
es_tsks = _tsk_lst(es_block, 3)
therm_tsks = _tsk_lst(therm_block, 2)
ktp_tsks = _tsk_lst(ktp_block, 2)
trans_tsks = _tsk_lst(trans_block, 3)
proc_tsks = _tsk_lst(proc_block, 3)
# Add defaults to each task as needed
es_tsks = _tsk_defaults(es_tsks)
therm_tsks = _tsk_defaults(therm_tsks)
ktp_tsks = _tsk_defaults(ktp_tsks)
trans_tsks = _tsk_defaults(trans_tsks)
proc_tsks = _tsk_defaults(proc_tsks)
# Assess each dictionary for correctness
_check_tsks(es_tsks, thy_dct)
_check_tsks(therm_tsks, thy_dct)
_check_tsks(ktp_tsks, thy_dct)
_check_tsks(trans_tsks, thy_dct)
_check_tsks(proc_tsks, thy_dct)
tsk_dct = {
'es': es_tsks,
'thermo': therm_tsks,
'ktp': ktp_tsks,
'trans': trans_tsks,
'proc': proc_tsks
}
return tsk_dct
def _tsk_lst(tsk_str, num):
""" Set the sequence of electronic structure tasks for a given
species or PESs
"""
# Build the task lists from the string
if tsk_str is not None:
tsks = []
tsk_str = ioformat.remove_whitespace_from_string(tsk_str)
for line in tsk_str.splitlines():
_tsk = _split_line(line, num)
tsks.append(_tsk)
mod_tsks = tsks
# mod_tsks = _expand_tsks(tsks) if num == 3 else tsks
else:
mod_tsks = None
return mod_tsks
def _expand_tsks(tsks_lst):
""" Loops over the driver task list and checks if each task is a
macro-task that should be expanded into sub-tasks.
Right now, it splits all obj tasks into spc and ts
:param tsk_lst: list of tasks to run for some driver
:type tsk_lst: tuple(tuple(str/dict))
:rtype: tuple(str/dict)
"""
mod_tsks_lst = []
for tsk_lst in tsks_lst:
[obj, tsk, dct] = tsk_lst
objs = ['spc', 'ts'] if obj == 'all' else [obj]
for obj in objs:
mod_tsks_lst.append([obj, tsk, dct])
return mod_tsks_lst
def _tsk_defaults(tsk_lst):
""" Fill out the keyword dictionaries for various task lists with
default values
"""
if tsk_lst is not None:
mod_tsk_lst = []
for _tsk_lst in tsk_lst:
keyword_dct = _tsk_lst[-1]
tsk = _tsk_lst[:-1][-1]
default_dct = defaults_from_key_val_dcts(
tsk, TSK_KEY_DCT, TSK_VAL_DCT)
new_key_dct = automol.util.dict_.right_update(
default_dct, keyword_dct)
mod_lst = _tsk_lst[:-1] + [new_key_dct]
mod_tsk_lst.append(mod_lst)
else:
mod_tsk_lst = None
return mod_tsk_lst
def _check_tsks(tsk_lsts, thy_dct):
""" Loop over all of the tasks, add default keywords and parameters
and assesses if all the input is valid
"""
if tsk_lsts is not None:
for tsk_lst in tsk_lsts:
# Unpack the task
_tsk = tsk_lst[:-1]
if len(_tsk) == 2:
# Case(1): spc task keywords (ESDriver)
obj, tsk = _tsk[0], _tsk[1]
else:
# Case(2): task keywords (ThermoDriver, kTPDriver)
obj, tsk = None, _tsk[0]
key_dct = tsk_lst[-1]
# Check if the obj is allowed
if obj is not None:
# Have to make lst to handle case where obj == 'all'
obj_lst = SUPP_OBJS if obj == 'all' else (obj,)
for _obj in obj_lst:
if _obj not in TSK_KEY_DCT[tsk][0]:
error_message(f'obj {obj}, not allowed for {tsk}')
sys.exit()
# Check if keyword values are allowed
check_dct1(key_dct, TSK_VAL_DCT, (), 'Task')
# Check keywords with thylvls as values use lvls defined in thy dct
check_thy_lvls(key_dct, thy_dct)
def _split_line(line, num):
""" Split a line
"""
line = line.split()
if num == 3:
tsk, key_lst = line[:2], line[2:]
elif num == 2:
tsk, key_lst = line[:1], line[1:]
key_dct = ioformat.ptt.keyword_dct_from_block('\n'.join(key_lst))
return tsk + [key_dct] # could convert to empty dct instead of None
# Check a bunch of stuff
def check_inputs(tsk_dct, pes_dct, pes_mod_dct, spc_mod_dct):
""" Check if inputs placed that is required
"""
# Check if a mechanism has been provided where required
if tsk_dct['ktp'] or tsk_dct['thermo']:
if pes_mod_dct is None:
error_message(
'kTPDriver or Thermo Requested. \n'
' However no kin model provided in models.dat\n'
' Exiting MechDriver...')
sys.exit()
if spc_mod_dct is None:
error_message(
'kTPDriver or Thermo Requested. \n'
' However no spc model provided in models.dat\n'
' Exiting MechDriver...')
sys.exit()
if tsk_dct['ktp']:
if pes_dct is None:
error_message(
'kTPDriver Requested. \n'
' However no reaction channels provided in mechanism.dat\n'
' Exiting MechDriver...')
sys.exit()
| 35.137387
| 79
| 0.581629
|
import sys
import automol
import ioformat
from mechlib.amech_io.printer import error_message
from mechlib.amech_io.parser._keywrd import defaults_from_val_dct
from mechlib.amech_io.parser._keywrd import defaults_from_key_val_dcts
from mechlib.amech_io.parser._keywrd import check_dct1
from mechlib.amech_io.parser._keywrd import check_thy_lvls
RUN_INP_REQ = [
'inp_mech', 'out_mech', 'inp_spc', 'out_spc', 'run_prefix', 'save_prefix']
RUN_INP_VAL_DCT = {
'inp_mech': ((str,), ('chemkin'), 'chemkin'),
'inp_spc': ((str,), ('csv',), 'csv'),
'out_mech': ((str,), ('chemkin'), 'chemkin'),
'out_spc': ((str,), ('csv',), 'csv'),
'print_mech': ((bool,), (True, False), False),
'print_debug': ((bool,), (True, False), False),
'run_prefix': ((str,), (), None),
'save_prefix': ((str,), (), None)
}
BASE = ('runlvl', 'inplvl', 'retryfail', 'overwrite')
MREF = ('var_splvl1', 'var_splvl2', 'var_scnlvl')
TRANS = ('bath', 'njobs', 'nsamp', 'conf')
PRNT = ('geolvl', 'proplvl', 'cnf_range', 'sort')
SUPP_OBJS = ('spc', 'ts')
TSK_KEY_DCT = {
'init_geom': (('spc',), BASE),
'find_ts': (('spc', 'ts'), BASE + MREF + ('nobarrier',)),
'conf_pucker': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_samp': (('spc', 'ts'), BASE + ('cnf_range', 'sort', 'resave',)),
'conf_energy': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_grad': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_hess': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_vpt2': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_prop': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'conf_opt': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
'hr_scan': (('spc', 'ts'), BASE + ('tors_model', 'resamp_min',
'cnf_range', 'sort',)),
'hr_grad': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
'hr_hess': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
'hr_energy': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
'hr_vpt2': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
'hr_reopt': (('spc', 'ts'), BASE + ('tors_model', 'hrthresh',
'cnf_range', 'sort',)),
'tau_samp': (('spc', 'ts'), BASE + ('resave',)),
'tau_energy': (('spc', 'ts'), BASE),
'tau_grad': (('spc', 'ts'), BASE),
'tau_hess': (('spc', 'ts'), BASE + ('hessmax',)),
'rpath_scan': (('ts',), BASE + ('rxncoord',)),
'rpath_energy': (('ts',), BASE + ('rxncoord',)),
'rpath_grad': (('ts',), BASE + ('rxncoord',)),
'rpath_hess': (('ts',), BASE + ('rxncoord',)),
'onedmin': (('spc',), (BASE + TRANS)),
'write_transport': (('spc',), (BASE + TRANS)),
'freqs': (('spc', 'ts', 'vdw'), PRNT + ('scale',)),
'energy': (('spc', 'ts'), PRNT),
'geo': (('spc', 'ts'), PRNT),
'molden': (('spc', 'ts'), PRNT),
'zmatrix': (('spc', 'ts'), PRNT),
'torsions': (('spc', 'ts'), PRNT),
'enthalpy': (('spc', 'ts'), PRNT),
'pf': (('spc', 'ts'), PRNT),
'messpf_inp': (('spc', 'ts'), PRNT),
'coeffs': (('spc', 'ts'), ()),
'write_mess': ((), ('kin_model', 'spc_model', 'overwrite',
'use_well_extension', 'float_precision',
'cnf_range', 'sort')),
'run_mess': ((), ('kin_model', 'spc_model', 'nprocs',
'cnf_range', 'sort')),
'run_fits': ((), ('kin_model', 'cnf_range', 'sort')),
}
TSK_VAL_DCT = {
'runlvl': ((str,), (), None),
'inplvl': ((str,), (), None),
'var_splvl1': ((str,), (), None),
'var_splvl2': ((str,), (), None),
'var_scnlvl': ((str,), (), None),
'resave': ((bool,), (True, False), False),
'retryfail': ((bool,), (True, False), True),
'overwrite': ((bool,), (True, False), False),
'cnf_range': ((str,), (), 'min'),
'sort': ((str,), (), None),
'hessmax': ((int,), (), 1000),
'tors_model': ((str,),
('1dhr', '1dhrf', '1dhrfa', 'mdhr', 'mdhrv'), '1dhr'),
'resamp_min': ((bool,), (True, False), False),
'hrthresh': ((float,), (), -0.2),
'potthresh': ((float,), (), 0.3),
'rxncoord': ((str,), ('irc', 'auto'), 'auto'),
'nobarrier': ((str,), ('pst', 'rpvtst', 'vrctst'), None),
're_id': ((bool,), (True, False), False),
'njobs': ((int,), (), 1),
'nsamp': ((int,), (), 1),
'conf': ((str,), ('sphere', 'min'), 'sphere'),
'geolvl': ((str,), (), None),
'proplvl': ((str,), (), None),
'nconfs': ((str,), (), 'min'),
'econfs': ((str,), (), 'min'),
'scale': ((str,), (), None),
'kin_model': ((str,), (), None),
'spc_model': ((str,), (), None),
'nprocs': ((int,), (), 10),
'use_well_extension': ((bool,), (), False),
'linked_pes': ((tuple,), (), None),
'float_precision': ((str,), ('double', 'quadruple'), 'double'),
}
def input_dictionary(run_str):
inp_block = ioformat.ptt.end_block(run_str, 'input', footer='input')
inp_dct = ioformat.ptt.keyword_dct_from_block(inp_block)
inp_dct = automol.util.dict_.right_update(
defaults_from_val_dct(RUN_INP_VAL_DCT), inp_dct)
check_dct1(inp_dct, RUN_INP_VAL_DCT, RUN_INP_REQ, 'Run-Input')
return inp_dct
def chem_idxs(run_str):
pes_block = ioformat.ptt.end_block(run_str, 'pes', footer='pes')
if pes_block is not None:
_pes_idxs = {}
for line in pes_block.strip().splitlines():
[pes_nums, chn_nums] = line.split(':')
_pes_nums = ioformat.ptt.idx_lst_from_line(pes_nums)
_chn_nums = ioformat.ptt.idx_lst_from_line(chn_nums)
for idx in _pes_nums:
_pes_idxs.update({idx-1: tuple(val-1 for val in _chn_nums)})
else:
_pes_idxs = None
spc_block = ioformat.ptt.end_block(run_str, 'spc', footer='spc')
if spc_block is not None:
_idxs = ()
for line in spc_block.splitlines():
_idxs += ioformat.ptt.idx_lst_from_line(line)
_spc_idxs = {1: tuple(val-1 for val in _idxs)}
else:
_spc_idxs = None
if _pes_idxs is None and _spc_idxs is None:
error_message('No pes or spc section given in run.dat file. Quitting')
sys.exit()
return _pes_idxs, _spc_idxs
def extract_task(tsk, tsk_lst):
tsk_inf = None
for _tsk_inf in tsk_lst:
if any(x == tsk for x in _tsk_inf):
tsk_inf = _tsk_inf
break
return tsk_inf
def tasks(run_str, thy_dct):
es_block = ioformat.ptt.end_block(run_str, 'els', footer='els')
trans_block = ioformat.ptt.end_block(run_str, 'trans', footer='trans')
therm_block = ioformat.ptt.end_block(run_str, 'thermo', footer='thermo')
ktp_block = ioformat.ptt.end_block(run_str, 'ktp', footer='ktp')
proc_block = ioformat.ptt.end_block(run_str, 'proc', footer='proc')
es_tsks = _tsk_lst(es_block, 3)
therm_tsks = _tsk_lst(therm_block, 2)
ktp_tsks = _tsk_lst(ktp_block, 2)
trans_tsks = _tsk_lst(trans_block, 3)
proc_tsks = _tsk_lst(proc_block, 3)
es_tsks = _tsk_defaults(es_tsks)
therm_tsks = _tsk_defaults(therm_tsks)
ktp_tsks = _tsk_defaults(ktp_tsks)
trans_tsks = _tsk_defaults(trans_tsks)
proc_tsks = _tsk_defaults(proc_tsks)
_check_tsks(es_tsks, thy_dct)
_check_tsks(therm_tsks, thy_dct)
_check_tsks(ktp_tsks, thy_dct)
_check_tsks(trans_tsks, thy_dct)
_check_tsks(proc_tsks, thy_dct)
tsk_dct = {
'es': es_tsks,
'thermo': therm_tsks,
'ktp': ktp_tsks,
'trans': trans_tsks,
'proc': proc_tsks
}
return tsk_dct
def _tsk_lst(tsk_str, num):
if tsk_str is not None:
tsks = []
tsk_str = ioformat.remove_whitespace_from_string(tsk_str)
for line in tsk_str.splitlines():
_tsk = _split_line(line, num)
tsks.append(_tsk)
mod_tsks = tsks
else:
mod_tsks = None
return mod_tsks
def _expand_tsks(tsks_lst):
mod_tsks_lst = []
for tsk_lst in tsks_lst:
[obj, tsk, dct] = tsk_lst
objs = ['spc', 'ts'] if obj == 'all' else [obj]
for obj in objs:
mod_tsks_lst.append([obj, tsk, dct])
return mod_tsks_lst
def _tsk_defaults(tsk_lst):
if tsk_lst is not None:
mod_tsk_lst = []
for _tsk_lst in tsk_lst:
keyword_dct = _tsk_lst[-1]
tsk = _tsk_lst[:-1][-1]
default_dct = defaults_from_key_val_dcts(
tsk, TSK_KEY_DCT, TSK_VAL_DCT)
new_key_dct = automol.util.dict_.right_update(
default_dct, keyword_dct)
mod_lst = _tsk_lst[:-1] + [new_key_dct]
mod_tsk_lst.append(mod_lst)
else:
mod_tsk_lst = None
return mod_tsk_lst
def _check_tsks(tsk_lsts, thy_dct):
if tsk_lsts is not None:
for tsk_lst in tsk_lsts:
_tsk = tsk_lst[:-1]
if len(_tsk) == 2:
obj, tsk = _tsk[0], _tsk[1]
else:
obj, tsk = None, _tsk[0]
key_dct = tsk_lst[-1]
if obj is not None:
obj_lst = SUPP_OBJS if obj == 'all' else (obj,)
for _obj in obj_lst:
if _obj not in TSK_KEY_DCT[tsk][0]:
error_message(f'obj {obj}, not allowed for {tsk}')
sys.exit()
check_dct1(key_dct, TSK_VAL_DCT, (), 'Task')
check_thy_lvls(key_dct, thy_dct)
def _split_line(line, num):
line = line.split()
if num == 3:
tsk, key_lst = line[:2], line[2:]
elif num == 2:
tsk, key_lst = line[:1], line[1:]
key_dct = ioformat.ptt.keyword_dct_from_block('\n'.join(key_lst))
return tsk + [key_dct]
def check_inputs(tsk_dct, pes_dct, pes_mod_dct, spc_mod_dct):
if tsk_dct['ktp'] or tsk_dct['thermo']:
if pes_mod_dct is None:
error_message(
'kTPDriver or Thermo Requested. \n'
' However no kin model provided in models.dat\n'
' Exiting MechDriver...')
sys.exit()
if spc_mod_dct is None:
error_message(
'kTPDriver or Thermo Requested. \n'
' However no spc model provided in models.dat\n'
' Exiting MechDriver...')
sys.exit()
if tsk_dct['ktp']:
if pes_dct is None:
error_message(
'kTPDriver Requested. \n'
' However no reaction channels provided in mechanism.dat\n'
' Exiting MechDriver...')
sys.exit()
| true
| true
|
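A minimal sketch of the three-stage dictionary build the module docstring describes (user-specified options, then defaults for anything missing, then validation); plain dicts stand in for the automol and ioformat helpers used above:
DEFAULTS = {'retryfail': True, 'overwrite': False}
ALLOWED = {'retryfail': (True, False), 'overwrite': (True, False)}

def build_keyword_dct(user_dct):
    dct = dict(DEFAULTS)
    dct.update(user_dct)  # user-specified values win, like right_update
    for key, val in dct.items():
        if val not in ALLOWED[key]:
            raise ValueError(f'{key}={val} not supported')
    return dct

print(build_keyword_dct({'overwrite': True}))  # {'retryfail': True, 'overwrite': True}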
7908c33ec88b3c62a7ab532f0fbc23129d85ce34
| 8,070
|
py
|
Python
|
Homework_1/Python/homework_1_by_kirbs.py
|
freeernest/edX-Learning-From-Data-Solutions
|
5cbcf0885b5fdb00c3658d230fc7bb7e20b5cf44
|
[
"Apache-2.0"
] | 79
|
2015-01-27T11:09:24.000Z
|
2022-02-05T12:01:35.000Z
|
Homework_1/Python/homework_1_by_kirbs.py
|
freeernest/edX-Learning-From-Data-Solutions
|
5cbcf0885b5fdb00c3658d230fc7bb7e20b5cf44
|
[
"Apache-2.0"
] | 1
|
2018-08-25T05:45:11.000Z
|
2018-12-04T14:44:32.000Z
|
Homework_1/Python/homework_1_by_kirbs.py
|
freeernest/edX-Learning-From-Data-Solutions
|
5cbcf0885b5fdb00c3658d230fc7bb7e20b5cf44
|
[
"Apache-2.0"
] | 40
|
2015-04-06T18:43:34.000Z
|
2021-03-28T18:08:40.000Z
|
#! /usr/bin/python
#
# This is the answer code for the course "Learning from Data" on edX.org
# https://www.edx.org/course/caltechx/cs1156x/learning-data/1120
#
# The software is intended for course usage, no guarantee whatsoever.
# Date: 10/4/2013
# Created by: kirbs
# See notes at bottom for further details.
import sys
import os
import random
import pylab
import scipy
import numpy as np
#############################################################################
#############################################################################
# Returns the x,y coordinates of the two points defining the separating line,
# plus a list of sample points with y (indicating +1/-1) as the last element.
# Each point is a list in the following format:
# [x0, x1, x2, y] i.e. [dummy 1 to represent threshold, x1 value, x2 value, sample point's correct sign (+1/-1)]
def generatePoints(numberOfPoints):
## random.seed(1) # used for testing
x1 = random.uniform(-1, 1)
y1 = random.uniform(-1, 1)
x2 = random.uniform(-1, 1)
y2 = random.uniform(-1, 1)
points = []
for i in range(0, numberOfPoints):  # fixed off-by-one: generate exactly numberOfPoints points
## random.seed(1) # used for testing
x = random.uniform (-1, 1)
y = random.uniform (-1, 1)
points.append([1, x, y, targetFunction(x1, y1, x2, y2, x, y)]) # add 1/-1 indicator to the end of each point list
return x1, y1, x2, y2, points
# This function determines the cross product between a line and a given point.
# Returns 1 if above the line and -1 if below the line.
def targetFunction(x1,y1,x2,y2,x3,y3):
u = (x2-x1)*(y3-y1) - (y2-y1)*(x3-x1)
if u >= 0:
return 1
elif u < 0:
return -1
# Simple sign function
def sign(y):
if y >= 0:
return 1
elif y < 0:
return -1
# a.k.a dot product
def perceptronCalc(x, w):
return x[0]*w[0] + x[1]*w[1] + x[2]*w[2]
def train(training_points, iterationLimit):
w = [0.0,0.0,0.0] # initialize weights for w[0], w[1], w[2]
learned = False
iterations = 0 # keep track of the iteration count
# This method is the primary PLA implementation.
# It returns True when all sample points are correctly classified by the hypothesis.
# Returns False if there was a misclassified point and the weight vector changed.
def updateWeights():
random.shuffle(training_points) # randomize training points
for point in training_points:
result = sign(perceptronCalc(point,w)) # calculate the point and determine its sign.
if point[3] != result: # does sample point's result match our calculated result?
# Use line below to watch the perceptron's weights change
# print str(iterations) + " " + str(w) + " " + str(result) + " " + str(point) + " " + str(perceptronCalc(point))
# if not update weights by sample point's result
w[0] += point[0]*point[3]
w[1] += point[1]*point[3]
w[2] += point[2]*point[3]
return False # break out of loop and return
return True # if the loop reaches this point all calculated points in the training points match their expected y's
while not learned:
iterations += 1
noErrors = updateWeights()
if iterations == iterationLimit or noErrors:
learned = True
break
return iterations, w
# Calculates approximate probability of hypothesis function returns a result
# that is different from the target function.
def findErrorProbability(x1,y1,x2,y2, weights, numberOfPointsToTest):
numberOfErrors = 0
for i in range(0, numberOfPointsToTest):  # fixed off-by-one: test exactly numberOfPointsToTest points
#generate random test points
x = random.uniform(-1,1)
y = random.uniform(-1,1)
#compare results from target function and hypothesis function
if targetFunction(x1,y1,x2,y2,x,y) != sign(perceptronCalc([1,x,y], weights)):
numberOfErrors += 1 # keep track of errors
return numberOfErrors/float(numberOfPointsToTest)
# Runs runTrial the specified number of times.
# Returns average iterations, average error probability, and a histogram of trial iteration count.
def runSimulation(numberOfTrials, numberOfTestPoints, iterationLimit):
iterations = []
probability = []
for t in range(1,numberOfTrials+1):
iteration_count, w, error_probability = runTrial(numberOfTestPoints, iterationLimit)
iterations.append(iteration_count)
probability.append(error_probability)
print "Avg. iterations: " + str(np.mean(iterations)) + " : Avg. error probability: " + str(np.mean(probability))
pylab.hist(iterations)
pylab.show()
# Runs one trial based on the number of test points desired and an iteration limit to cap run time.
# If showChart is set to True, this function with also return a chart of the points, target function and hypothesis.
# Returns the number of iterations the perceptron took to converge, the final weights, and the error probability.
def runTrial(numberOfTestPoints, iterationLimit, showChart = False):
x1, y1, x2, y2, points = generatePoints(numberOfTestPoints)
iterations, w = train(points, iterationLimit)
errorProb = findErrorProbability(x1,y1,x2,y2,w, 10000)
if showChart:
if iterations == iterationLimit:
print "No solution found in " + str(iterations) + " iterations!"
print "Iterations: " + str(iterations) + ' | Weights: ' + str(w)
# plot points above(green) and below(blue) the target function.
green_x = []
green_y = []
blue_x = []
blue_y = []
for x in points:
if x[3] == 1:
green_x.append(x[1])
green_y.append(x[2])
else:
blue_x.append(x[1])
blue_y.append(x[2])
pylab.plot(green_x, green_y, 'go')
pylab.plot(blue_x, blue_y, 'bo')
# plot target function(black) and hypothesis function(red) lines
x = np.array( [-1,1] )
slope = (y2-y1)/(x2-x1)
intercept = y2 - slope * x2
pylab.plot(x, slope*x + intercept, 'k--')
pylab.plot( x, -w[1]/w[2] * x - w[0] / w[2] , 'r' ) # this will throw an error if w[2] == 0
pylab.ylim([-1,1])
pylab.xlim([-1,1])
pylab.show()
return iterations, w, errorProb
########################################################################
############################----NOTES----###############################
########################################################################
# Uncomment one line below and reload the script in your favorite Python
# environment. Or load the script and type the method with the required
# parameters you want to execute.
########################################################################
########################################################################
# runSimulation takes 3 arguments: number of trials to run, number of test points, and iteration limit.
# The higher you set each parameter, the longer this method takes to run.
# This will return the average number of iterations the perceptron took to converge
# and the average error probability.
# Question 7/8
# runSimulation(1000, 10, 100)
# Question 9/10
# runSimulation(1000, 100, 1000)
#########################################################################
#########################################################################
# runTrial takes 3 arguments, number of points, iteration limit, and boolean if a chart should be shown.
# This method returns the number of iterations the perceptron took to converge, the final
# weights vector, and the error probability.
# runTrial(10, 100, True) # Show graph of one trial with points, hypothesis (red line), and target function (black line).
# runTrial(10, 100) # No chart
# runTrial(10, 100, False) # No chart
| 41.173469
| 129
| 0.584634
|
import sys
import os
import random
import pylab
import scipy
import numpy as np
| false
| true
|
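The target function above is the sign of a 2-D cross product; the short NumPy sketch below (an alternative formulation, not part of the homework code) shows the same test vectorised over many points at once:
import numpy as np

def side_of_line(p1, p2, pts):
    # +1 on or above the line through p1 and p2, -1 below (cross-product sign)
    (x1, y1), (x2, y2) = p1, p2
    u = (x2 - x1) * (pts[:, 1] - y1) - (y2 - y1) * (pts[:, 0] - x1)
    return np.where(u >= 0, 1, -1)

pts = np.array([[0.0, 0.5], [0.0, -0.5]])
print(side_of_line((-1.0, 0.0), (1.0, 0.0), pts))  # [ 1 -1]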
7908c7b459778572fc6997cfc908d7851bb9cdc7
| 2,354
|
py
|
Python
|
spidermon/templates.py
|
zanachka/spidermon
|
d2840b6bbb6ba6d8a0ef633deac66588d243e615
|
[
"BSD-3-Clause"
] | 405
|
2019-01-10T13:06:09.000Z
|
2022-03-30T20:14:58.000Z
|
spidermon/templates.py
|
zanachka/spidermon
|
d2840b6bbb6ba6d8a0ef633deac66588d243e615
|
[
"BSD-3-Clause"
] | 226
|
2019-01-04T13:31:17.000Z
|
2022-03-28T21:06:10.000Z
|
spidermon/templates.py
|
zanachka/spidermon
|
d2840b6bbb6ba6d8a0ef633deac66588d243e615
|
[
"BSD-3-Clause"
] | 87
|
2019-01-07T10:23:26.000Z
|
2022-02-22T04:38:04.000Z
|
import datetime
import inspect
import os
import pprint as pretty_print
import jinja2
from jinja2 import Environment, FileSystemLoader
DEFAULT_TEMPLATE_FOLDERS = ["templates"]
def get_log_errors(logs):
return [e for e in logs.list() if e["level"] >= 40]
def make_list(obj):
return list(obj)
def pprint(obj):
return pretty_print.pformat(obj)
def format_time(time):
if not isinstance(time, datetime.timedelta):
time = datetime.timedelta(seconds=int(time / 1000.0))
return ":".join(str(time).split(":")[:2]) + "h"
FILTERS = {
"pprint": pprint,
"list": make_list,
"get_log_errors": get_log_errors,
"format_time": format_time,
}
GLOBALS = {"datetime": datetime, "str": str}
def get_environment(paths):
loader = FileSystemLoader(paths)
environment = Environment(loader=loader, lstrip_blocks=True, trim_blocks=True)
for filter_name, filter in FILTERS.items():
environment.filters[filter_name] = filter
for global_name, global_value in GLOBALS.items():
environment.globals[global_name] = global_value
return environment
class TemplateLoader:
def __init__(self):
self.paths = []
self.reload_env()
def add_path(self, path):
if path not in self.paths and os.path.isdir(path):
self.paths.append(path)
self.reload_env()
def auto_discover(self, path=None, folder=None):
caller_folder = os.path.dirname(inspect.stack()[1][1])
if path:
caller_folder = os.path.join(caller_folder, path)
if folder:
self.add_path(os.path.join(caller_folder, folder))
else:
self.discover_folder(caller_folder)
def discover_folder(self, candidate_folder):
for folder in [
os.path.join(candidate_folder, dir) for dir in DEFAULT_TEMPLATE_FOLDERS
]:
self.add_path(folder)
def reload_env(self):
self.env = get_environment(self.paths)
def get_template(self, name):
if os.path.isabs(name): # If provided an absolute path to a template
environment = get_environment(os.path.dirname(name))
template = environment.get_template(os.path.basename(name))
else:
template = self.env.get_template(name)
return template
template_loader = TemplateLoader()
| 26.449438
| 83
| 0.661852
|
import datetime
import inspect
import os
import pprint as pretty_print
import jinja2
from jinja2 import Environment, FileSystemLoader
DEFAULT_TEMPLATE_FOLDERS = ["templates"]
def get_log_errors(logs):
return [e for e in logs.list() if e["level"] >= 40]
def make_list(obj):
return list(obj)
def pprint(obj):
return pretty_print.pformat(obj)
def format_time(time):
if not isinstance(time, datetime.timedelta):
time = datetime.timedelta(seconds=int(time / 1000.0))
return ":".join(str(time).split(":")[:2]) + "h"
FILTERS = {
"pprint": pprint,
"list": make_list,
"get_log_errors": get_log_errors,
"format_time": format_time,
}
GLOBALS = {"datetime": datetime, "str": str}
def get_environment(paths):
loader = FileSystemLoader(paths)
environment = Environment(loader=loader, lstrip_blocks=True, trim_blocks=True)
for filter_name, filter in FILTERS.items():
environment.filters[filter_name] = filter
for global_name, global_value in GLOBALS.items():
environment.globals[global_name] = global_value
return environment
class TemplateLoader:
def __init__(self):
self.paths = []
self.reload_env()
def add_path(self, path):
if path not in self.paths and os.path.isdir(path):
self.paths.append(path)
self.reload_env()
def auto_discover(self, path=None, folder=None):
caller_folder = os.path.dirname(inspect.stack()[1][1])
if path:
caller_folder = os.path.join(caller_folder, path)
if folder:
self.add_path(os.path.join(caller_folder, folder))
else:
self.discover_folder(caller_folder)
def discover_folder(self, candidate_folder):
for folder in [
os.path.join(candidate_folder, dir) for dir in DEFAULT_TEMPLATE_FOLDERS
]:
self.add_path(folder)
def reload_env(self):
self.env = get_environment(self.paths)
def get_template(self, name):
if os.path.isabs(name):
environment = get_environment(os.path.dirname(name))
template = environment.get_template(os.path.basename(name))
else:
template = self.env.get_template(name)
return template
template_loader = TemplateLoader()
| true
| true
|
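A minimal sketch of what the FILTERS wiring above enables once the Environment is built: a registered filter becomes a pipe-callable inside templates.
import datetime
from jinja2 import Environment

def format_time(ms):
    # same logic as the filter above: milliseconds -> "H:MMh"
    td = datetime.timedelta(seconds=int(ms / 1000.0))
    return ':'.join(str(td).split(':')[:2]) + 'h'

env = Environment()
env.filters['format_time'] = format_time
print(env.from_string('{{ 3723000 | format_time }}').render())  # 1:02h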
7908c9281579a5ac2ed1d6c8228dd301d5d7fa73
| 1,621
|
py
|
Python
|
selenium/find_elements/app_main_menu.py
|
aminzin-1990/software-testing-repository
|
72c9ac49b8ec805e0a80f59bbc581c8324ef5abe
|
[
"Apache-2.0"
] | null | null | null |
selenium/find_elements/app_main_menu.py
|
aminzin-1990/software-testing-repository
|
72c9ac49b8ec805e0a80f59bbc581c8324ef5abe
|
[
"Apache-2.0"
] | null | null | null |
selenium/find_elements/app_main_menu.py
|
aminzin-1990/software-testing-repository
|
72c9ac49b8ec805e0a80f59bbc581c8324ef5abe
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
import time
url = "http://localhost/litecart/admin/"
browser = webdriver.Chrome()
browser.implicitly_wait(1)
without_title = 0
try:
browser.get(url)
# log in
login = browser.find_element_by_css_selector("[name='username']")
login.send_keys("admin")
password = browser.find_element_by_css_selector("[name='password']")
password.send_keys("admin")
button = browser.find_element_by_css_selector("[name='login']")
button.click()
time.sleep(1) # without this sleep the program stops working; I would really like to discuss this point
# read the main menu
main_menu = browser.find_elements_by_css_selector("#box-apps-menu > li")
for i in range(len(main_menu)):
main_menu_temp = browser.find_elements_by_css_selector("#box-apps-menu > li")
main_menu_temp[i].click()
# read the submenu
sub_menu = browser.find_elements_by_css_selector(".docs > li")
# condition for menu items that have no submenu
if len(sub_menu) < 1:
title = browser.find_element_by_css_selector("#content > h1").text
if len(title) == 0:
without_title += 1
for j in range(len(sub_menu)):
sub_menu_temp = browser.find_elements_by_css_selector(".docs > li")
sub_menu_temp[j].click()
title = browser.find_element_by_css_selector("#content > h1").text
if len(title) == 0:
without_title += 1
if without_title > 0:
print('BUG!')
else:
print('NO BUG')
finally:
browser.quit()
| 30.018519
| 106
| 0.650833
|
from selenium import webdriver
import time
url = "http://localhost/litecart/admin/"
browser = webdriver.Chrome()
browser.implicitly_wait(1)
without_title = 0
try:
browser.get(url)
login = browser.find_element_by_css_selector("[name='username']")
login.send_keys("admin")
password = browser.find_element_by_css_selector("[name='password']")
password.send_keys("admin")
button = browser.find_element_by_css_selector("[name='login']")
button.click()
time.sleep(1)
main_menu = browser.find_elements_by_css_selector("#box-apps-menu > li")
for i in range(len(main_menu)):
main_menu_temp = browser.find_elements_by_css_selector("#box-apps-menu > li")
main_menu_temp[i].click()
sub_menu = browser.find_elements_by_css_selector(".docs > li")
if len(sub_menu) < 1:
title = browser.find_element_by_css_selector("#content > h1").text
if len(title) == 0:
without_title += 1
for j in range(len(sub_menu)):
sub_menu_temp = browser.find_elements_by_css_selector(".docs > li")
sub_menu_temp[j].click()
title = browser.find_element_by_css_selector("#content > h1").text
if len(title) == 0:
without_title += 1
if without_title > 0:
print('BUG!')
else:
print('NO BUG')
finally:
browser.quit()
| true
| true
|
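A hedged alternative to the time.sleep(1) the author flags above: an explicit wait that blocks until the admin menu items are actually present (requires a live browser session; the selector is taken from the script, the helper name is illustrative).
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def wait_for_main_menu(browser, timeout=10):
    # returns the menu items once they appear, instead of sleeping blindly
    return WebDriverWait(browser, timeout).until(
        EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#box-apps-menu > li")))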
7908c9a3be5742ab6ae458177c4da0a5715cdafd
| 500
|
py
|
Python
|
cms/test_utils/project/customuserapp/admin.py
|
samirasnoun/django_cms_gallery_image
|
7792aa06a60877d86c022e73b60d0d669e79cb74
|
[
"BSD-3-Clause"
] | 1
|
2019-04-15T10:28:46.000Z
|
2019-04-15T10:28:46.000Z
|
cms/test_utils/project/customuserapp/admin.py
|
samirasnoun/django_cms_gallery_image
|
7792aa06a60877d86c022e73b60d0d669e79cb74
|
[
"BSD-3-Clause"
] | null | null | null |
cms/test_utils/project/customuserapp/admin.py
|
samirasnoun/django_cms_gallery_image
|
7792aa06a60877d86c022e73b60d0d669e79cb74
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as OriginalUserAdmin
from django.contrib.auth.models import User as OriginalUser
from cms.utils.compat.dj import get_user_model
if getattr(OriginalUser._meta, 'swapped', False):
class UserAdmin(OriginalUserAdmin):
list_display = ('username', 'email', 'get_full_name', 'is_staff')
search_fields = ('username', 'email',)
admin.site.register(get_user_model(), UserAdmin)
| 35.714286
| 73
| 0.74
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as OriginalUserAdmin
from django.contrib.auth.models import User as OriginalUser
from cms.utils.compat.dj import get_user_model
if getattr(OriginalUser._meta, 'swapped', False):
class UserAdmin(OriginalUserAdmin):
list_display = ('username', 'email', 'get_full_name', 'is_staff')
search_fields = ('username', 'email',)
admin.site.register(get_user_model(), UserAdmin)
| true
| true
|
7908ca117a897c828c0175daa906cd87d1f78cc8
| 7,702
|
py
|
Python
|
nodes/core/hardware/nrgpio.py
|
meeki007/node-red
|
c685a310560ae9af4b28e14ed466ec788a66984c
|
[
"Apache-2.0"
] | 72
|
2016-03-24T15:47:19.000Z
|
2021-12-01T02:12:32.000Z
|
nodes/core/hardware/nrgpio.py
|
meeki007/node-red
|
c685a310560ae9af4b28e14ed466ec788a66984c
|
[
"Apache-2.0"
] | 20
|
2017-01-21T04:23:28.000Z
|
2020-01-23T12:54:44.000Z
|
nodes/core/hardware/nrgpio.py
|
meeki007/node-red
|
c685a310560ae9af4b28e14ed466ec788a66984c
|
[
"Apache-2.0"
] | 14
|
2017-04-07T18:33:05.000Z
|
2022-02-04T12:48:01.000Z
|
#!/usr/bin/python
#
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Import library functions we need
import RPi.GPIO as GPIO
import struct
import sys
import os
import subprocess
from time import sleep
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
bounce = 25
if len(sys.argv) > 2:
cmd = sys.argv[1].lower()
pin = int(sys.argv[2])
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
if cmd == "pwm":
#print("Initialised pin "+str(pin)+" to PWM")
try:
freq = int(sys.argv[3])
except:
freq = 100
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, freq)
p.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
p.ChangeDutyCycle(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
elif cmd == "buzz":
#print("Initialised pin "+str(pin)+" to Buzz")
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, 100)
p.stop()
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
elif float(data) == 0:
p.stop()
else:
p.start(50)
p.ChangeFrequency(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
elif cmd == "out":
#print("Initialised pin "+str(pin)+" to OUT")
GPIO.setup(pin,GPIO.OUT)
if len(sys.argv) == 4:
GPIO.output(pin,int(sys.argv[3]))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except:
if len(sys.argv) == 4:
data = int(sys.argv[3])
else:
data = 0
if data != 0:
data = 1
GPIO.output(pin,data)
elif cmd == "in":
#print("Initialised pin "+str(pin)+" to IN")
bounce = float(sys.argv[4])
def handle_callback(chan):
sleep(bounce/1000.0)
print(GPIO.input(chan))
if sys.argv[3].lower() == "up":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_UP)
elif sys.argv[3].lower() == "down":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_DOWN)
else:
GPIO.setup(pin,GPIO.IN)
print(GPIO.input(pin))
GPIO.add_event_detect(pin, GPIO.BOTH, callback=handle_callback, bouncetime=int(bounce))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
elif cmd == "byte":
#print("Initialised BYTE mode - "+str(pin)+)
list = [7,11,13,12,15,16,18,22]
GPIO.setup(list,GPIO.OUT)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
for bit in range(8):
if pin == 1:
mask = 1 << (7 - bit)
else:
mask = 1 << bit
GPIO.output(list[bit], data & mask)
elif cmd == "borg":
#print("Initialised BORG mode - "+str(pin)+)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
r = GPIO.PWM(11, 100)
g = GPIO.PWM(13, 100)
b = GPIO.PWM(15, 100)
r.start(0)
g.start(0)
b.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
c = data.split(",")
r.ChangeDutyCycle(float(c[0]))
g.ChangeDutyCycle(float(c[1]))
b.ChangeDutyCycle(float(c[2]))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
elif cmd == "mouse": # catch mice button events
file = open( "/dev/input/mice", "rb" )
oldbutt = 0
def getMouseEvent():
global oldbutt
global pin
buf = file.read(3)
pin = pin & 0x07
button = bytearray(buf)[0] & pin # mask out just the required button(s); bytearray indexing works on Python 2 and 3
if button != oldbutt: # only send if changed
oldbutt = button
print(button)
while True:
try:
getMouseEvent()
except:
file.close()
sys.exit(0)
elif cmd == "kbd": # catch keyboard button events
try:
while not os.path.isdir("/dev/input/by-path"):
sleep(10)
infile = subprocess.check_output("ls /dev/input/by-path/ | grep -m 1 'kbd'", shell=True).strip()
infile_path = "/dev/input/by-path/" + infile
EVENT_SIZE = struct.calcsize('llHHI')
file = open(infile_path, "rb")
event = file.read(EVENT_SIZE)
while event:
(tv_sec, tv_usec, type, code, value) = struct.unpack('llHHI', event)
#if type != 0 or code != 0 or value != 0:
if type == 1:
# type,code,value
print("%u,%u" % (code, value))
event = file.read(EVENT_SIZE)
print("0,0")
file.close()
sys.exit(0)
except:
file.close()
sys.exit(0)
elif len(sys.argv) > 1:
cmd = sys.argv[1].lower()
if cmd == "rev":
print(GPIO.RPI_REVISION)
elif cmd == "ver":
print(GPIO.VERSION)
elif cmd == "info":
print(GPIO.RPI_INFO)
else:
print("Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}")
print(" only ver (gpio version) and info (board information) accept no pin parameter.")
else:
print("Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}")
| 32.091667
| 108
| 0.497923
|
import RPi.GPIO as GPIO
import struct
import sys
import os
import subprocess
from time import sleep
try:
raw_input
except NameError:
raw_input = input
bounce = 25
if len(sys.argv) > 2:
cmd = sys.argv[1].lower()
pin = int(sys.argv[2])
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
if cmd == "pwm":
try:
freq = int(sys.argv[3])
except:
freq = 100
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, freq)
p.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
p.ChangeDutyCycle(float(data))
except (EOFError, SystemExit):
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
elif cmd == "buzz":
#print("Initialised pin "+str(pin)+" to Buzz")
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, 100)
p.stop()
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
elif float(data) == 0:
p.stop()
else:
p.start(50)
p.ChangeFrequency(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
elif cmd == "out":
GPIO.setup(pin,GPIO.OUT)
if len(sys.argv) == 4:
GPIO.output(pin,int(sys.argv[3]))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit):
GPIO.cleanup(pin)
sys.exit(0)
except:
if len(sys.argv) == 4:
data = int(sys.argv[3])
else:
data = 0
if data != 0:
data = 1
GPIO.output(pin,data)
elif cmd == "in":
#print("Initialised pin "+str(pin)+" to IN")
bounce = float(sys.argv[4])
def handle_callback(chan):
sleep(bounce/1000.0)
print(GPIO.input(chan))
if sys.argv[3].lower() == "up":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_UP)
elif sys.argv[3].lower() == "down":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_DOWN)
else:
GPIO.setup(pin,GPIO.IN)
print(GPIO.input(pin))
GPIO.add_event_detect(pin, GPIO.BOTH, callback=handle_callback, bouncetime=int(bounce))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
elif cmd == "byte":
list = [7,11,13,12,15,16,18,22]
GPIO.setup(list,GPIO.OUT)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit):
GPIO.cleanup()
sys.exit(0)
except:
data = 0
for bit in range(8):
if pin == 1:
mask = 1 << (7 - bit)
else:
mask = 1 << bit
GPIO.output(list[bit], data & mask)
elif cmd == "borg":
#print("Initialised BORG mode - "+str(pin)+)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
r = GPIO.PWM(11, 100)
g = GPIO.PWM(13, 100)
b = GPIO.PWM(15, 100)
r.start(0)
g.start(0)
b.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
c = data.split(",")
r.ChangeDutyCycle(float(c[0]))
g.ChangeDutyCycle(float(c[1]))
b.ChangeDutyCycle(float(c[2]))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
elif cmd == "mouse":
file = open( "/dev/input/mice", "rb" )
oldbutt = 0
def getMouseEvent():
global oldbutt
global pin
buf = file.read(3)
pin = pin & 0x07
button = bytearray(buf)[0] & pin
if button != oldbutt:
oldbutt = button
print(button)
while True:
try:
getMouseEvent()
except:
file.close()
sys.exit(0)
elif cmd == "kbd":
try:
while not os.path.isdir("/dev/input/by-path"):
sleep(10)
infile = subprocess.check_output("ls /dev/input/by-path/ | grep -m 1 'kbd'", shell=True).strip()
infile_path = "/dev/input/by-path/" + infile
EVENT_SIZE = struct.calcsize('llHHI')
file = open(infile_path, "rb")
event = file.read(EVENT_SIZE)
while event:
(tv_sec, tv_usec, type, code, value) = struct.unpack('llHHI', event)
if type == 1:
print("%u,%u" % (code, value))
event = file.read(EVENT_SIZE)
print("0,0")
file.close()
sys.exit(0)
except:
file.close()
sys.exit(0)
elif len(sys.argv) > 1:
cmd = sys.argv[1].lower()
if cmd == "rev":
print(GPIO.RPI_REVISION)
elif cmd == "ver":
print(GPIO.VERSION)
elif cmd == "info":
print(GPIO.RPI_INFO)
else:
print("Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}")
print(" only ver (gpio version) and info (board information) accept no pin parameter.")
else:
print("Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}")
| true
| true
|
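A self-contained sketch of the Linux input_event layout the kbd branch above parses: packing a synthetic event and unpacking it with the same 'llHHI' format round-trips the fields (the code/value numbers are samples, not taken from the script).
import struct

EVENT_FORMAT = 'llHHI'  # tv_sec, tv_usec, type, code, value
EVENT_SIZE = struct.calcsize(EVENT_FORMAT)

buf = struct.pack(EVENT_FORMAT, 0, 0, 1, 30, 1)  # type 1 = EV_KEY
tv_sec, tv_usec, etype, code, value = struct.unpack(EVENT_FORMAT, buf)
assert len(buf) == EVENT_SIZE
print("%u,%u" % (code, value))  # 30,1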
7908ca9bfb67407af5dbd9e8c80c852686aa4121
| 2,695
|
py
|
Python
|
tests/dumpsmach_test.py
|
zexiangliu/tulip-control
|
789a593696a03c291a553a0350fcebf3368a16da
|
[
"BSD-3-Clause"
] | 1
|
2020-02-13T14:13:50.000Z
|
2020-02-13T14:13:50.000Z
|
tests/dumpsmach_test.py
|
arw12625/tulip-control
|
eebe65c942d9b5b080a88e72f33a725b51bd52c5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dumpsmach_test.py
|
arw12625/tulip-control
|
eebe65c942d9b5b080a88e72f33a725b51bd52c5
|
[
"BSD-3-Clause"
] | 1
|
2019-07-09T16:32:39.000Z
|
2019-07-09T16:32:39.000Z
|
#!/usr/bin/env python
"""Tests for the export mechanisms of tulip.dumpsmach."""
from __future__ import print_function
import logging
import networkx as nx
from nose.tools import assert_raises
from tulip import spec, synth, dumpsmach
logging.getLogger('tulip').setLevel('ERROR')
logging.getLogger('astutils').setLevel('ERROR')
logging.getLogger('omega').setLevel('ERROR')
class basic_test(object):
def setUp(self):
self.triv = spec.GRSpec(env_vars="x", sys_vars="y",
env_init="x & y", env_prog="x",
sys_init="y", sys_prog="y && x")
self.triv_M = synth.synthesize(
self.triv, solver='omega')
self.dcounter = spec.GRSpec(
sys_vars={"y": (0, 5)},
env_init=['y = 0'],
sys_prog=["y=0", "y=5"])
self.dcounter_M = synth.synthesize(
self.dcounter, solver='omega')
self.enumf = spec.GRSpec(
sys_vars={'y': ['a', 'b']},
env_init=['y="a"'],
sys_safety=['y = "a" -> X(y = "b")',
'y = "b" -> X(y = "a")'])
self.enumf_M = synth.synthesize(
self.enumf, solver='omega')
def tearDown(self):
self.dcounter = None
self.dcounter_M = None
def test_python_case(self):
compile(dumpsmach.python_case(self.triv_M),
filename="<string>", mode="exec")
# print(dumpsmach.python_case(self.dcounter_M))
compile(dumpsmach.python_case(self.dcounter_M),
filename="<string>", mode="exec")
exec(compile(dumpsmach.python_case(self.enumf_M)
+'\nM = TulipStrategy(); M.move()',
filename="<string>", mode="exec"))
def test_nx():
g = nx.DiGraph()
g.inputs = {'a': '...', 'b': '...'}
g.outputs = {'c': '...', 'd': '...'}
start = 'Sinit'
g.add_edge(start, 0, a=0, b=0, c=0, d=0)
g.add_edge(0, 1, a=0, b=1, c=0, d=1)
g.add_edge(1, 2, a=1, b=0, c=1, d=1)
print(dumpsmach.python_case(g, classname='Machine', start='Sinit'))
exe_globals = dict()
exec(dumpsmach.python_case(g, classname='Machine', start='Sinit'), exe_globals)
m = exe_globals['Machine']() # previous line creates the class `Machine`
# Sinit -> 0
out = m.move(a=0, b=0)
assert out == dict(c=0, d=0)
# 0 -> 1
out = m.move(a=0, b=1)
assert out == dict(c=0, d=1)
# invalid input for index 2 in time sequence
with assert_raises(ValueError):
m.move(a=1, b=1)
# 1 -> 2
out = m.move(a=1, b=0)
assert out == dict(c=1, d=1)
# dead-end
with assert_raises(Exception):
m.move(a=1, b=0)
| 32.46988
| 83
| 0.547681
|
from __future__ import print_function
import logging
import networkx as nx
from nose.tools import assert_raises
from tulip import spec, synth, dumpsmach
logging.getLogger('tulip').setLevel('ERROR')
logging.getLogger('astutils').setLevel('ERROR')
logging.getLogger('omega').setLevel('ERROR')
class basic_test(object):
def setUp(self):
self.triv = spec.GRSpec(env_vars="x", sys_vars="y",
env_init="x & y", env_prog="x",
sys_init="y", sys_prog="y && x")
self.triv_M = synth.synthesize(
self.triv, solver='omega')
self.dcounter = spec.GRSpec(
sys_vars={"y": (0, 5)},
env_init=['y = 0'],
sys_prog=["y=0", "y=5"])
self.dcounter_M = synth.synthesize(
self.dcounter, solver='omega')
self.enumf = spec.GRSpec(
sys_vars={'y': ['a', 'b']},
env_init=['y="a"'],
sys_safety=['y = "a" -> X(y = "b")',
'y = "b" -> X(y = "a")'])
self.enumf_M = synth.synthesize(
self.enumf, solver='omega')
def tearDown(self):
self.dcounter = None
self.dcounter_M = None
def test_python_case(self):
compile(dumpsmach.python_case(self.triv_M),
filename="<string>", mode="exec")
compile(dumpsmach.python_case(self.dcounter_M),
filename="<string>", mode="exec")
exec(compile(dumpsmach.python_case(self.enumf_M)
+'\nM = TulipStrategy(); M.move()',
filename="<string>", mode="exec"))
def test_nx():
g = nx.DiGraph()
g.inputs = {'a': '...', 'b': '...'}
g.outputs = {'c': '...', 'd': '...'}
start = 'Sinit'
g.add_edge(start, 0, a=0, b=0, c=0, d=0)
g.add_edge(0, 1, a=0, b=1, c=0, d=1)
g.add_edge(1, 2, a=1, b=0, c=1, d=1)
print(dumpsmach.python_case(g, classname='Machine', start='Sinit'))
exe_globals = dict()
exec(dumpsmach.python_case(g, classname='Machine', start='Sinit'), exe_globals)
m = exe_globals['Machine']()
out = m.move(a=0, b=0)
assert out == dict(c=0, d=0)
out = m.move(a=0, b=1)
assert out == dict(c=0, d=1)
with assert_raises(ValueError):
m.move(a=1, b=1)
out = m.move(a=1, b=0)
assert out == dict(c=1, d=1)
with assert_raises(Exception):
m.move(a=1, b=0)
| true
| true
|
7908caa4c581577ad0c130e7ce6fab5638920619
| 3,685
|
py
|
Python
|
pygimli/viewer/mpl/matrixview.py
|
JuliusHen/gimli
|
a5c5779261acfe5a53015c9ee6f7c9ed2dd6c57f
|
[
"Apache-2.0"
] | 224
|
2015-02-20T21:36:24.000Z
|
2022-03-30T07:27:43.000Z
|
pygimli/viewer/mpl/matrixview.py
|
JuliusHen/gimli
|
a5c5779261acfe5a53015c9ee6f7c9ed2dd6c57f
|
[
"Apache-2.0"
] | 341
|
2015-05-21T14:39:51.000Z
|
2022-03-31T01:54:07.000Z
|
pygimli/viewer/mpl/matrixview.py
|
JuliusHen/gimli
|
a5c5779261acfe5a53015c9ee6f7c9ed2dd6c57f
|
[
"Apache-2.0"
] | 107
|
2015-01-24T14:40:21.000Z
|
2022-02-25T12:12:13.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions to draw various pygimli matrices with matplotlib."""
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
def drawSparseMatrix(ax, mat, **kwargs):
"""Draw a view of a matrix into the axes.
Parameters
----------
    ax : mpl axis instance
Axis instance where the matrix will be plotted.
mat: pg.matrix.SparseMatrix or pg.matrix.SparseMapMatrix
Returns
-------
mpl.lines.line2d
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> from pygimli.viewer.mpl import drawSparseMatrix
>>> A = pg.randn((10,10), seed=0)
>>> SM = pg.core.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, i, 5.0)
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True, sharex=True)
>>> _ = drawSparseMatrix(ax1, A, colOffset=5, rowOffset=5, color='blue')
>>> _ = drawSparseMatrix(ax2, SM, color='green')
"""
row = kwargs.pop('rowOffset', 0)
col = kwargs.pop('colOffset', 0)
color = kwargs.pop('color', None)
mat = pg.utils.sparseMatrix2coo(mat)
mat.row += row
mat.col += col
gci = ax.spy(mat, color=color)
ax.autoscale(enable=True, axis='both', tight=True)
return gci
def drawBlockMatrix(ax, mat, **kwargs):
"""Draw a view of a matrix into the axes.
    Parameters
---------
    ax : mpl axis instance
Axis instance where the matrix will be plotted.
mat: pg.Matrix.BlockMatrix
Keyword Arguments
-----------------
spy: bool [False]
Draw all matrix entries instead of colored blocks
Returns
-------
    gci : matplotlib artist (or list of artists in spy mode)
    cBar : matplotlib colorbar instance, or None in spy mode
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> I = pg.matrix.IdentityMatrix(10)
>>> SM = pg.matrix.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, 10 - i, 5.0)
... SM.setVal(i, i, 5.0)
>>> B = pg.matrix.BlockMatrix()
>>> B.add(I, 0, 0)
0
>>> B.add(SM, 10, 10)
1
>>> print(B)
pg.matrix.BlockMatrix of size 20 x 21 consisting of 2 submatrices.
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True)
>>> _ = pg.show(B, ax=ax1)
>>> _ = pg.show(B, spy=True, ax=ax2)
"""
if kwargs.pop('spy', False):
gci = []
ids = pg.unique([e.matrixID for e in mat.entries()])
cMap = pg.plt.cm.get_cmap("Set3", len(ids))
for e in mat.entries():
mid = e.matrixID
mati = mat.mat(mid)
if isinstance(mati, pg.core.IdentityMatrix):
mati = np.eye(mati.size())
gci.append(drawSparseMatrix(ax, mati,
rowOffset=e.rowStart,
colOffset=e.colStart,
color=cMap(mid)))
return gci, None
else:
plcs = []
for e in mat.entries():
mid = e.matrixID
widthy = mat.mat(mid).rows() - 0.1 # to make sure non-matrix regions are not connected in the plot
widthx = mat.mat(mid).cols() - 0.1
plc = pg.meshtools.createRectangle([e.colStart, e.rowStart],
[e.colStart + widthx, e.rowStart + widthy],
marker=mid)
plcs.append(plc)
bm = pg.meshtools.mergePLC(plcs)
gci, cBar = pg.viewer.mpl.drawPLC(ax, bm, fitView=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
cBar.set_label("Matrix ID")
if len(mat.entries()) > 10:
gci.set_cmap("viridis")
return gci, cBar
| 28.789063
| 110
| 0.538128
|
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
def drawSparseMatrix(ax, mat, **kwargs):
row = kwargs.pop('rowOffset', 0)
col = kwargs.pop('colOffset', 0)
color = kwargs.pop('color', None)
mat = pg.utils.sparseMatrix2coo(mat)
mat.row += row
mat.col += col
gci = ax.spy(mat, color=color)
ax.autoscale(enable=True, axis='both', tight=True)
return gci
def drawBlockMatrix(ax, mat, **kwargs):
if kwargs.pop('spy', False):
gci = []
ids = pg.unique([e.matrixID for e in mat.entries()])
cMap = pg.plt.cm.get_cmap("Set3", len(ids))
for e in mat.entries():
mid = e.matrixID
mati = mat.mat(mid)
if isinstance(mati, pg.core.IdentityMatrix):
mati = np.eye(mati.size())
gci.append(drawSparseMatrix(ax, mati,
rowOffset=e.rowStart,
colOffset=e.colStart,
color=cMap(mid)))
return gci, None
else:
plcs = []
for e in mat.entries():
mid = e.matrixID
widthy = mat.mat(mid).rows() - 0.1
widthx = mat.mat(mid).cols() - 0.1
plc = pg.meshtools.createRectangle([e.colStart, e.rowStart],
[e.colStart + widthx, e.rowStart + widthy],
marker=mid)
plcs.append(plc)
bm = pg.meshtools.mergePLC(plcs)
gci, cBar = pg.viewer.mpl.drawPLC(ax, bm, fitView=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
cBar.set_label("Matrix ID")
if len(mat.entries()) > 10:
gci.set_cmap("viridis")
return gci, cBar
| true
| true
|
7908cb22148a76d380356d13fac1855f31d12772
| 52,064
|
py
|
Python
|
third_party/ridayesh_run_tag.py
|
rohanshah13/cloud-emea-copy
|
12acebc809080e5898ead86a412b17a5272759c2
|
[
"Apache-2.0"
] | null | null | null |
third_party/ridayesh_run_tag.py
|
rohanshah13/cloud-emea-copy
|
12acebc809080e5898ead86a412b17a5272759c2
|
[
"Apache-2.0"
] | null | null | null |
third_party/ridayesh_run_tag.py
|
rohanshah13/cloud-emea-copy
|
12acebc809080e5898ead86a412b17a5272759c2
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors,
# The HuggingFace Inc. team, and The XTREME Benchmark Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning models for NER and POS tagging."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
from dataclasses import dataclass, field
from typing import Optional
import json
import numpy as np
import scipy
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
# lang2vec is an optional dependency: it is only needed when --lang_to_vec is set
try:
  import lang2vec.lang2vec as l2v
except ImportError:
  l2v = None
from scipy.spatial import distance
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
HfArgumentParser,
MultiLingAdapterArguments,
AdapterConfig,
AdapterType,
)
#from xlm import XLMForTokenClassification
DEFAULT_LANGUAGES = {
'mr': 'hi',
'bn': 'hi',
'ta': 'ta',
'fo': 'fo',
'no': 'da',
'da': 'da',
'be': 'be',
'uk': 'uk',
'bg': 'bg'
}
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
logger.info(f'Seed = {args.seed}')
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None):
"""Train the model."""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
print(f'Local Rank = {args.local_rank}')
print(len(train_dataset))
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
logging.info([n for (n, p) in model.named_parameters() if p.requires_grad])
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
best_score = 0.0
best_checkpoint = None
patience = 0
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
  set_seed(args)  # Add here for reproducibility (even between python 2 and 3)
cur_epoch = 0
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
cur_epoch += 1
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch if t is not None)
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3]}
if args.model_type != "distilbert":
# XLM and RoBERTa don"t use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == "xlm":
inputs["langs"] = batch[4]
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel training
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scheduler.step() # Update learning rate schedule
optimizer.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training:
# Only evaluate on single GPU otherwise metrics may not average well
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.save_only_best_checkpoint:
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)
if result["f1"] > best_score:
logger.info("result['f1']={} > best_score={}".format(result["f1"], best_score))
best_score = result["f1"]
# Save the best model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-best")
best_checkpoint = output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
model_to_save.save_all_adapters(output_dir)
if args.do_save_adapter_fusions:
model_to_save.save_all_adapter_fusions(output_dir)
if args.do_save_full_model:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving the best model checkpoint to %s", output_dir)
logger.info("Reset patience to 0")
patience = 0
else:
patience += 1
logger.info("Hit patience={}".format(patience))
if args.eval_patience > 0 and patience > args.eval_patience:
logger.info("early stop! patience={}".format(patience))
epoch_iterator.close()
train_iterator.close()
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
else:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
model_to_save.save_all_adapters(output_dir)
if args.do_save_adapter_fusions:
model_to_save.save_all_adapter_fusions(output_dir)
if args.do_save_full_model:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weights, step=10, lang=None):
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"return_sequence_out": True,
"labels": batch[3]}
# logger.info(f'Language Adapters are {lang_adapter_names}')
adapter_weights = [torch.FloatTensor([0.5 for _ in range(len(lang_adapter_names))]).to(args.device) for _ in range(13)]
if args.lang_to_vec:
logger.info(lang)
logger.info(lang_adapter_names)
    adapter_weights = calc_l2v_weights(args, lang, lang_adapter_names)  # match the signature defined below
logger.info(adapter_weights)
for step_no in range(step):
for w in adapter_weights: w.requires_grad = True
if args.lang_to_vec and step_no == 0:
normed_adapter_weights = adapter_weights
else:
normed_adapter_weights = [torch.nn.functional.softmax(w) for w in adapter_weights]
# logger.info(f'Initial Adapter Weights = {normed_adapter_weights}')
model.set_active_adapters([lang_adapter_names, [task_name]])
inputs["adapter_names"] = [lang_adapter_names, [task_name]]
inputs["adapter_weights"] = normed_adapter_weights
outputs = model(**inputs)
loss, logits, orig_sequence_output = outputs[:3]
kept_logits = outputs[-1]
entropy = torch.nn.functional.softmax(kept_logits, dim=1)*torch.nn.functional.log_softmax(kept_logits, dim=1)
entropy = -entropy.sum() / kept_logits.size(0)
grads = torch.autograd.grad(entropy, adapter_weights)
    # one gradient-descent step on the unnormalized mixing weights (step size 10)
    for i, w in enumerate(adapter_weights):
      adapter_weights[i] = adapter_weights[i].data - 10*grads[i].data
    normed_adapter_weights = [torch.nn.functional.softmax(w) for w in adapter_weights]
# logger.info(f'Final Adapter Weights = {normed_adapter_weights}')
return normed_adapter_weights
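# A minimal sketch of the unsupervised objective driving calc_weight_multi
# above (illustrative only; `logits` stands for any (batch, num_labels) tensor,
# not a name defined in this file):
#
#   probs = torch.nn.functional.softmax(logits, dim=1)
#   log_probs = torch.nn.functional.log_softmax(logits, dim=1)
#   mean_entropy = -(probs * log_probs).sum() / logits.size(0)
#
# Gradient descent on this mean prediction entropy w.r.t. the adapter mixing
# weights sharpens predictions on the unlabeled batch without using any labels.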
def jaccard_sim(vec1, vec2):
intersection = 0
union = 0
for i in range(len(vec1)):
if vec1[i] == '--' or vec2[i] == '--':
continue
if vec1[i] == 1 or vec2[i] == 1:
union += 1
if vec1[i] == 1 and vec2[i] == 1:
intersection += 1
return intersection/union
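# Hedged example of the similarity above: '--' marks a missing typological
# feature and is skipped entirely, so
#   jaccard_sim([1, 0, '--', 1], [1, 1, '--', 0]) == 1/3
# (one shared active feature out of three active in either vector).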
def get_sim(lang1, lang2):
features = l2v.get_features(f'{DEFAULT_LANGUAGES[lang1]} {lang2}', 'learned')
similarity = 1 - distance.cosine(features[DEFAULT_LANGUAGES[lang1]], features[lang2])
return similarity
def get_syntax_sim(lang1, lang2):
features = l2v.get_features(f'{lang1} {lang2}', "syntax_wals|syntax_sswl|syntax_ethnologue")
similarity = jaccard_sim(features[lang1], features[lang2])
return similarity
def calc_l2v_weights(args, lang, lang_adapter_names):
adapter_weight = []
for adapter_lang in lang_adapter_names:
if args.en_weight is not None and adapter_lang == 'en':
continue
if args.lang_to_vec == 'learned':
adapter_weight.append(get_sim(lang, adapter_lang))
elif args.lang_to_vec == 'syntax':
adapter_weight.append(get_syntax_sim(lang, adapter_lang))
else:
logger.info('INVALID FEATURE TYPE')
exit()
logger.info(adapter_weight)
adapter_weight = torch.FloatTensor(adapter_weight)
adapter_weight = torch.nn.functional.softmax(adapter_weight/args.temperature).tolist()
if args.en_weight is not None:
adapter_weight = [(1 - args.en_weight)*aw for aw in adapter_weight]
en_index = lang_adapter_names.index('en')
adapter_weight.insert(en_index, args.en_weight)
return adapter_weight
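# Worked example of the weighting scheme above (made-up similarity values,
# temperature 1.0): softmax([0.2, 0.6]) ~= [0.40, 0.60]. With --en_weight 0.5
# the non-English weights are scaled by (1 - 0.5) to [0.20, 0.30] and 0.5 is
# inserted at English's index, so the final weights again sum to 1.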
def scaled_input(emb, batch_size=16, num_batch=1, baseline=None, start_i=None, end_i=None):
# shape of emb: (num_head, seq_len, seq_len)
if baseline is None:
baseline = torch.zeros_like(emb)
num_points = batch_size * num_batch
scale = 1.0 / num_points
if start_i is None:
step = (emb.unsqueeze(0) - baseline.unsqueeze(0)) * scale
res = torch.cat([torch.add(baseline.unsqueeze(0), step*i) for i in range(num_points)], dim=0)
return res, step[0]
else:
step = (emb - baseline) * scale
start_emb = torch.add(baseline, step*start_i)
end_emb = torch.add(baseline, step*end_i)
step_new = (end_emb.unsqueeze(0) - start_emb.unsqueeze(0)) * scale
res = torch.cat([torch.add(start_emb.unsqueeze(0), step_new*i) for i in range(num_points)], dim=0)
return res, step_new[0]
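# scaled_input builds a straight-line interpolation path from the baseline to
# the attention map, in the style of integrated gradients: with a zero
# baseline and num_points = batch_size * num_batch, point i is
#   baseline + i * (emb - baseline) / num_points  ==  emb * i / num_points.
# evaluate() below accumulates gradients along this path for head attribution.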
#Changed the default of calc_weight_step to 0
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="", lang="en", lang2id=None, print_result=True, adapter_weight=None, lang_adapter_names=None, task_name=None, calc_weight_step=0):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode, lang=lang, lang2id=lang2id)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
if args.get_attr:
eval_sampler = RandomSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
else:
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s in %s *****" % (prefix, lang))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
counter = 0
head_importances = None
all_head_importances = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
counter += 1
logger.info(f'Batch number = {counter}')
batch = tuple(t.to(args.device) for t in batch)
if calc_weight_step > 0:
adapter_weight = calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weight, calc_weight_step, lang=lang)
if args.get_attr:
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"adapter_weights": adapter_weight}
if args.model_type != "distilbert":
# XLM and RoBERTa don"t use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[4]
inputs["output_attentions"] = True
outputs = model(**inputs)
tmp_eval_loss, logits, attentions, kept_labels, kl_logits = outputs
attr_all = []
res_attr = []
input_len = int(inputs["attention_mask"][0].sum())
example_head_importances = None
#Remove the batch_size dim since batch_size=1
logits = logits[0]
for tar_layer in range(12):
att = attentions[tar_layer][0]
pred_labels = torch.argmax(logits, dim=-1)
scale_att, step = scaled_input(att.data)
scale_att.requires_grad_(True)
attr_all = None
prob_all = None
for j_batch in range(1):
one_batch_att = scale_att[j_batch*16:(j_batch+1)*16]
_, grad = model(input_ids=inputs['input_ids'], token_type_ids=inputs['token_type_ids'], attention_mask=inputs['attention_mask'], labels=inputs['labels'], tar_layer=tar_layer, tmp_score=one_batch_att, pred_labels=pred_labels)
grad = grad.sum(dim=0)
attr_all = grad if attr_all is None else torch.add(attr_all, grad)
# prob_all = tar_prob if prob_all is None else torch.cat([prob_all, tar_prob])
attr_all = attr_all[:,0:input_len,0:input_len] * step[:,0:input_len,0:input_len]
if example_head_importances is None:
example_head_importances = torch.amax(attr_all, dim=(1,2)).unsqueeze(0)
else:
tmp = torch.amax(attr_all, dim=(1,2))
tmp = tmp.unsqueeze(0)
example_head_importances = torch.cat((example_head_importances, tmp), dim=0)
# att = att[:,0:input_len,0:input_len]
res_attr.append(attr_all.data)
# logger.info(f'Example Head Importances = {example_head_importances}')
all_head_importances = example_head_importances.unsqueeze(0) if all_head_importances is None else torch.cat((all_head_importances, example_head_importances.unsqueeze(0)), dim=0)
head_importances = example_head_importances if head_importances is None else torch.add(head_importances, example_head_importances)
if counter == 100:
break
continue
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"adapter_weights": adapter_weight}
# logger.info(f'Labels = {batch[3]}')
if args.model_type != "distilbert":
# XLM and RoBERTa don"t use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[4]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel evaluating
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if args.get_attr:
head_importances = head_importances/counter
logger.info(f'Head Importances = {head_importances}')
torch.save(head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_importances_100.pt'))
torch.save(all_head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_all_importances_100.pt'))
return None, None
if nb_eval_steps == 0:
results = {k: 0 for k in ["loss", "precision", "recall", "f1"]}
else:
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list)
}
if print_result:
logger.info("***** Evaluation result %s in %s *****" % (prefix, lang))
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode, lang, lang2id=None, few_shot=-1):
  # Make sure only the first process in distributed training processes
  # the dataset; the others will use the cache
  if args.local_rank not in [-1, 0] and mode == "train":  # 'not evaluate' named the function above and was always False
torch.distributed.barrier()
# Load data features from cache or dataset file
bpe_dropout = args.bpe_dropout
if mode != 'train': bpe_dropout = 0
if bpe_dropout > 0:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}_drop{}".format(mode, lang,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length), bpe_dropout))
else:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}".format(mode, lang,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
langs = lang.split(',')
logger.info("all languages = {}".format(lang))
features = []
for lg in langs:
data_file = os.path.join(args.data_dir, lg, "{}.{}".format(mode, args.model_name_or_path))
logger.info("Creating features from dataset file at {} in language {}".format(data_file, lg))
examples = read_examples_from_file(data_file, lg, lang2id)
features_lg = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta", "xlmr"]),
pad_on_left=bool(args.model_type in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
lang=lg,
bpe_dropout=bpe_dropout,
)
features.extend(features_lg)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file {}, len(features)={}".format(cached_features_file, len(features)))
torch.save(features, cached_features_file)
  # Make sure only the first process in distributed training processes
  # the dataset; the others will use the cache
  if args.local_rank == 0 and mode == "train":
torch.distributed.barrier()
if few_shot > 0 and mode == 'train':
logger.info("Original no. of examples = {}".format(len(features)))
features = features[: few_shot]
logger.info('Using few-shot learning on {} examples'.format(len(features)))
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
if args.model_type == 'xlm' and features[0].langs is not None:
all_langs = torch.tensor([f.langs for f in features], dtype=torch.long)
logger.info('all_langs[0] = {}'.format(all_langs[0]))
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_langs)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
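# Example of the cache naming scheme above (hypothetical values): with
# mode='train', lang='en', model 'bert-base-multilingual-cased' and
# max_seq_length=128, features are cached at
#   <data_dir>/cached_train_en_bert-base-multilingual-cased_128
# plus a '_drop<p>' suffix when BPE dropout is active during training.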
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
  model_type: Optional[str] = field(
    default=None, metadata={"help": "Model type; inferred from the config when left unset"}
  )
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
  labels: str = field(
    default=None, metadata={"help": "Path to a file containing all labels; if empty, default labels are used"}
  )
  data_dir: str = field(
    default=None, metadata={"help": "The input data dir containing the train/dev/test files for the task"}
  )
  output_dir: str = field(
    default=None, metadata={"help": "The output directory where model predictions and checkpoints are written"}
  )
  max_seq_length: Optional[int] = field(
    default=128, metadata={"help": "The maximum total input sequence length after tokenization"}
  )
do_train: Optional[bool] = field(default=False )
do_eval: Optional[bool] = field(default=False )
do_predict: Optional[bool] = field(default=False )
do_adapter_predict: Optional[bool] = field(default=False )
do_predict_dev: Optional[bool] = field(default=False )
do_predict_train: Optional[bool] = field(default=False )
init_checkpoint: Optional[str] = field(default=None )
evaluate_during_training: Optional[bool] = field(default=False )
do_lower_case: Optional[bool] = field(default=False )
few_shot: Optional[int] = field(default=-1 )
per_gpu_train_batch_size: Optional[int] = field(default=8)
per_gpu_eval_batch_size: Optional[int] = field(default=8)
gradient_accumulation_steps: Optional[int] = field(default=1)
learning_rate: Optional[float] = field(default=5e-5)
weight_decay: Optional[float] = field(default=0.0)
adam_epsilon: Optional[float] = field(default=1e-8)
max_grad_norm: Optional[float] = field(default=1.0)
num_train_epochs: Optional[float] = field(default=3.0)
max_steps: Optional[int] = field(default=-1)
save_steps: Optional[int] = field(default=-1)
warmup_steps: Optional[int] = field(default=0)
logging_steps: Optional[int] = field(default=50)
save_only_best_checkpoint: Optional[bool] = field(default=False)
eval_all_checkpoints: Optional[bool] = field(default=False)
no_cuda: Optional[bool] = field(default=False)
overwrite_output_dir: Optional[bool] = field(default=False)
overwrite_cache: Optional[bool] = field(default=False)
seed: Optional[int] = field(default=42)
fp16: Optional[bool] = field(default=False)
fp16_opt_level: Optional[str] = field(default="O1")
local_rank: Optional[int] = field(default=-1)
server_ip: Optional[str] = field(default="")
server_port: Optional[str] = field(default="")
predict_langs: Optional[str] = field(default="en")
train_langs: Optional[str] = field(default="en")
log_file: Optional[str] = field(default=None)
eval_patience: Optional[int] = field(default=-1)
bpe_dropout: Optional[float] = field(default=0)
do_save_adapter_fusions: Optional[bool] = field(default=False)
task_name: Optional[str] = field(default="ner")
predict_task_adapter: Optional[str] = field(default=None)
predict_lang_adapter: Optional[str] = field(default=None)
test_adapter: Optional[bool] = field(default=False)
adapter_weight: Optional[str] = field(default=None)
lang_to_vec: Optional[str] = field(default=None)
calc_weight_step: Optional[int] = field(default=0)
predict_save_prefix: Optional[str] = field(default=None)
en_weight: Optional[float] = field(default=None)
temperature: Optional[float] = field(default=1.0)
get_attr: Optional[bool] = field(default=False)
topk: Optional[int] = field(default=1)
task: Optional[str] = field(default='udpos')
def setup_adapter(args, adapter_args, model, train_adapter=True, load_adapter=None, load_lang_adapter=None):
task_name = args.task_name or "ner"
# check if adapter already exists, otherwise add it
if task_name not in model.config.adapters.adapter_list(AdapterType.text_task):
logging.info("Trying to decide if add adapter")
# resolve the adapter config
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
)
# load a pre-trained from Hub if specified
if adapter_args.load_adapter or load_adapter:
logging.info("loading task adapter")
model.load_adapter(
adapter_args.load_adapter if load_adapter is None else load_adapter,
AdapterType.text_task,
config=adapter_config,
load_as=task_name,
)
# otherwise, add a fresh adapter
else:
logging.info("Adding task adapter")
model.add_adapter(task_name, AdapterType.text_task, config=adapter_config)
# optionally load a pre-trained language adapter
if adapter_args.load_lang_adapter or load_lang_adapter:
if load_lang_adapter is None:
# load a set of language adapters
logging.info("loading lang adpater {}".format(adapter_args.load_lang_adapter))
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
# if adapter_args.language == 'topk':
# assert len(args.predict_langs.split(',')) == 1
# filename = f'scripts/{args.task}/en/{args.predict_langs}.json'
# logger.info(f'Loading Adapter Languages from {filename}')
# languages = []
# with open(filename) as f:
# for i,line in enumerate(f):
# if i == args.topk:
# break
# line = json.loads(line)
# languages.append(line['adapter'].strip())
# adapter_names = [f'{lang}/wiki@ukp' for lang in languages]
# else:
# languages = adapter_args.language.split(",")
# adapter_names = adapter_args.load_lang_adapter.split(",")
# logger.info(f'Adapter Languages : {languages}, Length : {len(languages)}')
# logger.info(f'Adapter Names {adapter_names}, Length : {len(adapter_names)}')
# assert len(languages) == len(adapter_names)
# lang_adapter_names = []
# for language, adapter_name in zip(languages, adapter_names):
# logger.info(f'Language = {language}')
# logger.info(f'Adapter Name = {adapter_name}')
# lang_adapter_name = model.load_adapter(
# adapter_name,
# AdapterType.text_lang,
# config=lang_adapter_config,
# load_as=language,
# )
      # lang_adapter_names.append(lang_adapter_name)
      # NOTE: the Hub-loading block above is commented out in this version;
      # fall back to empty values so the function remains runnable.
      lang_adapter_name = None
      lang_adapter_names = []
else:
logging.info("loading lang adpater {}".format(load_lang_adapter))
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
# lang_adapter_name = model.load_adapter(
# load_lang_adapter,
# AdapterType.text_lang,
# config=lang_adapter_config,
# load_as="lang",
# )
      # lang_adapter_names = [lang_adapter_name]
      # NOTE: as above, loading is commented out; define fallbacks so the
      # function remains runnable.
      lang_adapter_name = None
      lang_adapter_names = []
else:
lang_adapter_name = None
lang_adapter_names = []
# Freeze all model weights except of those of this adapter
model.train_adapter([task_name])
# Set the adapters to be used in every forward pass
if lang_adapter_name:
model.set_active_adapters([lang_adapter_names, [task_name]])
else:
model.set_active_adapters([task_name])
return model, lang_adapter_names, task_name
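# Sketch of the resulting setup (names illustrative, not from this file): with
# language adapters ['en', 'hi'] and task adapter 'ner', setup_adapter ends with
#   model.set_active_adapters([['en', 'hi'], ['ner']])
# activating the language adapters alongside the task adapter, while
# model.train_adapter(['ner']) keeps only the task adapter's weights trainable.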
def load_model(args, num_labels):
logger.info('Loading pretrained model and tokenizer')
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
cache_dir=args.cache_dir,
)
args.model_type = config.model_type
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir,
use_fast=False,
)
if args.init_checkpoint:
logger.info("loading from init_checkpoint={}".format(args.init_checkpoint))
model = AutoModelForTokenClassification.from_pretrained(
args.init_checkpoint,
config=config,
cache_dir=args.cache_dir,
)
else:
logger.info("loading from existing model {}".format(args.model_name_or_path))
model = AutoModelForTokenClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
lang2id = config.lang2id if args.model_type == "xlm" else None
logger.info("Using lang2id = {}".format(lang2id))
return model, tokenizer, lang2id
def predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, split):
output_test_results_file = os.path.join(args.output_dir, f"{split}_results.txt")
with open(output_test_results_file, "a") as result_writer:
for lang in args.predict_langs.split(','):
#Check if language data exists
if not os.path.exists(os.path.join(args.data_dir, lang, '{}.{}'.format(split, args.model_name_or_path))):
logger.info("Language {}, split {} does not exist".format(lang, split))
continue
#Activate the required language adapter
adapter_weight = None
# if not args.adapter_weight and not args.lang_to_vec:
# if (adapter_args.train_adapter or args.test_adapter) and not args.adapter_weight:
# if lang in lang_adapter_names:
# logger.info(f'Language adapter for {lang} found')
# logger.info("Set active language adapter to {}".format(lang))
# model.set_active_adapters([[lang], [task_name]])
# else:
# logger.info(f'Language adapter for {lang} not found, using {lang_adapter_names[0]} instead')
# logger.info("Set active language adapter to {}".format(lang_adapter_names[0]))
# model.set_active_adapters([[lang_adapter_names[0]], [task_name]])
# else:
# if args.adapter_weight == 'equal':
# adapter_weight = [1/len(lang_adapter_names) for _ in lang_adapter_names]
# elif args.adapter_weight == 'equal_en':
# assert 'en' in lang_adapter_names, 'English language adapter not included'
# adapter_weight = [(1-args.en_weight)/(len(lang_adapter_names)-1) for _ in lang_adapter_names]
# en_index = lang_adapter_names.index('en')
# adapter_weight[en_index] = args.en_weight
# elif args.lang_to_vec:
# if args.en_weight is not None:
# logger.info(lang_adapter_names)
# assert 'en' in lang_adapter_names, 'English language adapter not included'
# adapter_weight = calc_l2v_weights(args, lang, lang_adapter_names)
# elif args.adapter_weight == 'load':
# filename = f'weights/{args.task}/{lang}/weights_s{args.seed}'
# logger.info(f'Loading adapter weights from {filename}')
# with open(filename) as f:
# adapter_weight = json.loads(next(f))
# elif args.adapter_weight != "0" and args.adapter_weight is not None:
# adapter_weight = [float(w) for w in args.adapter_weight.split(",")]
logger.info('Args Adapter Weight = {}'.format(args.adapter_weight))
logger.info('Adapter Languages = {}'.format(lang_adapter_names))
if adapter_weight is not None:
logger.info("Adapter Weights = {}".format(adapter_weight))
logger.info('Sum of Adapter Weights = {}'.format(sum(adapter_weight)))
logger.info("Length of Adapter Weights = {}".format(len(adapter_weight)))
# model.set_active_adapters([ lang_adapter_names, [task_name]])
#Evaluate
result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=split, lang=lang, lang2id=lang2id, adapter_weight=adapter_weight, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
if args.get_attr:
continue
result_json = {}
# Save results
if args.predict_save_prefix is not None and args.predict_save_prefix:
result_json['language'] = f'{args.predict_save_prefix}_{lang}'
else:
result_json['language'] = f'{lang}'
result_json['seed'] = args.seed
result_json['language_adapters'] = lang_adapter_names
if args.adapter_weight:
result_json['adapter_weights'] = args.adapter_weight
for key in sorted(result.keys()):
result_json[key] = result[key]
result_writer.write(json.dumps(result_json) + '\n')
# Save predictions
if args.predict_save_prefix is not None and args.predict_save_prefix:
output_test_predictions_file = os.path.join(args.output_dir, "{}_{}_{}_s{}_predictions.txt".format(split, args.predict_save_prefix, lang, args.seed))
else:
output_test_predictions_file = os.path.join(args.output_dir, "{}_{}_s{}_predictions.txt".format(split, lang, args.seed))
infile = os.path.join(args.data_dir, lang, "{}.{}".format(split, args.model_name_or_path))
idxfile = infile + '.idx'
save_predictions(args, predictions, output_test_predictions_file, infile, idxfile)
def main():
  parser = HfArgumentParser((ModelArguments, MultiLingAdapterArguments))
args, adapter_args = parser.parse_args_into_dataclasses()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:
    # Initializes the distributed backend which synchronizes nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
  # args.log_file defaults to None and FileHandler(None) would raise, so only
  # attach a file handler when a path was actually given
  log_handlers = [logging.StreamHandler()]
  if args.log_file:
    log_handlers.append(logging.FileHandler(args.log_file))
  logging.basicConfig(handlers = log_handlers,
            format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
            datefmt = '%m/%d/%Y %H:%M:%S',
            level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logging.info("Input args: %r" % args)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare NER/POS task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id
# so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
  args.do_save_full_model = (not adapter_args.train_adapter)
  args.do_save_adapters = adapter_args.train_adapter
if args.do_save_adapters:
logging.info('save adapters')
logging.info(adapter_args.train_adapter)
if args.do_save_full_model:
logging.info('save model')
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank == 0:
torch.distributed.barrier()
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
model, tokenizer, lang2id = load_model(args, num_labels)
if adapter_args.train_adapter:
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model)
logger.info("lang adapter names: {}".format(" ".join(lang_adapter_names)))
else:
      lang_adapter_names = []
task_name = None
model.to(args.device)
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train", lang=args.train_langs, lang2id=lang2id, few_shot=args.few_shot)
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use default names for the model,
# you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
# Save model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
logger.info("Saving model checkpoint to %s", args.output_dir)
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
logging.info("Save adapter")
model_to_save.save_all_adapters(args.output_dir)
if args.do_save_adapter_fusions:
logging.info("Save adapter fusion")
model_to_save.save_all_adapter_fusions(args.output_dir)
if args.do_save_full_model:
logging.info("Save full model")
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Initialization for evaluation
results = {}
if args.init_checkpoint:
best_checkpoint = args.init_checkpoint
elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-best')):
best_checkpoint = os.path.join(args.output_dir, 'checkpoint-best')
else:
best_checkpoint = args.output_dir
# Evaluation
#This evaluates only if the entire model is saved, something we are not doing
if args.do_eval and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
    logger.info('Evaluating the model on the dev set of the training language (en)')
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)
model.to(args.device)
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='debugging', lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
results.update(result)
# for checkpoint in checkpoints:
# global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
# model = AutoModelForTokenClassification.from_pretrained(checkpoint)
# if adapter_args.train_adapter:
# load_adapter = checkpoint + "/" + args.task_name
# load_lang_adapter = "{}/{}".format(checkpoint, adapter_args.language)
# model.model_name = args.model_name_or_path
# model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)
#
# model.to(args.device)
# result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
# if result["f1"] > best_f1:
# best_checkpoint = checkpoint
# best_f1 = result["f1"]
# if global_step:
# result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
# results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
# writer.write("best checkpoint = {}, best f1 = {}\n".format(best_checkpoint, best_f1))
if args.do_predict and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
# Prediction
logger.info('Evaluating the model on test set of all the languages specified')
#Set up the task adapter
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'test')
if args.do_predict_train and args.local_rank in [-1, 0]:
logger.info('Evaluating on the train set of all specified languages')
model, tokenizer, lang2id = load_model(args, num_labels)
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'train')
#Predict dev set
if args.do_predict_dev and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
logger.info('Evaluating on the dev sets of all the specified languages')
#Set up task and language adapters
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'dev')
def save_predictions(args, predictions, output_file, text_file, idx_file, output_word_prediction=False):
# Save predictions
with open(text_file, "r") as text_reader, open(idx_file, "r") as idx_reader:
text = text_reader.readlines()
index = idx_reader.readlines()
assert len(text) == len(index)
# Sanity check on the predictions
with open(output_file, "w") as writer:
example_id = 0
prev_id = int(index[0])
for line, idx in zip(text, index):
if line == "" or line == "\n":
example_id += 1
else:
cur_id = int(idx)
output_line = '\n' if cur_id != prev_id else ''
if output_word_prediction:
output_line += line.split()[0] + '\t'
output_line += predictions[example_id].pop(0) + '\n'
writer.write(output_line)
prev_id = cur_id
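# Illustrative behaviour of save_predictions (hypothetical file contents): if
# the text lines are ['John', 'lives', '', 'Paris'] with index lines
# ['0', '0', '0', '1'], one prediction is written per non-empty line and a
# blank separator line is emitted whenever the sentence index changes.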
if __name__ == "__main__":
main()
| 46.527256
| 259
| 0.688153
|
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
from dataclasses import dataclass, field
from typing import Optional
import json
import numpy as np
import scipy
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
from scipy.spatial import distance
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
HfArgumentParser,
MultiLingAdapterArguments,
AdapterConfig,
AdapterType,
)
DEFAULT_LANGUAGES = {
'mr': 'hi',
'bn': 'hi',
'ta': 'ta',
'fo': 'fo',
'no': 'da',
'da': 'da',
'be': 'be',
'uk': 'uk',
'bg': 'bg'
}
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
logger.info(f'Seed = {args.seed}')
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None):
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
print(f'Local Rank = {args.local_rank}')
print(len(train_dataset))
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
logging.info([n for (n, p) in model.named_parameters() if p.requires_grad])
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
best_score = 0.0
best_checkpoint = None
patience = 0
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args)
cur_epoch = 0
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
cur_epoch += 1
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch if t is not None)
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == "xlm":
inputs["langs"] = batch[4]
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel training
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
        optimizer.step()
        scheduler.step()  # Update learning rate schedule after the optimizer step
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training:
# Only evaluate on single GPU otherwise metrics may not average well
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.save_only_best_checkpoint:
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)
if result["f1"] > best_score:
logger.info("result['f1']={} > best_score={}".format(result["f1"], best_score))
best_score = result["f1"]
# Save the best model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-best")
best_checkpoint = output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
model_to_save.save_all_adapters(output_dir)
if args.do_save_adapter_fusions:
model_to_save.save_all_adapter_fusions(output_dir)
if args.do_save_full_model:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving the best model checkpoint to %s", output_dir)
logger.info("Reset patience to 0")
patience = 0
else:
patience += 1
logger.info("Hit patience={}".format(patience))
if args.eval_patience > 0 and patience > args.eval_patience:
logger.info("early stop! patience={}".format(patience))
epoch_iterator.close()
train_iterator.close()
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
else:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
model_to_save.save_all_adapters(output_dir)
if args.do_save_adapter_fusions:
model_to_save.save_all_adapter_fusions(output_dir)
if args.do_save_full_model:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weights, step=10, lang=None):
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"return_sequence_out": True,
"labels": batch[3]}
# logger.info(f'Language Adapters are {lang_adapter_names}')
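  # Thirteen weight vectors are created below: presumably one per transformer
  # layer of a base-size model (12) plus one for the embeddings, each starting
  # uniform at 0.5 per language adapter before softmax normalization.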
adapter_weights = [torch.FloatTensor([0.5 for _ in range(len(lang_adapter_names))]).to(args.device) for _ in range(13)]
  if args.lang_to_vec:
    logger.info(lang)
    logger.info(lang_adapter_names)
    # Seed every layer's weights from lang2vec similarities instead of the
    # uniform 0.5 initialization above
    l2v_weights = calc_l2v_weights(args, lang, lang_adapter_names)
    adapter_weights = [torch.FloatTensor(l2v_weights).to(args.device) for _ in range(13)]
    logger.info(adapter_weights)
for step_no in range(step):
for w in adapter_weights: w.requires_grad = True
if args.lang_to_vec and step_no == 0:
normed_adapter_weights = adapter_weights
else:
      normed_adapter_weights = [torch.nn.functional.softmax(w, dim=-1) for w in adapter_weights]
# logger.info(f'Initial Adapter Weights = {normed_adapter_weights}')
model.set_active_adapters([lang_adapter_names, [task_name]])
inputs["adapter_names"] = [lang_adapter_names, [task_name]]
inputs["adapter_weights"] = normed_adapter_weights
outputs = model(**inputs)
loss, logits, orig_sequence_output = outputs[:3]
kept_logits = outputs[-1]
entropy = torch.nn.functional.softmax(kept_logits, dim=1)*torch.nn.functional.log_softmax(kept_logits, dim=1)
entropy = -entropy.sum() / kept_logits.size(0)
grads = torch.autograd.grad(entropy, adapter_weights)
#print(adapter_weights)
#print(grads)
#print(grads)
for i, w in enumerate(adapter_weights):
adapter_weights[i] = adapter_weights[i].data - 10*grads[i].data
    normed_adapter_weights = [torch.nn.functional.softmax(w, dim=-1) for w in adapter_weights]
#print(normed_adapter_weights)
# logger.info(f'Final Adapter Weights = {normed_adapter_weights}')
return normed_adapter_weights
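# How the loop above behaves, as a hedged numeric sketch: with two language
# adapters and initial weights w = [0.5, 0.5], softmax gives [0.5, 0.5]; if
# the entropy gradient at step 0 is g = [0.02, -0.02], the update
# w <- w - 10 * g yields [0.3, 0.7], and softmax then shifts the mixture
# toward the second adapter (~[0.40, 0.60]). The fixed step size of 10 and
# the 13 per-layer weight vectors are taken from the code as-is.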
def jaccard_sim(vec1, vec2):
intersection = 0
union = 0
for i in range(len(vec1)):
if vec1[i] == '--' or vec2[i] == '--':
continue
if vec1[i] == 1 or vec2[i] == 1:
union += 1
if vec1[i] == 1 and vec2[i] == 1:
intersection += 1
return intersection/union
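# Worked example for jaccard_sim: vec1 = [1, 0, '--', 1], vec2 = [1, 1, 1, 0].
# Index 2 is skipped because vec1 marks it '--'; the union counts indices
# 0, 1 and 3, the intersection only index 0, so the similarity is 1/3.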
def get_sim(lang1, lang2):
features = l2v.get_features(f'{DEFAULT_LANGUAGES[lang1]} {lang2}', 'learned')
similarity = 1 - distance.cosine(features[DEFAULT_LANGUAGES[lang1]], features[lang2])
return similarity
def get_syntax_sim(lang1, lang2):
features = l2v.get_features(f'{lang1} {lang2}', "syntax_wals|syntax_sswl|syntax_ethnologue")
similarity = jaccard_sim(features[lang1], features[lang2])
return similarity
def calc_l2v_weights(args, lang, lang_adapter_names):
adapter_weight = []
for adapter_lang in lang_adapter_names:
if args.en_weight is not None and adapter_lang == 'en':
continue
if args.lang_to_vec == 'learned':
adapter_weight.append(get_sim(lang, adapter_lang))
elif args.lang_to_vec == 'syntax':
adapter_weight.append(get_syntax_sim(lang, adapter_lang))
else:
logger.info('INVALID FEATURE TYPE')
exit()
logger.info(adapter_weight)
adapter_weight = torch.FloatTensor(adapter_weight)
  adapter_weight = torch.nn.functional.softmax(adapter_weight/args.temperature, dim=-1).tolist()
if args.en_weight is not None:
adapter_weight = [(1 - args.en_weight)*aw for aw in adapter_weight]
en_index = lang_adapter_names.index('en')
adapter_weight.insert(en_index, args.en_weight)
return adapter_weight
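# Temperature sketch for calc_l2v_weights (illustrative numbers): raw
# similarities [0.8, 0.4] at temperature 1.0 softmax to roughly [0.60, 0.40];
# temperature 0.5 sharpens this to softmax([1.6, 0.8]) ~ [0.69, 0.31]. With
# en_weight = 0.5 the non-English weights are halved and 0.5 is inserted at
# English's position in lang_adapter_names.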
def scaled_input(emb, batch_size=16, num_batch=1, baseline=None, start_i=None, end_i=None):
# shape of emb: (num_head, seq_len, seq_len)
if baseline is None:
baseline = torch.zeros_like(emb)
num_points = batch_size * num_batch
scale = 1.0 / num_points
if start_i is None:
step = (emb.unsqueeze(0) - baseline.unsqueeze(0)) * scale
res = torch.cat([torch.add(baseline.unsqueeze(0), step*i) for i in range(num_points)], dim=0)
return res, step[0]
else:
step = (emb - baseline) * scale
start_emb = torch.add(baseline, step*start_i)
end_emb = torch.add(baseline, step*end_i)
step_new = (end_emb.unsqueeze(0) - start_emb.unsqueeze(0)) * scale
res = torch.cat([torch.add(start_emb.unsqueeze(0), step_new*i) for i in range(num_points)], dim=0)
return res, step_new[0]
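# scaled_input implements the interpolation used by integrated-gradients style
# attribution: it returns num_points points on the straight line from the
# baseline (zeros by default) to the attention map `emb`, plus the per-step
# increment. Summing gradient * step over these points, as the attribution
# loop inside evaluate() does, is a Riemann approximation of the path
# integral of gradients from the baseline to the actual attention values.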
#Changed the default of calc_weight_step to 0
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="", lang="en", lang2id=None, print_result=True, adapter_weight=None, lang_adapter_names=None, task_name=None, calc_weight_step=0):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode, lang=lang, lang2id=lang2id)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
if args.get_attr:
eval_sampler = RandomSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
else:
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s in %s *****" % (prefix, lang))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
counter = 0
head_importances = None
all_head_importances = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
counter += 1
logger.info(f'Batch number = {counter}')
batch = tuple(t.to(args.device) for t in batch)
if calc_weight_step > 0:
adapter_weight = calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weight, calc_weight_step, lang=lang)
if args.get_attr:
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"adapter_weights": adapter_weight}
if args.model_type != "distilbert":
# XLM and RoBERTa don"t use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[4]
inputs["output_attentions"] = True
outputs = model(**inputs)
tmp_eval_loss, logits, attentions, kept_labels, kl_logits = outputs
attr_all = []
res_attr = []
input_len = int(inputs["attention_mask"][0].sum())
example_head_importances = None
logits = logits[0]
for tar_layer in range(12):
att = attentions[tar_layer][0]
pred_labels = torch.argmax(logits, dim=-1)
scale_att, step = scaled_input(att.data)
scale_att.requires_grad_(True)
attr_all = None
prob_all = None
for j_batch in range(1):
one_batch_att = scale_att[j_batch*16:(j_batch+1)*16]
_, grad = model(input_ids=inputs['input_ids'], token_type_ids=inputs['token_type_ids'], attention_mask=inputs['attention_mask'], labels=inputs['labels'], tar_layer=tar_layer, tmp_score=one_batch_att, pred_labels=pred_labels)
grad = grad.sum(dim=0)
attr_all = grad if attr_all is None else torch.add(attr_all, grad)
attr_all = attr_all[:,0:input_len,0:input_len] * step[:,0:input_len,0:input_len]
if example_head_importances is None:
example_head_importances = torch.amax(attr_all, dim=(1,2)).unsqueeze(0)
else:
tmp = torch.amax(attr_all, dim=(1,2))
tmp = tmp.unsqueeze(0)
example_head_importances = torch.cat((example_head_importances, tmp), dim=0)
res_attr.append(attr_all.data)
all_head_importances = example_head_importances.unsqueeze(0) if all_head_importances is None else torch.cat((all_head_importances, example_head_importances.unsqueeze(0)), dim=0)
head_importances = example_head_importances if head_importances is None else torch.add(head_importances, example_head_importances)
if counter == 100:
break
continue
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"adapter_weights": adapter_weight}
if args.model_type != "distilbert":
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[4]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel evaluating
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if args.get_attr:
head_importances = head_importances/counter
logger.info(f'Head Importances = {head_importances}')
torch.save(head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_importances_100.pt'))
torch.save(all_head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_all_importances_100.pt'))
return None, None
  if nb_eval_steps == 0:
    results = {k: 0 for k in ["loss", "precision", "recall", "f1"]}
    preds_list = []
else:
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list)
}
if print_result:
logger.info("***** Evaluation result %s in %s *****" % (prefix, lang))
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode, lang, lang2id=None, few_shot=-1):
  # Make sure only the first process in distributed training processes
  # the dataset; the others will use the cache
  if args.local_rank not in [-1, 0] and mode == 'train':
torch.distributed.barrier()
# Load data features from cache or dataset file
bpe_dropout = args.bpe_dropout
if mode != 'train': bpe_dropout = 0
if bpe_dropout > 0:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}_drop{}".format(mode, lang,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length), bpe_dropout))
else:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}".format(mode, lang,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
langs = lang.split(',')
logger.info("all languages = {}".format(lang))
features = []
for lg in langs:
data_file = os.path.join(args.data_dir, lg, "{}.{}".format(mode, args.model_name_or_path))
logger.info("Creating features from dataset file at {} in language {}".format(data_file, lg))
examples = read_examples_from_file(data_file, lg, lang2id)
      logger.info("Read %d examples from %s", len(examples), data_file)
features_lg = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta", "xlmr"]),
pad_on_left=bool(args.model_type in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
lang=lg,
bpe_dropout=bpe_dropout,
)
features.extend(features_lg)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file {}, len(features)={}".format(cached_features_file, len(features)))
torch.save(features, cached_features_file)
  # Make sure only the first process in distributed training processes
  # the dataset; the others will use the cache
  if args.local_rank == 0 and mode == 'train':
torch.distributed.barrier()
if few_shot > 0 and mode == 'train':
logger.info("Original no. of examples = {}".format(len(features)))
features = features[: few_shot]
logger.info('Using few-shot learning on {} examples'.format(len(features)))
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
if args.model_type == 'xlm' and features[0].langs is not None:
all_langs = torch.tensor([f.langs for f in features], dtype=torch.long)
logger.info('all_langs[0] = {}'.format(all_langs[0]))
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_langs)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
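# Cache naming sketch (derived from the format strings above): training
# English data with bert-base-multilingual-cased and max_seq_length 128 is
# cached as <data_dir>/cached_train_en_bert-base-multilingual-cased_128, with
# a "_drop<p>" suffix appended when BPE dropout is active.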
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
    model_type: Optional[str] = field(
        default=None, metadata={"help": "Model type (e.g. bert, xlm, xlmr); inferred from the loaded config when not set"}
    )
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
    labels: str = field(
        default=None, metadata={"help": "Path to a file containing all labels, one per line"}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir containing the per-language train/dev/test files"}
    )
    output_dir: str = field(
        default=None, metadata={"help": "The output directory where checkpoints and predictions will be written"}
    )
    max_seq_length: Optional[int] = field(
        default=128, metadata={"help": "The maximum total input sequence length after tokenization"}
    )
    do_train: Optional[bool] = field(default=False)
    do_eval: Optional[bool] = field(default=False)
    do_predict: Optional[bool] = field(default=False)
    do_adapter_predict: Optional[bool] = field(default=False)
    do_predict_dev: Optional[bool] = field(default=False)
    do_predict_train: Optional[bool] = field(default=False)
    init_checkpoint: Optional[str] = field(default=None)
    evaluate_during_training: Optional[bool] = field(default=False)
    do_lower_case: Optional[bool] = field(default=False)
    few_shot: Optional[int] = field(default=-1)
per_gpu_train_batch_size: Optional[int] = field(default=8)
per_gpu_eval_batch_size: Optional[int] = field(default=8)
gradient_accumulation_steps: Optional[int] = field(default=1)
learning_rate: Optional[float] = field(default=5e-5)
weight_decay: Optional[float] = field(default=0.0)
adam_epsilon: Optional[float] = field(default=1e-8)
max_grad_norm: Optional[float] = field(default=1.0)
num_train_epochs: Optional[float] = field(default=3.0)
max_steps: Optional[int] = field(default=-1)
save_steps: Optional[int] = field(default=-1)
warmup_steps: Optional[int] = field(default=0)
logging_steps: Optional[int] = field(default=50)
save_only_best_checkpoint: Optional[bool] = field(default=False)
eval_all_checkpoints: Optional[bool] = field(default=False)
no_cuda: Optional[bool] = field(default=False)
overwrite_output_dir: Optional[bool] = field(default=False)
overwrite_cache: Optional[bool] = field(default=False)
seed: Optional[int] = field(default=42)
fp16: Optional[bool] = field(default=False)
fp16_opt_level: Optional[str] = field(default="O1")
local_rank: Optional[int] = field(default=-1)
server_ip: Optional[str] = field(default="")
server_port: Optional[str] = field(default="")
predict_langs: Optional[str] = field(default="en")
train_langs: Optional[str] = field(default="en")
log_file: Optional[str] = field(default=None)
eval_patience: Optional[int] = field(default=-1)
bpe_dropout: Optional[float] = field(default=0)
do_save_adapter_fusions: Optional[bool] = field(default=False)
task_name: Optional[str] = field(default="ner")
predict_task_adapter: Optional[str] = field(default=None)
predict_lang_adapter: Optional[str] = field(default=None)
test_adapter: Optional[bool] = field(default=False)
adapter_weight: Optional[str] = field(default=None)
lang_to_vec: Optional[str] = field(default=None)
calc_weight_step: Optional[int] = field(default=0)
predict_save_prefix: Optional[str] = field(default=None)
en_weight: Optional[float] = field(default=None)
temperature: Optional[float] = field(default=1.0)
get_attr: Optional[bool] = field(default=False)
topk: Optional[int] = field(default=1)
task: Optional[str] = field(default='udpos')
def setup_adapter(args, adapter_args, model, train_adapter=True, load_adapter=None, load_lang_adapter=None):
task_name = args.task_name or "ner"
# check if adapter already exists, otherwise add it
if task_name not in model.config.adapters.adapter_list(AdapterType.text_task):
logging.info("Trying to decide if add adapter")
# resolve the adapter config
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
)
# load a pre-trained from Hub if specified
if adapter_args.load_adapter or load_adapter:
logging.info("loading task adapter")
model.load_adapter(
adapter_args.load_adapter if load_adapter is None else load_adapter,
AdapterType.text_task,
config=adapter_config,
load_as=task_name,
)
# otherwise, add a fresh adapter
else:
logging.info("Adding task adapter")
model.add_adapter(task_name, AdapterType.text_task, config=adapter_config)
    # optionally load a pre-trained language adapter; initialize the names
    # first so they stay defined even though the actual Hub-loading calls
    # below are commented out
    lang_adapter_name = None
    lang_adapter_names = []
    if adapter_args.load_lang_adapter or load_lang_adapter:
if load_lang_adapter is None:
# load a set of language adapters
logging.info("loading lang adpater {}".format(adapter_args.load_lang_adapter))
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
# if adapter_args.language == 'topk':
# assert len(args.predict_langs.split(',')) == 1
# filename = f'scripts/{args.task}/en/{args.predict_langs}.json'
# logger.info(f'Loading Adapter Languages from {filename}')
# languages = []
# with open(filename) as f:
# for i,line in enumerate(f):
# if i == args.topk:
# break
# line = json.loads(line)
# languages.append(line['adapter'].strip())
# adapter_names = [f'{lang}/wiki@ukp' for lang in languages]
# else:
# languages = adapter_args.language.split(",")
# adapter_names = adapter_args.load_lang_adapter.split(",")
# logger.info(f'Adapter Languages : {languages}, Length : {len(languages)}')
# logger.info(f'Adapter Names {adapter_names}, Length : {len(adapter_names)}')
# assert len(languages) == len(adapter_names)
# lang_adapter_names = []
# for language, adapter_name in zip(languages, adapter_names):
# logger.info(f'Language = {language}')
# logger.info(f'Adapter Name = {adapter_name}')
# lang_adapter_name = model.load_adapter(
# adapter_name,
# AdapterType.text_lang,
# config=lang_adapter_config,
# load_as=language,
# )
# lang_adapter_names.append(lang_adapter_name)
else:
logging.info("loading lang adpater {}".format(load_lang_adapter))
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
# lang_adapter_name = model.load_adapter(
# load_lang_adapter,
# AdapterType.text_lang,
# config=lang_adapter_config,
# load_as="lang",
# )
# lang_adapter_names = [lang_adapter_name]
else:
lang_adapter_name = None
lang_adapter_names = []
# Freeze all model weights except of those of this adapter
model.train_adapter([task_name])
# Set the adapters to be used in every forward pass
if lang_adapter_name:
model.set_active_adapters([lang_adapter_names, [task_name]])
else:
model.set_active_adapters([task_name])
return model, lang_adapter_names, task_name
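# Note on the active-adapter setup above (based on the adapter-transformers
# API this script targets): a nested list such as
# [lang_adapter_names, [task_name]] denotes stacking, i.e. every forward pass
# runs the (possibly mixed) language adapters first and the task adapter on
# top, while train_adapter([task_name]) freezes all weights except the task
# adapter's.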
def load_model(args, num_labels):
logger.info('Loading pretrained model and tokenizer')
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
cache_dir=args.cache_dir,
)
args.model_type = config.model_type
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir,
use_fast=False,
)
if args.init_checkpoint:
logger.info("loading from init_checkpoint={}".format(args.init_checkpoint))
model = AutoModelForTokenClassification.from_pretrained(
args.init_checkpoint,
config=config,
cache_dir=args.cache_dir,
)
else:
logger.info("loading from existing model {}".format(args.model_name_or_path))
model = AutoModelForTokenClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
lang2id = config.lang2id if args.model_type == "xlm" else None
logger.info("Using lang2id = {}".format(lang2id))
return model, tokenizer, lang2id
def predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, split):
output_test_results_file = os.path.join(args.output_dir, f"{split}_results.txt")
with open(output_test_results_file, "a") as result_writer:
for lang in args.predict_langs.split(','):
#Check if language data exists
if not os.path.exists(os.path.join(args.data_dir, lang, '{}.{}'.format(split, args.model_name_or_path))):
logger.info("Language {}, split {} does not exist".format(lang, split))
continue
#Activate the required language adapter
adapter_weight = None
# if not args.adapter_weight and not args.lang_to_vec:
# if (adapter_args.train_adapter or args.test_adapter) and not args.adapter_weight:
# if lang in lang_adapter_names:
# logger.info(f'Language adapter for {lang} found')
# logger.info("Set active language adapter to {}".format(lang))
# model.set_active_adapters([[lang], [task_name]])
# else:
# logger.info(f'Language adapter for {lang} not found, using {lang_adapter_names[0]} instead')
# logger.info("Set active language adapter to {}".format(lang_adapter_names[0]))
# model.set_active_adapters([[lang_adapter_names[0]], [task_name]])
# else:
# if args.adapter_weight == 'equal':
# adapter_weight = [1/len(lang_adapter_names) for _ in lang_adapter_names]
# elif args.adapter_weight == 'equal_en':
# assert 'en' in lang_adapter_names, 'English language adapter not included'
# adapter_weight = [(1-args.en_weight)/(len(lang_adapter_names)-1) for _ in lang_adapter_names]
# en_index = lang_adapter_names.index('en')
# adapter_weight[en_index] = args.en_weight
# elif args.lang_to_vec:
# if args.en_weight is not None:
# logger.info(lang_adapter_names)
# assert 'en' in lang_adapter_names, 'English language adapter not included'
# adapter_weight = calc_l2v_weights(args, lang, lang_adapter_names)
# elif args.adapter_weight == 'load':
# filename = f'weights/{args.task}/{lang}/weights_s{args.seed}'
# logger.info(f'Loading adapter weights from {filename}')
# with open(filename) as f:
# adapter_weight = json.loads(next(f))
# elif args.adapter_weight != "0" and args.adapter_weight is not None:
# adapter_weight = [float(w) for w in args.adapter_weight.split(",")]
logger.info('Args Adapter Weight = {}'.format(args.adapter_weight))
logger.info('Adapter Languages = {}'.format(lang_adapter_names))
if adapter_weight is not None:
logger.info("Adapter Weights = {}".format(adapter_weight))
logger.info('Sum of Adapter Weights = {}'.format(sum(adapter_weight)))
logger.info("Length of Adapter Weights = {}".format(len(adapter_weight)))
# model.set_active_adapters([ lang_adapter_names, [task_name]])
#Evaluate
result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=split, lang=lang, lang2id=lang2id, adapter_weight=adapter_weight, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
if args.get_attr:
continue
result_json = {}
# Save results
if args.predict_save_prefix is not None and args.predict_save_prefix:
result_json['language'] = f'{args.predict_save_prefix}_{lang}'
else:
result_json['language'] = f'{lang}'
result_json['seed'] = args.seed
result_json['language_adapters'] = lang_adapter_names
if args.adapter_weight:
result_json['adapter_weights'] = args.adapter_weight
for key in sorted(result.keys()):
result_json[key] = result[key]
result_writer.write(json.dumps(result_json) + '\n')
# Save predictions
if args.predict_save_prefix is not None and args.predict_save_prefix:
output_test_predictions_file = os.path.join(args.output_dir, "{}_{}_{}_s{}_predictions.txt".format(split, args.predict_save_prefix, lang, args.seed))
else:
output_test_predictions_file = os.path.join(args.output_dir, "{}_{}_s{}_predictions.txt".format(split, lang, args.seed))
infile = os.path.join(args.data_dir, lang, "{}.{}".format(split, args.model_name_or_path))
idxfile = infile + '.idx'
save_predictions(args, predictions, output_test_predictions_file, infile, idxfile)
def main():
    parser = HfArgumentParser((ModelArguments, MultiLingAdapterArguments))
args, adapter_args = parser.parse_args_into_dataclasses()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:
        # Initializes the distributed backend which synchronizes nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
    handlers = [logging.StreamHandler()]
    if args.log_file:
        handlers.append(logging.FileHandler(args.log_file))
    logging.basicConfig(handlers=handlers,
                        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logging.info("Input args: %r" % args)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare NER/POS task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id
# so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
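    # CrossEntropyLoss().ignore_index is -100 in PyTorch, so positions labeled
    # with pad_token_label_id (padding and word-piece continuations) are
    # excluded from both the loss and the seqeval metrics in evaluate()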
# Load pretrained model and tokenizer
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
    args.do_save_full_model = (not adapter_args.train_adapter)
    args.do_save_adapters = adapter_args.train_adapter
if args.do_save_adapters:
logging.info('save adapters')
logging.info(adapter_args.train_adapter)
if args.do_save_full_model:
logging.info('save model')
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank == 0:
torch.distributed.barrier()
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
model, tokenizer, lang2id = load_model(args, num_labels)
if adapter_args.train_adapter:
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model)
logger.info("lang adapter names: {}".format(" ".join(lang_adapter_names)))
else:
            lang_adapter_names = []
task_name = None
model.to(args.device)
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train", lang=args.train_langs, lang2id=lang2id, few_shot=args.few_shot)
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use default names for the model,
# you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
# Save model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
logger.info("Saving model checkpoint to %s", args.output_dir)
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
logging.info("Save adapter")
model_to_save.save_all_adapters(args.output_dir)
if args.do_save_adapter_fusions:
logging.info("Save adapter fusion")
model_to_save.save_all_adapter_fusions(args.output_dir)
if args.do_save_full_model:
logging.info("Save full model")
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Initialization for evaluation
results = {}
if args.init_checkpoint:
best_checkpoint = args.init_checkpoint
elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-best')):
best_checkpoint = os.path.join(args.output_dir, 'checkpoint-best')
else:
best_checkpoint = args.output_dir
# Evaluation
#This evaluates only if the entire model is saved, something we are not doing
if args.do_eval and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
        logger.info('Evaluating the model on dev set of training language (en)')
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)
model.to(args.device)
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='debugging', lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
results.update(result)
# for checkpoint in checkpoints:
# global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
# model = AutoModelForTokenClassification.from_pretrained(checkpoint)
# if adapter_args.train_adapter:
# load_adapter = checkpoint + "/" + args.task_name
# load_lang_adapter = "{}/{}".format(checkpoint, adapter_args.language)
# model.model_name = args.model_name_or_path
# model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)
#
# model.to(args.device)
# result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
# if result["f1"] > best_f1:
# best_checkpoint = checkpoint
# best_f1 = result["f1"]
# if global_step:
# result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
# results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
# writer.write("best checkpoint = {}, best f1 = {}\n".format(best_checkpoint, best_f1))
if args.do_predict and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
# Prediction
logger.info('Evaluating the model on test set of all the languages specified')
#Set up the task adapter
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'test')
if args.do_predict_train and args.local_rank in [-1, 0]:
logger.info('Evaluating on the train set of all specified languages')
model, tokenizer, lang2id = load_model(args, num_labels)
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'train')
#Predict dev set
if args.do_predict_dev and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
logger.info('Evaluating on the dev sets of all the specified languages')
#Set up task and language adapters
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'dev')
def save_predictions(args, predictions, output_file, text_file, idx_file, output_word_prediction=False):
# Save predictions
with open(text_file, "r") as text_reader, open(idx_file, "r") as idx_reader:
text = text_reader.readlines()
index = idx_reader.readlines()
assert len(text) == len(index)
# Sanity check on the predictions
with open(output_file, "w") as writer:
example_id = 0
prev_id = int(index[0])
for line, idx in zip(text, index):
if line == "" or line == "\n":
example_id += 1
else:
cur_id = int(idx)
output_line = '\n' if cur_id != prev_id else ''
if output_word_prediction:
output_line += line.split()[0] + '\t'
output_line += predictions[example_id].pop(0) + '\n'
writer.write(output_line)
prev_id = cur_id
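# Worked sketch for save_predictions (illustrative data, not from the repo):
# given text lines ["EU", "rejects", "\n", "German"] and idx lines
# ["0", "0", "0", "1"], the blank line advances example_id, a newline is
# emitted whenever the sentence index changes, and each non-blank token
# consumes the next prediction popped from its example's list, keeping the
# output aligned with the original pre-tokenized input.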
if __name__ == "__main__":
main()
| true
| true
|
7908cbff6c3f0f0fbcecfce553790dc0729ea028
| 5,490
|
py
|
Python
|
google/ads/google_ads/v6/proto/resources/paid_organic_search_term_view_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/resources/paid_organic_search_term_view_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/resources/paid_organic_search_term_view_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/resources/paid_organic_search_term_view.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/paid_organic_search_term_view.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\036PaidOrganicSearchTermViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nEgoogle/ads/googleads/v6/resources/paid_organic_search_term_view.proto\x12!google.ads.googleads.v6.resources\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xbd\x02\n\x19PaidOrganicSearchTermView\x12Q\n\rresource_name\x18\x01 \x01(\tB:\xe0\x41\x03\xfa\x41\x34\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12\x1d\n\x0bsearch_term\x18\x03 \x01(\tB\x03\xe0\x41\x03H\x00\x88\x01\x01:\x9d\x01\xea\x41\x99\x01\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12\x63\x63ustomers/{customer_id}/paidOrganicSearchTermViews/{campaign_id}~{ad_group_id}~{base64_search_term}B\x0e\n\x0c_search_termB\x8b\x02\n%com.google.ads.googleads.v6.resourcesB\x1ePaidOrganicSearchTermViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PAIDORGANICSEARCHTERMVIEW = _descriptor.Descriptor(
name='PaidOrganicSearchTermView',
full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A4\n2googleads.googleapis.com/PaidOrganicSearchTermView', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='search_term', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView.search_term', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352A\231\001\n2googleads.googleapis.com/PaidOrganicSearchTermView\022ccustomers/{customer_id}/paidOrganicSearchTermViews/{campaign_id}~{ad_group_id}~{base64_search_term}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_search_term', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView._search_term',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=199,
serialized_end=516,
)
_PAIDORGANICSEARCHTERMVIEW.oneofs_by_name['_search_term'].fields.append(
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'])
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'].containing_oneof = _PAIDORGANICSEARCHTERMVIEW.oneofs_by_name['_search_term']
DESCRIPTOR.message_types_by_name['PaidOrganicSearchTermView'] = _PAIDORGANICSEARCHTERMVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PaidOrganicSearchTermView = _reflection.GeneratedProtocolMessageType('PaidOrganicSearchTermView', (_message.Message,), {
'DESCRIPTOR' : _PAIDORGANICSEARCHTERMVIEW,
'__module__' : 'google.ads.googleads.v6.resources.paid_organic_search_term_view_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.PaidOrganicSearchTermView)
})
_sym_db.RegisterMessage(PaidOrganicSearchTermView)
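# Usage sketch (not part of the generated output; the resource-name pattern
# comes from the descriptor options above):
#   view = PaidOrganicSearchTermView(
#       resource_name='customers/123/paidOrganicSearchTermViews/1~2~aGk=',
#       search_term='hi')
#   data = view.SerializeToString()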
DESCRIPTOR._options = None
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['resource_name']._options = None
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term']._options = None
_PAIDORGANICSEARCHTERMVIEW._options = None
# @@protoc_insertion_point(module_scope)
| 58.404255
| 1,006
| 0.816393
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/paid_organic_search_term_view.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\036PaidOrganicSearchTermViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nEgoogle/ads/googleads/v6/resources/paid_organic_search_term_view.proto\x12!google.ads.googleads.v6.resources\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xbd\x02\n\x19PaidOrganicSearchTermView\x12Q\n\rresource_name\x18\x01 \x01(\tB:\xe0\x41\x03\xfa\x41\x34\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12\x1d\n\x0bsearch_term\x18\x03 \x01(\tB\x03\xe0\x41\x03H\x00\x88\x01\x01:\x9d\x01\xea\x41\x99\x01\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12\x63\x63ustomers/{customer_id}/paidOrganicSearchTermViews/{campaign_id}~{ad_group_id}~{base64_search_term}B\x0e\n\x0c_search_termB\x8b\x02\n%com.google.ads.googleads.v6.resourcesB\x1ePaidOrganicSearchTermViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PAIDORGANICSEARCHTERMVIEW = _descriptor.Descriptor(
name='PaidOrganicSearchTermView',
full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A4\n2googleads.googleapis.com/PaidOrganicSearchTermView', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='search_term', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView.search_term', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352A\231\001\n2googleads.googleapis.com/PaidOrganicSearchTermView\022ccustomers/{customer_id}/paidOrganicSearchTermViews/{campaign_id}~{ad_group_id}~{base64_search_term}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_search_term', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView._search_term',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=199,
serialized_end=516,
)
_PAIDORGANICSEARCHTERMVIEW.oneofs_by_name['_search_term'].fields.append(
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'])
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'].containing_oneof = _PAIDORGANICSEARCHTERMVIEW.oneofs_by_name['_search_term']
DESCRIPTOR.message_types_by_name['PaidOrganicSearchTermView'] = _PAIDORGANICSEARCHTERMVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PaidOrganicSearchTermView = _reflection.GeneratedProtocolMessageType('PaidOrganicSearchTermView', (_message.Message,), {
'DESCRIPTOR' : _PAIDORGANICSEARCHTERMVIEW,
'__module__' : 'google.ads.googleads.v6.resources.paid_organic_search_term_view_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.PaidOrganicSearchTermView)
})
_sym_db.RegisterMessage(PaidOrganicSearchTermView)
DESCRIPTOR._options = None
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['resource_name']._options = None
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term']._options = None
_PAIDORGANICSEARCHTERMVIEW._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
7908cf43b0d0f159ff836966761ee283b6c86bac
| 2,526
|
py
|
Python
|
erinn/python/models/DFN.py
|
swcjack6931677/ERINN
|
a4f3d0ad213515bc86e2a18575537d6affd472ac
|
[
"MIT"
] | null | null | null |
erinn/python/models/DFN.py
|
swcjack6931677/ERINN
|
a4f3d0ad213515bc86e2a18575537d6affd472ac
|
[
"MIT"
] | null | null | null |
erinn/python/models/DFN.py
|
swcjack6931677/ERINN
|
a4f3d0ad213515bc86e2a18575537d6affd472ac
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.models import Model
# Architecture 1: deep feedforward network (DFN),
# also known as a feedforward neural network or multilayer perceptron (MLP)
def get_dfn(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = Dense(256, activation='selu', name='Dense_selu_1')(model_input)
x = BatchNormalization(name='BN_1')(x)
x = Dense(256, activation='tanh', name='Dense_tanh_1')(x)
x = BatchNormalization(name='BN_2')(x)
x = Dense(256, activation='tanh', name='Dense_tanh_2')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN')
if show:
print('DFN summary:')
dfn.summary()
print()
return dfn
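# Usage sketch (illustrative; the compile settings and sizes are assumptions,
# not part of this module):
#   dfn = get_dfn(output_size=2800, img_height=30, img_width=100)
#   dfn.compile(optimizer='adam', loss='mse')
#   # inputs must be flattened to shape (batch, img_height * img_width)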
def get_dfn_relu(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = BatchNormalization(name='BN_1')(model_input)
x = Dense(256, activation='relu', name='Dense_relu_1')(x)
# x = BatchNormalization()(x)
x = Dense(256, activation='relu', name='Dense_relu_2')(x)
# x = BatchNormalization()(x)
x = Dense(256, activation='relu', name='Dense_relu_3')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_relu')
if show:
print('DFN_relu summary:')
dfn.summary()
print()
return dfn
def get_dfn_selu(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = BatchNormalization()(model_input)
x = Dense(256, activation='selu', name='Dense_selu_1')(x)
# x = BatchNormalization()(x)
x = Dense(256, activation='selu', name='Dense_selu_2')(x)
# x = BatchNormalization()(x)
x = Dense(256, activation='selu', name='Dense_selu_3')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_selu')
if show:
print('DFN_selu summary:')
dfn.summary()
print()
return dfn
| 36.085714
| 75
| 0.672605
|
from __future__ import absolute_import, division, print_function
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.models import Model
def get_dfn(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = Dense(256, activation='selu', name='Dense_selu_1')(model_input)
x = BatchNormalization(name='BN_1')(x)
x = Dense(256, activation='tanh', name='Dense_tanh_1')(x)
x = BatchNormalization(name='BN_2')(x)
x = Dense(256, activation='tanh', name='Dense_tanh_2')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN')
if show:
print('DFN summary:')
dfn.summary()
print()
return dfn
def get_dfn_relu(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = BatchNormalization(name='BN_1')(model_input)
x = Dense(256, activation='relu', name='Dense_relu_1')(x)
x = Dense(256, activation='relu', name='Dense_relu_2')(x)
x = Dense(256, activation='relu', name='Dense_relu_3')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_relu')
if show:
print('DFN_relu summary:')
dfn.summary()
print()
return dfn
def get_dfn_selu(output_size, img_height, img_width, show=True):
model_input = Input(shape=(img_height * img_width,), name='Main_input')
x = BatchNormalization()(model_input)
x = Dense(256, activation='selu', name='Dense_selu_1')(x)
x = Dense(256, activation='selu', name='Dense_selu_2')(x)
x = Dense(256, activation='selu', name='Dense_selu_3')(x)
dfn_output = Dense(output_size, activation='linear',
name='Output_Dense_linear')(x)
dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_selu')
if show:
print('DFN_selu summary:')
dfn.summary()
print()
return dfn
| true
| true
|
7908cf8049fa01d83e2ee5e22890bd4d7dd8b2d5
| 90,465
|
py
|
Python
|
certbot/tests/main_test.py
|
queilawithaQ/certbot
|
64df1fb32796fd083abd910b2ac81deaa7077c55
|
[
"Apache-2.0"
] | 1
|
2021-06-16T04:49:46.000Z
|
2021-06-16T04:49:46.000Z
|
certbot/tests/main_test.py
|
levancao798/certbot
|
32247b3c89cb44b87f764a21e6deda9168431dec
|
[
"Apache-2.0"
] | null | null | null |
certbot/tests/main_test.py
|
levancao798/certbot
|
32247b3c89cb44b87f764a21e6deda9168431dec
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
"""Tests for certbot._internal.main."""
# pylint: disable=too-many-lines
import datetime
from importlib import reload as reload_module
import io
import itertools
import json
import shutil
import sys
import tempfile
import traceback
import unittest
from typing import List
import josepy as jose
import pytz
from certbot import crypto_util
from certbot import errors
from certbot import interfaces # pylint: disable=unused-import
from certbot import util
from certbot._internal import account
from certbot._internal import cli
from certbot._internal import configuration
from certbot._internal import constants
from certbot._internal import main
from certbot._internal import updater
from certbot._internal.plugins import disco
from certbot._internal.plugins import manual
from certbot._internal.plugins import null
from certbot.compat import filesystem
from certbot.compat import os
from certbot.plugins import enhancements
import certbot.tests.util as test_util
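# Editorial note: the import below prefers the standalone 'mock' backport when
# it is installed and falls back to the standard library's unittest.mock.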
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
CERT_PATH = test_util.vector_path('cert_512.pem')
CERT = test_util.vector_path('cert_512.pem')
CSR = test_util.vector_path('csr_512.der')
KEY = test_util.vector_path('rsa256_key.pem')
JWK = jose.JWKRSA.load(test_util.load_vector('rsa512_key.pem'))
RSA2048_KEY_PATH = test_util.vector_path('rsa2048_key.pem')
SS_CERT_PATH = test_util.vector_path('cert_2048.pem')
class TestHandleCerts(unittest.TestCase):
"""Test for certbot._internal.main._handle_* methods"""
@mock.patch("certbot._internal.main._handle_unexpected_key_type_migration")
def test_handle_identical_cert_request_pending(self, mock_handle_migration):
mock_lineage = mock.Mock()
mock_lineage.ensure_deployed.return_value = False
# pylint: disable=protected-access
ret = main._handle_identical_cert_request(mock.Mock(), mock_lineage)
self.assertEqual(ret, ("reinstall", mock_lineage))
self.assertTrue(mock_handle_migration.called)
@mock.patch("certbot._internal.main._handle_unexpected_key_type_migration")
def test_handle_subset_cert_request(self, mock_handle_migration):
mock_config = mock.Mock()
mock_config.expand = True
mock_lineage = mock.Mock()
mock_lineage.names.return_value = ["dummy1", "dummy2"]
ret = main._handle_subset_cert_request(mock_config, ["dummy1"], mock_lineage)
self.assertEqual(ret, ("renew", mock_lineage))
self.assertTrue(mock_handle_migration.called)
@mock.patch("certbot._internal.main.cli.set_by_cli")
def test_handle_unexpected_key_type_migration(self, mock_set):
config = mock.Mock()
config.key_type = "rsa"
cert = mock.Mock()
cert.private_key_type = "ecdsa"
mock_set.return_value = True
main._handle_unexpected_key_type_migration(config, cert)
mock_set.return_value = False
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
mock_set.side_effect = lambda var: var != "certname"
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
mock_set.side_effect = lambda var: var != "key_type"
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
class RunTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.run."""
def setUp(self):
super().setUp()
self.domain = 'example.org'
patches = [
mock.patch('certbot._internal.main._get_and_save_cert'),
mock.patch('certbot._internal.main.display_ops.success_installation'),
mock.patch('certbot._internal.main.display_ops.success_renewal'),
mock.patch('certbot._internal.main._init_le_client'),
mock.patch('certbot._internal.main._suggest_donation_if_appropriate'),
mock.patch('certbot._internal.main._report_new_cert'),
mock.patch('certbot._internal.main._find_cert'),
mock.patch('certbot._internal.eff.handle_subscription'),
]
self.mock_auth = patches[0].start()
self.mock_success_installation = patches[1].start()
self.mock_success_renewal = patches[2].start()
self.mock_init = patches[3].start()
self.mock_suggest_donation = patches[4].start()
self.mock_report_cert = patches[5].start()
self.mock_find_cert = patches[6].start()
self.mock_subscription = patches[7].start()
for patch in patches:
self.addCleanup(patch.stop)
def _call(self):
args = '-a webroot -i null -d {0}'.format(self.domain).split()
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
from certbot._internal.main import run
run(config, plugins)
def test_newcert_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = True, None
self._call()
self.mock_success_installation.assert_called_once_with([self.domain])
def test_reinstall_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = False, mock.Mock()
self._call()
self.mock_success_installation.assert_called_once_with([self.domain])
def test_renewal_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = True, mock.Mock()
self._call()
self.mock_success_renewal.assert_called_once_with([self.domain])
@mock.patch('certbot._internal.main.plug_sel.choose_configurator_plugins')
def test_run_enhancement_not_supported(self, mock_choose):
mock_choose.return_value = (null.Installer(self.config, "null"), None)
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.assertRaises(errors.NotSupportedError,
main.run,
self.config, plugins)
class CertonlyTest(unittest.TestCase):
"""Tests for certbot._internal.main.certonly."""
def setUp(self):
self.get_utility_patch = test_util.patch_get_utility()
self.mock_get_utility = self.get_utility_patch.start()
def tearDown(self):
self.get_utility_patch.stop()
def _call(self, args):
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
with mock.patch('certbot._internal.main._suggest_donation_if_appropriate'):
with mock.patch('certbot._internal.eff.handle_subscription'):
main.certonly(config, plugins)
return mock_init() # returns the client
@mock.patch('certbot._internal.main._find_cert')
@mock.patch('certbot._internal.main._get_and_save_cert')
@mock.patch('certbot._internal.main._report_new_cert')
def test_no_reinstall_text_pause(self, unused_report, mock_auth,
mock_find_cert):
mock_notification = self.mock_get_utility().notification
mock_notification.side_effect = self._assert_no_pause
mock_auth.return_value = mock.Mock()
mock_find_cert.return_value = False, None
self._call('certonly --webroot -d example.com'.split())
def _assert_no_pause(self, message, pause=True): # pylint: disable=unused-argument
self.assertFalse(pause)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
@mock.patch('certbot._internal.renewal.renew_cert')
@mock.patch('certbot._internal.main._handle_unexpected_key_type_migration')
@mock.patch('certbot._internal.main._report_new_cert')
def test_find_lineage_for_domains_and_certname(self, mock_report_cert,
mock_handle_type, mock_renew_cert, mock_domains, mock_lineage):
domains = ['example.com', 'test.org']
mock_domains.return_value = domains
mock_lineage.names.return_value = domains
self._call(('certonly --webroot -d example.com -d test.org '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 1)
self.assertEqual(mock_domains.call_count, 1)
self.assertEqual(mock_renew_cert.call_count, 1)
self.assertEqual(mock_report_cert.call_count, 1)
self.assertEqual(mock_handle_type.call_count, 1)
# user confirms updating lineage with new domains
self._call(('certonly --webroot -d example.com -d test.com '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 2)
self.assertEqual(mock_domains.call_count, 2)
self.assertEqual(mock_renew_cert.call_count, 2)
self.assertEqual(mock_report_cert.call_count, 2)
self.assertEqual(mock_handle_type.call_count, 2)
# error in _ask_user_to_confirm_new_names
self.mock_get_utility().yesno.return_value = False
self.assertRaises(errors.ConfigurationError, self._call,
'certonly --webroot -d example.com -d test.com --cert-name example.com'.split())
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
@mock.patch('certbot.display.ops.choose_names')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main._report_new_cert')
def test_find_lineage_for_domains_new_certname(self, mock_report_cert,
mock_lineage, mock_choose_names, mock_domains_for_certname):
mock_lineage.return_value = None
# no lineage with this name but we specified domains so create a new cert
self._call(('certonly --webroot -d example.com -d test.com '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 1)
self.assertEqual(mock_report_cert.call_count, 1)
# no lineage with this name and we didn't give domains
mock_choose_names.return_value = ["somename"]
mock_domains_for_certname.return_value = None
self._call(('certonly --webroot --cert-name example.com').split())
self.assertIs(mock_choose_names.called, True)
class FindDomainsOrCertnameTest(unittest.TestCase):
"""Tests for certbot._internal.main._find_domains_or_certname."""
@mock.patch('certbot.display.ops.choose_names')
def test_display_ops(self, mock_choose_names):
mock_config = mock.Mock(domains=None, certname=None)
mock_choose_names.return_value = "domainname"
# pylint: disable=protected-access
self.assertEqual(main._find_domains_or_certname(mock_config, None),
("domainname", None))
@mock.patch('certbot.display.ops.choose_names')
def test_no_results(self, mock_choose_names):
mock_config = mock.Mock(domains=None, certname=None)
mock_choose_names.return_value = []
# pylint: disable=protected-access
self.assertRaises(errors.Error, main._find_domains_or_certname, mock_config, None)
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
def test_grab_domains(self, mock_domains):
mock_config = mock.Mock(domains=None, certname="one.com")
mock_domains.return_value = ["one.com", "two.com"]
# pylint: disable=protected-access
self.assertEqual(main._find_domains_or_certname(mock_config, None),
(["one.com", "two.com"], "one.com"))
class RevokeTest(test_util.TempDirTestCase):
"""Tests for certbot._internal.main.revoke."""
def setUp(self):
super().setUp()
shutil.copy(CERT_PATH, self.tempdir)
self.tmp_cert_path = os.path.abspath(os.path.join(self.tempdir, 'cert_512.pem'))
patches = [
mock.patch('acme.client.BackwardsCompatibleClientV2'),
mock.patch('certbot._internal.client.Client'),
mock.patch('certbot._internal.main._determine_account'),
mock.patch('certbot._internal.main.display_ops.success_revocation')
]
self.mock_acme_client = patches[0].start()
patches[1].start()
self.mock_determine_account = patches[2].start()
self.mock_success_revoke = patches[3].start()
for patch in patches:
self.addCleanup(patch.stop)
from certbot._internal.account import Account
self.regr = mock.MagicMock()
self.meta = Account.Meta(
creation_host="test.certbot.org",
creation_dt=datetime.datetime(
2015, 7, 4, 14, 4, 10, tzinfo=pytz.UTC))
self.acc = Account(self.regr, JWK, self.meta)
self.mock_determine_account.return_value = (self.acc, None)
def _call(self, args=None):
if not args:
args = 'revoke --cert-path={0} '
args = args.format(self.tmp_cert_path).split()
cli.set_by_cli.detector = None # required to reset set_by_cli state
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
from certbot._internal.main import revoke
revoke(config, plugins)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main.client.acme_client')
def test_revoke_with_reason(self, mock_acme_client,
mock_delete_if_appropriate):
mock_delete_if_appropriate.return_value = False
mock_revoke = mock_acme_client.BackwardsCompatibleClientV2().revoke
expected = []
for reason, code in constants.REVOCATION_REASONS.items():
args = 'revoke --cert-path={0} --reason {1}'.format(self.tmp_cert_path, reason).split()
self._call(args)
expected.append(mock.call(mock.ANY, code))
args = 'revoke --cert-path={0} --reason {1}'.format(self.tmp_cert_path,
reason.upper()).split()
self._call(args)
expected.append(mock.call(mock.ANY, code))
self.assertEqual(expected, mock_revoke.call_args_list)
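    # Editorial note: each reason is exercised twice above, once lowercase and
    # once uppercase, so the expected call list also verifies that --reason
    # parsing is case-insensitive.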
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path,
server="https://acme.example")
args = 'revoke --cert-name=example.com'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(mock.ANY, mock.ANY, 'https://acme.example')
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname_and_server(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
"""Revoking with --server should use the server from the CLI"""
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path,
server="https://acme.example")
args = 'revoke --cert-name=example.com --server https://other.example'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(mock.ANY, mock.ANY, 'https://other.example')
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname_empty_server(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
"""Revoking with --cert-name where the lineage server is empty shouldn't crash """
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path, server=None)
args = 'revoke --cert-name=example.com'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(
mock.ANY, mock.ANY, constants.CLI_DEFAULTS['server'])
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
def test_revocation_success(self, mock_delete_if_appropriate):
        mock_delete_if_appropriate.return_value = False
        self._call()
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
def test_revocation_error(self):
from acme import errors as acme_errors
self.mock_acme_client.side_effect = acme_errors.ClientError()
self.assertRaises(acme_errors.ClientError, self._call)
self.mock_success_revoke.assert_not_called()
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_revocation_with_prompt(self, mock_get_utility,
mock_delete, mock_delete_if_appropriate):
mock_get_utility().yesno.return_value = False
mock_delete_if_appropriate.return_value = False
self._call()
self.assertFalse(mock_delete.called)
class DeleteIfAppropriateTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main._delete_if_appropriate """
def _call(self, mock_config):
from certbot._internal.main import _delete_if_appropriate
_delete_if_appropriate(mock_config)
def _test_delete_opt_out_common(self):
with mock.patch('certbot._internal.cert_manager.delete') as mock_delete:
self._call(self.config)
mock_delete.assert_not_called()
@test_util.patch_get_utility()
def test_delete_flag_opt_out(self, unused_mock_get_utility):
self.config.delete_after_revoke = False
self._test_delete_opt_out_common()
@test_util.patch_get_utility()
def test_delete_prompt_opt_out(self, mock_get_utility):
util_mock = mock_get_utility()
util_mock.yesno.return_value = False
self._test_delete_opt_out_common()
@mock.patch("certbot._internal.main.logger.warning")
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.delete')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@test_util.patch_get_utility()
def test_overlapping_archive_dirs(self, mock_get_utility,
mock_cert_path_to_lineage, mock_archive,
mock_match_and_check_overlaps, mock_delete,
mock_renewal_file_for_certname, mock_warning):
# pylint: disable = unused-argument
config = self.config
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_match_and_check_overlaps.side_effect = errors.OverlappingMatchFound()
self._call(config)
mock_delete.assert_not_called()
self.assertEqual(mock_warning.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.delete')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@test_util.patch_get_utility()
def test_cert_path_only(self, mock_get_utility,
mock_cert_path_to_lineage, mock_delete, mock_archive,
mock_overlapping_archive_dirs, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_overlapping_archive_dirs.return_value = False
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_noninteractive_deletion(self, mock_get_utility, mock_delete,
mock_cert_path_to_lineage, mock_full_archive_dir,
mock_match_and_check_overlaps, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.namespace.noninteractive_mode = True
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_full_archive_dir.return_value = ""
mock_match_and_check_overlaps.return_value = ""
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_opt_in_deletion(self, mock_get_utility, mock_delete,
mock_cert_path_to_lineage, mock_full_archive_dir,
mock_match_and_check_overlaps, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.namespace.delete_after_revoke = True
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_full_archive_dir.return_value = ""
mock_match_and_check_overlaps.return_value = ""
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
self.assertFalse(mock_get_utility().yesno.called)
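# Editorial summary of DeleteIfAppropriateTest: the certificate is deleted
# after revocation when the user opts in, confirms the prompt, or runs
# non-interactively; deletion is skipped on explicit opt-out, a declined
# prompt, or overlapping archive directories (which also logs a warning).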
class DetermineAccountTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main._determine_account."""
def setUp(self):
super().setUp()
self.config.account = None
self.config.email = None
self.config.register_unsafely_without_email = False
self.accs = [mock.MagicMock(id='x'), mock.MagicMock(id='y')]
self.account_storage = account.AccountMemoryStorage()
# For use in saving accounts: fake out the new_authz URL.
self.mock_client = mock.MagicMock()
self.mock_client.directory.new_authz = "hi"
def _call(self):
# pylint: disable=protected-access
from certbot._internal.main import _determine_account
with mock.patch('certbot._internal.main.account.AccountFileStorage') as mock_storage, \
test_util.patch_get_utility():
mock_storage.return_value = self.account_storage
return _determine_account(self.config)
def test_args_account_set(self):
self.account_storage.save(self.accs[1], self.mock_client)
self.config.account = self.accs[1].id
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
def test_single_account(self):
self.account_storage.save(self.accs[0], self.mock_client)
self.assertEqual((self.accs[0], None), self._call())
self.assertEqual(self.accs[0].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('certbot._internal.client.display_ops.choose_account')
def test_multiple_accounts(self, mock_choose_accounts):
for acc in self.accs:
self.account_storage.save(acc, self.mock_client)
mock_choose_accounts.return_value = self.accs[1]
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(
set(mock_choose_accounts.call_args[0][0]), set(self.accs))
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('certbot._internal.client.display_ops.get_email')
@mock.patch('certbot._internal.main.display_util.notify')
def test_no_accounts_no_email(self, mock_notify, mock_get_email):
mock_get_email.return_value = 'foo@bar.baz'
with mock.patch('certbot._internal.main.client') as client:
client.register.return_value = (
self.accs[0], mock.sentinel.acme)
self.assertEqual((self.accs[0], mock.sentinel.acme), self._call())
client.register.assert_called_once_with(
self.config, self.account_storage, tos_cb=mock.ANY)
self.assertEqual(self.accs[0].id, self.config.account)
self.assertEqual('foo@bar.baz', self.config.email)
mock_notify.assert_called_once_with('Account registered.')
def test_no_accounts_email(self):
self.config.email = 'other email'
with mock.patch('certbot._internal.main.client') as client:
client.register.return_value = (self.accs[1], mock.sentinel.acme)
self._call()
self.assertEqual(self.accs[1].id, self.config.account)
self.assertEqual('other email', self.config.email)
class MainTest(test_util.ConfigTestCase):
"""Tests for different commands."""
def setUp(self):
super().setUp()
filesystem.mkdir(self.config.logs_dir)
self.standard_args = ['--config-dir', self.config.config_dir,
'--work-dir', self.config.work_dir,
'--logs-dir', self.config.logs_dir, '--text']
self.mock_sleep = mock.patch('time.sleep').start()
def tearDown(self):
# Reset globals in cli
reload_module(cli)
super().tearDown()
def _call(self, args, stdout=None, mockisfile=False):
"""Run the cli with output streams, actual client and optionally
os.path.isfile() mocked out"""
if mockisfile:
orig_open = os.path.isfile
def mock_isfile(fn, *args, **kwargs): # pylint: disable=unused-argument
"""Mock os.path.isfile()"""
if (fn.endswith("cert") or
fn.endswith("chain") or
fn.endswith("privkey")):
return True
return orig_open(fn)
with mock.patch("certbot.compat.os.path.isfile") as mock_if:
mock_if.side_effect = mock_isfile
with mock.patch('certbot._internal.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
else:
with mock.patch('certbot._internal.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
def _call_no_clientmock(self, args, stdout=None):
"Run the client with output streams mocked out"
args = self.standard_args + args
toy_stdout = stdout if stdout else io.StringIO()
with mock.patch('certbot._internal.main.sys.stdout', new=toy_stdout):
with mock.patch('certbot._internal.main.sys.stderr') as stderr:
with mock.patch("certbot.util.atexit"):
ret = main.main(args[:]) # NOTE: parser can alter its args!
return ret, toy_stdout, stderr
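    # Editorial note on args[:] above: main.main can mutate the argument list
    # it receives (the parser may pop or rewrite entries), so both helpers
    # pass a shallow copy to keep the caller's list intact, e.g.:
    #   args = ['certonly', '-d', 'example.com']
    #   main.main(args[:])  # 'args' is unchanged afterwards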
def test_no_flags(self):
with mock.patch('certbot._internal.main.run') as mock_run:
self._call([])
self.assertEqual(1, mock_run.call_count)
def test_version_string_program_name(self):
toy_out = io.StringIO()
toy_err = io.StringIO()
with mock.patch('certbot._internal.main.sys.stdout', new=toy_out):
with mock.patch('certbot._internal.main.sys.stderr', new=toy_err):
try:
main.main(["--version"])
except SystemExit:
pass
finally:
output = toy_out.getvalue() or toy_err.getvalue()
self.assertTrue("certbot" in output, "Output is {0}".format(output))
def _cli_missing_flag(self, args, message):
"Ensure that a particular error raises a missing cli flag error containing message"
exc = None
try:
with mock.patch('certbot._internal.main.sys.stderr'):
main.main(self.standard_args + args[:]) # NOTE: parser can alter its args!
except errors.MissingCommandlineFlag as exc_:
exc = exc_
self.assertTrue(message in str(exc))
self.assertTrue(exc is not None)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_noninteractive(self, _):
args = ['-n', 'certonly']
self._cli_missing_flag(args, "specify a plugin")
args.extend(['--standalone', '-d', 'eg.is'])
self._cli_missing_flag(args, "register before running")
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._report_new_cert')
@mock.patch('certbot._internal.main.client.acme_client.Client')
@mock.patch('certbot._internal.main._determine_account')
@mock.patch('certbot._internal.main.client.Client.obtain_and_enroll_certificate')
@mock.patch('certbot._internal.main._get_and_save_cert')
def test_user_agent(self, gsc, _obt, det, _client, _, __, ___):
# Normally the client is totally mocked out, but here we need more
# arguments to automate it...
args = ["--standalone", "certonly", "-m", "none@none.com",
"-d", "example.com", '--agree-tos'] + self.standard_args
det.return_value = mock.MagicMock(), None
gsc.return_value = mock.MagicMock()
with mock.patch('certbot._internal.main.client.acme_client.ClientNetwork') as acme_net:
self._call_no_clientmock(args)
os_ver = util.get_os_info_ua()
ua = acme_net.call_args[1]["user_agent"]
self.assertTrue(os_ver in ua)
import platform
plat = platform.platform()
if "linux" in plat.lower():
self.assertTrue(util.get_os_info_ua() in ua)
with mock.patch('certbot._internal.main.client.acme_client.ClientNetwork') as acme_net:
ua = "bandersnatch"
args += ["--user-agent", ua]
self._call_no_clientmock(args)
acme_net.assert_called_once_with(mock.ANY, account=mock.ANY, verify_ssl=True,
user_agent=ua)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_selection(self, mock_pick_installer, _rec):
self._call(['install', '--domains', 'foo.bar', '--cert-path', 'cert',
'--key-path', 'privkey', '--chain-path', 'chain'], mockisfile=True)
self.assertEqual(mock_pick_installer.call_count, 1)
@mock.patch('certbot._internal.main._install_cert')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_certname(self, _inst, _rec, mock_install):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install', '--cert-name', 'whatever'], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('privkey'))
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._install_cert')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_param_override(self, _inst, _rec, mock_install, _):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install', '--cert-name', 'whatever',
'--key-path', test_util.temp_join('overriding_privkey')], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.chain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('overriding_privkey'))
            mock_install.reset_mock()
self._call(['install', '--cert-name', 'whatever',
'--cert-path', test_util.temp_join('overriding_cert')], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('overriding_cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('privkey'))
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_param_error(self, _inst, _rec):
self.assertRaises(errors.ConfigurationError,
self._call,
['install', '--cert-name', 'notfound',
'--key-path', 'invalid'])
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.cert_manager.get_certnames')
@mock.patch('certbot._internal.main._install_cert')
def test_installer_select_cert(self, mock_inst, mock_getcert, _inst, _rec):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install'], mockisfile=True)
self.assertTrue(mock_getcert.called)
self.assertTrue(mock_inst.called)
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._report_new_cert')
@mock.patch('certbot.util.exe_exists')
def test_configurator_selection(self, mock_exe_exists, _, __, ___):
mock_exe_exists.return_value = True
real_plugins = disco.PluginsRegistry.find_all()
args = ['--apache', '--authenticator', 'standalone']
# This needed two calls to find_all(), which we're avoiding for now
# because of possible side effects:
# https://github.com/letsencrypt/letsencrypt/commit/51ed2b681f87b1eb29088dd48718a54f401e4855
# with mock.patch('certbot._internal.cli.plugins_testable') as plugins:
# plugins.return_value = {"apache": True, "nginx": True}
# ret, _, _, _ = self._call(args)
# self.assertTrue("Too many flags setting" in ret)
args = ["install", "--nginx", "--cert-path",
test_util.temp_join('blah'), "--key-path", test_util.temp_join('blah'),
"--nginx-server-root", "/nonexistent/thing", "-d",
"example.com", "--debug"]
if "nginx" in real_plugins:
# Sending nginx a non-existent conf dir will simulate misconfiguration
# (we can only do that if certbot-nginx is actually present)
ret, _, _, _ = self._call(args)
self.assertTrue("The nginx plugin is not working" in ret)
self.assertTrue("MisconfigurationError" in ret)
self._cli_missing_flag(["--standalone"], "With the standalone plugin, you probably")
with mock.patch("certbot._internal.main._init_le_client") as mock_init:
with mock.patch("certbot._internal.main._get_and_save_cert") as mock_gsc:
mock_gsc.return_value = mock.MagicMock()
self._call(["certonly", "--manual", "-d", "foo.bar"])
unused_config, auth, unused_installer = mock_init.call_args[0]
self.assertTrue(isinstance(auth, manual.Authenticator))
with mock.patch('certbot._internal.main.certonly') as mock_certonly:
self._call(["auth", "--standalone"])
self.assertEqual(1, mock_certonly.call_count)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_rollback(self, _):
_, _, _, client = self._call(['rollback'])
self.assertEqual(1, client.rollback.call_count)
_, _, _, client = self._call(['rollback', '--checkpoints', '123'])
client.rollback.assert_called_once_with(
mock.ANY, 123, mock.ANY, mock.ANY)
@mock.patch('certbot._internal.cert_manager.update_live_symlinks')
def test_update_symlinks(self, mock_cert_manager):
self._call_no_clientmock(['update_symlinks'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.cert_manager.certificates')
def test_certificates(self, mock_cert_manager):
self._call_no_clientmock(['certificates'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.cert_manager.delete')
def test_delete(self, mock_cert_manager):
self._call_no_clientmock(['delete'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_plugins(self, _, _det, mock_disco):
flags = ['--init', '--prepare', '--authenticators', '--installers']
for args in itertools.chain(
*(itertools.combinations(flags, r)
for r in range(len(flags)))):
self._call(['plugins'] + list(args))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_no_args(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(stdout.getvalue().strip(), str(filtered))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_no_args_unprivileged(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
def throw_error(directory, mode, strict):
"""Raises error.Error."""
_, _, _ = directory, mode, strict
raise errors.Error()
stdout = io.StringIO()
with mock.patch('certbot.util.set_up_core_dir') as mock_set_up_core_dir:
with test_util.patch_get_utility_with_stdout(stdout=stdout):
mock_set_up_core_dir.side_effect = throw_error
_, stdout, _, _ = self._call(['plugins'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(stdout.getvalue().strip(), str(filtered))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_init(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins', '--init'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
self.assertEqual(stdout.getvalue().strip(), str(verified))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_prepare(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins', '--init', '--prepare'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
verified.prepare.assert_called_once_with()
verified.available.assert_called_once_with()
available = verified.available()
self.assertEqual(stdout.getvalue().strip(), str(available))
def test_certonly_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('certbot._internal.main.certonly') as mock_certonly:
            self._call(['certonly', '--cert-path', cert, '--key-path', key,
                        '--chain-path', chain,
                        '--fullchain-path', fullchain])
config, unused_plugins = mock_certonly.call_args[0]
self.assertEqual(config.cert_path, os.path.abspath(cert))
self.assertEqual(config.key_path, os.path.abspath(key))
self.assertEqual(config.chain_path, os.path.abspath(chain))
self.assertEqual(config.fullchain_path, os.path.abspath(fullchain))
def test_certonly_bad_args(self):
try:
self._call(['-a', 'bad_auth', 'certonly'])
assert False, "Exception should have been raised"
except errors.PluginSelectionError as e:
self.assertTrue('The requested bad_auth plugin does not appear' in str(e))
def test_check_config_sanity_domain(self):
# FQDN
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'a' * 64])
# FQDN 2
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', (('a' * 50) + '.') * 10])
# Bare IP address (this is actually a different error message now)
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', '204.11.231.35'])
def test_csr_with_besteffort(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0} --allow-subset-of-names'.format(CSR).split())
def test_run_with_csr(self):
# This is an error because you can only use --csr with certonly
try:
self._call(['--csr', CSR])
except errors.Error as e:
assert "Please try the certonly" in repr(e)
return
assert False, "Expected supplying --csr to fail with default verb"
def test_csr_with_no_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0}'.format(
test_util.vector_path('csr-nonames_512.pem')).split())
def test_csr_with_inconsistent_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly -d example.org --csr {0}'.format(CSR).split())
def _certonly_new_request_common(self, mock_client, args=None):
with mock.patch('certbot._internal.main._find_lineage_for_domains_and_certname') \
as mock_renewal:
mock_renewal.return_value = ("newcert", None)
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
if args is None:
args = []
args += '-d foo.bar -a standalone certonly'.split()
self._call(args)
@test_util.patch_get_utility()
def test_certonly_dry_run_new_request_success(self, mock_get_utility):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = None
self._certonly_new_request_common(mock_client, ['--dry-run'])
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
# Asserts we don't suggest donating after a successful dry run
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot.crypto_util.notAfter')
@test_util.patch_get_utility()
def test_certonly_new_request_success(self, mock_get_utility, mock_notAfter, mock_subscription):
cert_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/foo.bar'))
key_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/baz.qux'))
date = '1970-01-01'
mock_notAfter().date.return_value = date
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=cert_path,
fullchain_path=cert_path, key_path=key_path)
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = mock_lineage
self._certonly_new_request_common(mock_client)
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue(cert_path in cert_msg)
self.assertTrue(date in cert_msg)
self.assertTrue(key_path in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
self.assertTrue(mock_subscription.called)
@mock.patch('certbot._internal.eff.handle_subscription')
def test_certonly_new_request_failure(self, mock_subscription):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = False
self.assertRaises(errors.Error,
self._certonly_new_request_common, mock_client)
self.assertFalse(mock_subscription.called)
def _test_renewal_common(self, due_for_renewal, extra_args, log_out=None,
args=None, should_renew=True, error_expected=False,
quiet_mode=False, expiry_date=datetime.datetime.now(),
reuse_key=False):
cert_path = test_util.vector_path('cert_512.pem')
chain_path = os.path.normpath(os.path.join(self.config.config_dir,
'live/foo.bar/fullchain.pem'))
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=chain_path,
cert_path=cert_path, fullchain_path=chain_path)
mock_lineage.should_autorenew.return_value = due_for_renewal
mock_lineage.has_pending_deployment.return_value = False
mock_lineage.names.return_value = ['isnot.org']
mock_lineage.private_key_type = 'RSA'
mock_certr = mock.MagicMock()
mock_key = mock.MagicMock(pem='pem_key')
mock_client = mock.MagicMock()
stdout = io.StringIO()
mock_client.obtain_certificate.return_value = (mock_certr, 'chain',
mock_key, 'csr')
def write_msg(message, *args, **kwargs): # pylint: disable=unused-argument
"""Write message to stdout."""
stdout.write(message)
try:
with mock.patch('certbot._internal.cert_manager.find_duplicative_certs') as mock_fdc:
mock_fdc.return_value = (mock_lineage, None)
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
with test_util.patch_get_utility() as mock_get_utility:
if not quiet_mode:
mock_get_utility().notification.side_effect = write_msg
with mock.patch('certbot._internal.main.renewal.OpenSSL') as mock_ssl:
mock_latest = mock.MagicMock()
mock_latest.get_issuer.return_value = "Artificial pretend"
mock_ssl.crypto.load_certificate.return_value = mock_latest
with mock.patch('certbot._internal.main.renewal.crypto_util') \
as mock_crypto_util:
mock_crypto_util.notAfter.return_value = expiry_date
with mock.patch('certbot._internal.eff.handle_subscription'):
if not args:
args = ['-d', 'isnot.org', '-a', 'standalone', 'certonly']
if extra_args:
args += extra_args
try:
ret, stdout, _, _ = self._call(args, stdout)
if ret:
print("Returned", ret)
raise AssertionError(ret)
assert not error_expected, "renewal should have errored"
except: # pylint: disable=bare-except
if not error_expected:
raise AssertionError(
"Unexpected renewal error:\n" +
traceback.format_exc())
if should_renew:
if reuse_key:
# The location of the previous live privkey.pem is passed
# to obtain_certificate
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'],
os.path.normpath(os.path.join(
self.config.config_dir, "live/sample-renewal/privkey.pem")))
else:
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'], None)
else:
self.assertEqual(mock_client.obtain_certificate.call_count, 0)
except:
self._dump_log()
raise
finally:
if log_out:
with open(os.path.join(self.config.logs_dir, "letsencrypt.log")) as lf:
self.assertTrue(log_out in lf.read())
return mock_lineage, mock_get_utility, stdout
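    # Editorial note: _test_renewal_common returns (mock_lineage,
    # mock_get_utility, stdout) so the tests below can assert on lineage
    # bookkeeping calls and on the output captured from the mocked utility.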
@mock.patch('certbot.crypto_util.notAfter')
def test_certonly_renewal(self, _):
lineage, get_utility, _ = self._test_renewal_common(True, [])
self.assertEqual(lineage.save_successor.call_count, 1)
lineage.update_all_links_to.assert_called_once_with(
lineage.latest_common_version())
cert_msg = get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('fullchain.pem' in cert_msg)
self.assertTrue('donate' in get_utility().add_message.call_args[0][0])
@mock.patch('certbot._internal.log.logging.handlers.RotatingFileHandler.doRollover')
@mock.patch('certbot.crypto_util.notAfter')
def test_certonly_renewal_triggers(self, _, __):
# --dry-run should force renewal
_, get_utility, _ = self._test_renewal_common(False, ['--dry-run', '--keep'],
log_out="simulating renewal")
self.assertEqual(get_utility().add_message.call_count, 1)
self.assertTrue('dry run' in get_utility().add_message.call_args[0][0])
self._test_renewal_common(False, ['--renew-by-default', '-tvv', '--debug'],
log_out="Auto-renewal forced")
self.assertEqual(get_utility().add_message.call_count, 1)
self._test_renewal_common(False, ['-tvv', '--debug', '--keep'],
log_out="not yet due", should_renew=False)
def _dump_log(self):
print("Logs:")
log_path = os.path.join(self.config.logs_dir, "letsencrypt.log")
if os.path.exists(log_path):
with open(log_path) as lf:
print(lf.read())
def test_renew_verb(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
def test_reuse_key(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--reuse-key"]
self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True)
@mock.patch('certbot._internal.storage.RenewableCert.save_successor')
def test_reuse_key_no_dry_run(self, unused_save_successor):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--reuse-key"]
self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True)
@mock.patch('sys.stdin')
def test_noninteractive_renewal_delay(self, stdin):
stdin.isatty.return_value = False
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
self.assertEqual(self.mock_sleep.call_count, 1)
# in main.py:
# sleep_time = random.randint(1, 60*8)
sleep_call_arg = self.mock_sleep.call_args[0][0]
self.assertTrue(1 <= sleep_call_arg <= 60*8)
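    # Editorial sketch of the behavior exercised above, paraphrasing the
    # main.py comment rather than quoting it:
    #   if not sys.stdin.isatty():
    #       time.sleep(random.randint(1, 60 * 8))  # spread unattended renewals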
@mock.patch('sys.stdin')
def test_interactive_no_renewal_delay(self, stdin):
stdin.isatty.return_value = True
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
self.assertEqual(self.mock_sleep.call_count, 0)
@mock.patch('certbot._internal.renewal.should_renew')
def test_renew_skips_recent_certs(self, should_renew):
should_renew.return_value = False
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
expiry = datetime.datetime.now() + datetime.timedelta(days=90)
_, _, stdout = self._test_renewal_common(False, extra_args=None, should_renew=False,
args=['renew'], expiry_date=expiry)
self.assertTrue('No renewals were attempted.' in stdout.getvalue())
self.assertTrue('The following certificates are not due for renewal yet:' in stdout.getvalue())
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_quiet_renew(self, _):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run"]
_, _, stdout = self._test_renewal_common(True, [], args=args, should_renew=True)
out = stdout.getvalue()
self.assertTrue("renew" in out)
args = ["renew", "--dry-run", "-q"]
_, _, stdout = self._test_renewal_common(True, [], args=args,
should_renew=True, quiet_mode=True)
out = stdout.getvalue()
self.assertEqual("", out)
def test_renew_hook_validation(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--post-hook=no-such-command"]
self._test_renewal_common(True, [], args=args, should_renew=False,
error_expected=True)
def test_renew_no_hook_validation(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--post-hook=no-such-command",
"--disable-hook-validation"]
with mock.patch("certbot._internal.hooks.post_hook"):
self._test_renewal_common(True, [], args=args, should_renew=True,
error_expected=False)
def test_renew_verb_empty_config(self):
rd = os.path.join(self.config.config_dir, 'renewal')
if not os.path.exists(rd):
filesystem.makedirs(rd)
with open(os.path.join(rd, 'empty.conf'), 'w'):
pass # leave the file empty
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(False, [], args=args, should_renew=False, error_expected=True)
def test_renew_with_certname(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
self._test_renewal_common(True, [], should_renew=True,
args=['renew', '--dry-run', '--cert-name', 'sample-renewal'])
def test_renew_with_bad_certname(self):
self._test_renewal_common(True, [], should_renew=False,
args=['renew', '--dry-run', '--cert-name', 'sample-renewal'],
error_expected=True)
def _make_dummy_renewal_config(self):
renewer_configs_dir = os.path.join(self.config.config_dir, 'renewal')
filesystem.makedirs(renewer_configs_dir)
with open(os.path.join(renewer_configs_dir, 'test.conf'), 'w') as f:
f.write("My contents don't matter")
def _test_renew_common(self, renewalparams=None, names=None,
assert_oc_called=None, **kwargs):
self._make_dummy_renewal_config()
with mock.patch('certbot._internal.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somepath/fullchain.pem"
if renewalparams is not None:
mock_lineage.configuration = {'renewalparams': renewalparams}
if names is not None:
mock_lineage.names.return_value = names
mock_rc.return_value = mock_lineage
with mock.patch('certbot._internal.main.renew_cert') as mock_renew_cert:
kwargs.setdefault('args', ['renew'])
self._test_renewal_common(True, None, should_renew=False, **kwargs)
if assert_oc_called is not None:
if assert_oc_called:
self.assertTrue(mock_renew_cert.called)
else:
self.assertFalse(mock_renew_cert.called)
def test_renew_no_renewalparams(self):
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_no_authenticator(self):
self._test_renew_common(renewalparams={}, assert_oc_called=False,
error_expected=True)
def test_renew_with_bad_int(self):
renewalparams = {'authenticator': 'webroot',
'rsa_key_size': 'over 9000'}
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
assert_oc_called=False)
def test_renew_with_nonetype_http01(self):
renewalparams = {'authenticator': 'webroot',
'http01_port': 'None'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_bad_domain(self):
renewalparams = {'authenticator': 'webroot'}
names = ['uniçodé.com']
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
names=names, assert_oc_called=False)
@mock.patch('certbot._internal.plugins.selection.choose_configurator_plugins')
def test_renew_with_configurator(self, mock_sel):
mock_sel.return_value = (mock.MagicMock(), mock.MagicMock())
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args='renew --configurator apache'.split())
def test_renew_plugin_config_restoration(self):
renewalparams = {'authenticator': 'webroot',
'webroot_path': 'None',
'webroot_imaginary_flag': '42'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_webroot_map(self):
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args=['renew', '--webroot-map', json.dumps({'example.com': tempfile.gettempdir()})])
def test_renew_reconstitute_error(self):
# pylint: disable=protected-access
with mock.patch('certbot._internal.main.renewal._reconstitute') as mock_reconstitute:
mock_reconstitute.side_effect = Exception
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_obtain_cert_error(self):
self._make_dummy_renewal_config()
with mock.patch('certbot._internal.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somewhere/fullchain.pem"
mock_rc.return_value = mock_lineage
mock_lineage.configuration = {
'renewalparams': {'authenticator': 'webroot'}}
with mock.patch('certbot._internal.main.renew_cert') as mock_renew_cert:
mock_renew_cert.side_effect = Exception
self._test_renewal_common(True, None, error_expected=True,
args=['renew'], should_renew=False)
def test_renew_with_bad_cli_args(self):
self._test_renewal_common(True, None, args='renew -d example.com'.split(),
should_renew=False, error_expected=True)
self._test_renewal_common(True, None, args='renew --csr {0}'.format(CSR).split(),
should_renew=False, error_expected=True)
def test_no_renewal_with_hooks(self):
_, _, stdout = self._test_renewal_common(
due_for_renewal=False, extra_args=None, should_renew=False,
args=['renew', '--post-hook',
'{0} -c "print(\'hello world\');"'
.format(sys.executable)])
self.assertTrue('No hooks were run.' in stdout.getvalue())
@test_util.patch_get_utility()
@mock.patch('certbot._internal.main._find_lineage_for_domains_and_certname')
@mock.patch('certbot._internal.main._init_le_client')
@mock.patch('certbot._internal.main._report_new_cert')
def test_certonly_reinstall(self, mock_report_new_cert, mock_init,
mock_renewal, mock_get_utility):
mock_renewal.return_value = ('reinstall', mock.MagicMock())
mock_init.return_value = mock_client = mock.MagicMock()
self._call(['-d', 'foo.bar', '-a', 'standalone', 'certonly'])
self.assertFalse(mock_client.obtain_certificate.called)
self.assertFalse(mock_client.obtain_and_enroll_certificate.called)
self.assertEqual(mock_get_utility().add_message.call_count, 0)
mock_report_new_cert.assert_not_called()
#self.assertTrue('donate' not in mock_get_utility().add_message.call_args[0][0])
def _test_certonly_csr_common(self, extra_args=None):
certr = 'certr'
chain = 'chain'
mock_client = mock.MagicMock()
mock_client.obtain_certificate_from_csr.return_value = (certr, chain)
cert_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/cert_512.pem'))
full_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/fullchain.pem'))
mock_client.save_certificate.return_value = cert_path, None, full_path
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
with test_util.patch_get_utility() as mock_get_utility:
chain_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/chain.pem'))
args = ('-a standalone certonly --csr {0} --cert-path {1} '
'--chain-path {2} --fullchain-path {3}').format(
CSR, cert_path, chain_path, full_path).split()
if extra_args:
args += extra_args
with mock.patch('certbot._internal.main.crypto_util'):
self._call(args)
if '--dry-run' in args:
self.assertFalse(mock_client.save_certificate.called)
else:
mock_client.save_certificate.assert_called_once_with(
certr, chain, cert_path, chain_path, full_path)
return mock_get_utility
@mock.patch('certbot._internal.eff.handle_subscription')
def test_certonly_csr(self, mock_subscription):
mock_get_utility = self._test_certonly_csr_common()
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('fullchain.pem' in cert_msg)
self.assertFalse('Your key file has been saved at' in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
self.assertTrue(mock_subscription.called)
def test_certonly_csr_dry_run(self):
mock_get_utility = self._test_certonly_csr_common(['--dry-run'])
self.assertEqual(mock_get_utility().add_message.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main.client.acme_client')
def test_revoke_with_key(self, mock_acme_client,
mock_delete_if_appropriate):
mock_delete_if_appropriate.return_value = False
server = 'foo.bar'
self._call_no_clientmock(['--cert-path', SS_CERT_PATH, '--key-path', RSA2048_KEY_PATH,
'--server', server, 'revoke'])
with open(RSA2048_KEY_PATH, 'rb') as f:
mock_acme_client.BackwardsCompatibleClientV2.assert_called_once_with(
mock.ANY, jose.JWK.load(f.read()), server)
with open(SS_CERT_PATH, 'rb') as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = mock_acme_client.BackwardsCompatibleClientV2().revoke
mock_revoke.assert_called_once_with(
jose.ComparableX509(cert),
mock.ANY)
def test_revoke_with_key_mismatch(self):
server = 'foo.bar'
self.assertRaises(errors.Error, self._call_no_clientmock,
['--cert-path', CERT, '--key-path', KEY,
'--server', server, 'revoke'])
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main._determine_account')
def test_revoke_without_key(self, mock_determine_account,
mock_delete_if_appropriate):
mock_delete_if_appropriate.return_value = False
mock_determine_account.return_value = (mock.MagicMock(), None)
_, _, _, client = self._call(['--cert-path', CERT, 'revoke'])
with open(CERT) as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = client.acme_from_config_key().revoke
mock_revoke.assert_called_once_with(
jose.ComparableX509(cert),
mock.ANY)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_register(self, _):
with mock.patch('certbot._internal.main.client') as mocked_client:
acc = mock.MagicMock()
acc.id = "imaginary_account"
mocked_client.register.return_value = (acc, "worked")
self._call_no_clientmock(["register", "--email", "user@example.org"])
# TODO: It would be more correct to explicitly check that
# _determine_account() gets called in the above case,
# but coverage statistics should also show that it did.
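            # A sketch of that more explicit check (hypothetical, not part of
            # the original test suite):
            #
            #     with mock.patch('certbot._internal.main._determine_account') as mock_det:
            #         mock_det.return_value = (acc, None)
            #         self._call_no_clientmock(["register", "--email", "user@example.org"])
            #         self.assertTrue(mock_det.called)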
with mock.patch('certbot._internal.main.account') as mocked_account:
mocked_storage = mock.MagicMock()
mocked_account.AccountFileStorage.return_value = mocked_storage
mocked_storage.find_all.return_value = ["an account"]
x = self._call_no_clientmock(["register", "--email", "user@example.org"])
self.assertTrue("There is an existing account" in x[0])
@mock.patch('certbot._internal.plugins.selection.choose_configurator_plugins')
@mock.patch('certbot._internal.updater._run_updaters')
def test_plugin_selection_error(self, mock_run, mock_choose):
mock_choose.side_effect = errors.PluginSelectionError
self.assertRaises(errors.PluginSelectionError, main.renew_cert,
None, None, None)
self.config.dry_run = False
updater.run_generic_updaters(self.config, None, None)
        # Make sure we're returning None, and hence not trying to run the
        # updaters when no installer is available.
self.assertFalse(mock_run.called)
class UnregisterTest(unittest.TestCase):
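    """Tests for certbot._internal.main.unregister."""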
def setUp(self):
self.patchers = {
'_determine_account': mock.patch('certbot._internal.main._determine_account'),
'account': mock.patch('certbot._internal.main.account'),
'client': mock.patch('certbot._internal.main.client'),
'get_utility': test_util.patch_get_utility()}
self.mocks = {k: v.start() for k, v in self.patchers.items()}
def tearDown(self):
for patch in self.patchers.values():
patch.stop()
def test_abort_unregister(self):
self.mocks['account'].AccountFileStorage.return_value = mock.Mock()
util_mock = self.mocks['get_utility']()
util_mock.yesno.return_value = False
config = mock.Mock()
unused_plugins = mock.Mock()
res = main.unregister(config, unused_plugins)
self.assertEqual(res, "Deactivation aborted.")
@mock.patch("certbot._internal.main.display_util.notify")
def test_unregister(self, mock_notify):
mocked_storage = mock.MagicMock()
mocked_storage.find_all.return_value = ["an account"]
self.mocks['account'].AccountFileStorage.return_value = mocked_storage
self.mocks['_determine_account'].return_value = (mock.MagicMock(), "foo")
cb_client = mock.MagicMock()
self.mocks['client'].Client.return_value = cb_client
config = mock.MagicMock()
unused_plugins = mock.MagicMock()
res = main.unregister(config, unused_plugins)
self.assertTrue(res is None)
mock_notify.assert_called_once_with("Account deactivated.")
def test_unregister_no_account(self):
mocked_storage = mock.MagicMock()
mocked_storage.find_all.return_value = []
self.mocks['account'].AccountFileStorage.return_value = mocked_storage
cb_client = mock.MagicMock()
self.mocks['client'].Client.return_value = cb_client
config = mock.MagicMock()
unused_plugins = mock.MagicMock()
res = main.unregister(config, unused_plugins)
m = "Could not find existing account to deactivate."
self.assertEqual(res, m)
self.assertFalse(cb_client.acme.deactivate_registration.called)
class MakeOrVerifyNeededDirs(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.make_or_verify_needed_dirs."""
@mock.patch("certbot._internal.main.util")
def test_it(self, mock_util):
main.make_or_verify_needed_dirs(self.config)
for core_dir in (self.config.config_dir, self.config.work_dir,):
mock_util.set_up_core_dir.assert_any_call(
core_dir, constants.CONFIG_DIRS_MODE,
self.config.strict_permissions
)
hook_dirs = (self.config.renewal_pre_hooks_dir,
self.config.renewal_deploy_hooks_dir,
self.config.renewal_post_hooks_dir,)
for hook_dir in hook_dirs:
# default mode of 755 is used
mock_util.make_or_verify_dir.assert_any_call(
hook_dir, strict=self.config.strict_permissions)
class EnhanceTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.enhance."""
def setUp(self):
super().setUp()
self.get_utility_patch = test_util.patch_get_utility()
self.mock_get_utility = self.get_utility_patch.start()
self.mockinstaller = mock.MagicMock(spec=enhancements.AutoHSTSEnhancement)
def tearDown(self):
self.get_utility_patch.stop()
def _call(self, args):
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
with mock.patch('certbot._internal.cert_manager.get_certnames') as mock_certs:
mock_certs.return_value = ['example.com']
with mock.patch('certbot._internal.cert_manager.domains_for_certname') as mock_dom:
mock_dom.return_value = ['example.com']
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_client = mock.MagicMock()
mock_client.config = config
mock_init.return_value = mock_client
main.enhance(config, plugins)
return mock_client # returns the client
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main._find_domains_or_certname')
def test_selection_question(self, mock_find, mock_choose, mock_lineage, _rec):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ['example.com']
mock_find.return_value = (None, None)
with mock.patch('certbot._internal.main.plug_sel.pick_installer') as mock_pick:
self._call(['enhance', '--redirect'])
self.assertTrue(mock_pick.called)
# Check that the message includes "enhancements"
self.assertTrue("enhancements" in mock_pick.call_args[0][3])
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main._find_domains_or_certname')
def test_selection_auth_warning(self, mock_find, mock_choose, mock_lineage, _rec):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
mock_find.return_value = (None, None)
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
with mock.patch('certbot._internal.main.plug_sel.logger.warning') as mock_log:
mock_client = self._call(['enhance', '-a', 'webroot', '--redirect'])
self.assertTrue(mock_log.called)
self.assertTrue("make sense" in mock_log.call_args[0][0])
self.assertTrue(mock_client.enhance_config.called)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_enhance_config_call(self, _rec, mock_choose, mock_lineage):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
mock_client = self._call(['enhance', '--redirect', '--hsts'])
req_enh = ["redirect", "hsts"]
not_req_enh = ["uir"]
self.assertTrue(mock_client.enhance_config.called)
self.assertTrue(
all(getattr(mock_client.config, e) for e in req_enh))
self.assertFalse(
any(getattr(mock_client.config, e) for e in not_req_enh))
self.assertTrue(
"example.com" in mock_client.enhance_config.call_args[0][0])
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_enhance_noninteractive(self, _rec, mock_choose, mock_lineage):
mock_lineage.return_value = mock.MagicMock(
chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
mock_client = self._call(['enhance', '--redirect',
'--hsts', '--non-interactive'])
self.assertTrue(mock_client.enhance_config.called)
self.assertFalse(mock_choose.called)
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_user_abort_domains(self, _rec, mock_choose):
mock_choose.return_value = []
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
self.assertRaises(errors.Error,
self._call,
['enhance', '--redirect', '--hsts'])
def test_no_enhancements_defined(self):
self.assertRaises(errors.MisconfigurationError,
self._call, ['enhance', '-a', 'null'])
@mock.patch('certbot._internal.main.plug_sel.choose_configurator_plugins')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_plugin_selection_error(self, _rec, mock_choose, mock_pick):
mock_choose.return_value = ["example.com"]
mock_pick.return_value = (None, None)
mock_pick.side_effect = errors.PluginSelectionError()
mock_client = self._call(['enhance', '--hsts'])
self.assertFalse(mock_client.enhance_config.called)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@test_util.patch_get_utility()
def test_enhancement_enable(self, _, _rec, mock_inst, mock_choose, mock_lineage):
mock_inst.return_value = self.mockinstaller
mock_choose.return_value = ["example.com", "another.tld"]
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
self._call(['enhance', '--auto-hsts'])
self.assertTrue(self.mockinstaller.enable_autohsts.called)
self.assertEqual(self.mockinstaller.enable_autohsts.call_args[0][1],
["example.com", "another.tld"])
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@test_util.patch_get_utility()
def test_enhancement_enable_not_supported(self, _, _rec, mock_inst, mock_choose, mock_lineage):
mock_inst.return_value = null.Installer(self.config, "null")
mock_choose.return_value = ["example.com", "another.tld"]
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
self.assertRaises(
errors.NotSupportedError,
self._call, ['enhance', '--auto-hsts'])
def test_enhancement_enable_conflict(self):
self.assertRaises(
errors.Error,
self._call, ['enhance', '--auto-hsts', '--hsts'])
class InstallTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.install."""
def setUp(self):
super().setUp()
self.mockinstaller = mock.MagicMock(spec=enhancements.AutoHSTSEnhancement)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_install_enhancement_not_supported(self, mock_inst, _rec):
mock_inst.return_value = null.Installer(self.config, "null")
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.config.certname = "nonexistent"
self.assertRaises(errors.NotSupportedError,
main.install,
self.config, plugins)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_install_enhancement_no_certname(self, mock_inst, _rec):
mock_inst.return_value = self.mockinstaller
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.config.certname = None
self.config.key_path = "/tmp/nonexistent"
self.config.cert_path = "/tmp/nonexistent"
self.assertRaises(errors.ConfigurationError,
main.install,
self.config, plugins)
class UpdateAccountTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.main.update_account"""
def setUp(self):
patches = {
'account': mock.patch('certbot._internal.main.account'),
'atexit': mock.patch('certbot.util.atexit'),
'client': mock.patch('certbot._internal.main.client'),
'determine_account': mock.patch('certbot._internal.main._determine_account'),
'notify': mock.patch('certbot._internal.main.display_util.notify'),
'prepare_sub': mock.patch('certbot._internal.eff.prepare_subscription'),
'util': test_util.patch_get_utility()
}
        self.mocks = {k: v.start() for k, v in patches.items()}
for patch in patches.values():
self.addCleanup(patch.stop)
return super().setUp()
def _call(self, args):
with mock.patch('certbot._internal.main.sys.stdout'), \
mock.patch('certbot._internal.main.sys.stderr'):
args = ['--config-dir', self.config.config_dir,
'--work-dir', self.config.work_dir,
'--logs-dir', self.config.logs_dir, '--text'] + args
return main.main(args[:]) # NOTE: parser can alter its args!
def _prepare_mock_account(self):
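        """Create a mock account in mock storage and wire it into the patched modules."""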
mock_storage = mock.MagicMock()
mock_account = mock.MagicMock()
mock_regr = mock.MagicMock()
mock_storage.find_all.return_value = [mock_account]
self.mocks['account'].AccountFileStorage.return_value = mock_storage
mock_account.regr.body = mock_regr.body
self.mocks['determine_account'].return_value = (mock_account, mock.MagicMock())
return (mock_account, mock_storage, mock_regr)
def _test_update_no_contact(self, args):
"""Utility to assert that email removal is handled correctly"""
(_, mock_storage, mock_regr) = self._prepare_mock_account()
result = self._call(args)
# When update succeeds, the return value of update_account() is None
self.assertIsNone(result)
# We submitted a registration to the server
self.assertEqual(self.mocks['client'].Client().acme.update_registration.call_count, 1)
mock_regr.body.update.assert_called_with(contact=())
# We got an update from the server and persisted it
self.assertEqual(mock_storage.update_regr.call_count, 1)
# We should have notified the user
self.mocks['notify'].assert_called_with(
'Any contact information associated with this account has been removed.'
)
# We should not have called subscription because there's no email
self.mocks['prepare_sub'].assert_not_called()
def test_no_existing_accounts(self):
"""Test that no existing account is handled correctly"""
mock_storage = mock.MagicMock()
mock_storage.find_all.return_value = []
self.mocks['account'].AccountFileStorage.return_value = mock_storage
self.assertEqual(self._call(['update_account', '--email', 'user@example.org']),
'Could not find an existing account to update.')
def test_update_account_remove_email(self):
"""Test that --register-unsafely-without-email is handled as no email"""
self._test_update_no_contact(['update_account', '--register-unsafely-without-email'])
def test_update_account_empty_email(self):
"""Test that providing an empty email is handled as no email"""
self._test_update_no_contact(['update_account', '-m', ''])
@mock.patch('certbot._internal.main.display_ops.get_email')
def test_update_account_with_email(self, mock_email):
"""Test that updating with a singular email is handled correctly"""
mock_email.return_value = 'user@example.com'
(_, mock_storage, _) = self._prepare_mock_account()
mock_client = mock.MagicMock()
self.mocks['client'].Client.return_value = mock_client
result = self._call(['update_account'])
# None if registration succeeds
self.assertIsNone(result)
# We should have updated the server
self.assertEqual(mock_client.acme.update_registration.call_count, 1)
# We should have updated the account on disk
self.assertEqual(mock_storage.update_regr.call_count, 1)
# Subscription should have been prompted
self.assertEqual(self.mocks['prepare_sub'].call_count, 1)
# Should have printed the email
self.mocks['notify'].assert_called_with(
'Your e-mail address was updated to user@example.com.')
def test_update_account_with_multiple_emails(self):
"""Test that multiple email addresses are handled correctly"""
(_, mock_storage, mock_regr) = self._prepare_mock_account()
self.assertIsNone(
self._call(['update_account', '-m', 'user@example.com,user@example.org'])
)
mock_regr.body.update.assert_called_with(
contact=['mailto:user@example.com', 'mailto:user@example.org']
)
self.assertEqual(mock_storage.update_regr.call_count, 1)
self.mocks['notify'].assert_called_with(
'Your e-mail address was updated to user@example.com,user@example.org.')
if __name__ == '__main__':
unittest.main() # pragma: no cover
import datetime
from importlib import reload as reload_module
import io
import itertools
import json
import shutil
import sys
import tempfile
import traceback
import unittest
from typing import List
import josepy as jose
import pytz
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot._internal import account
from certbot._internal import cli
from certbot._internal import configuration
from certbot._internal import constants
from certbot._internal import main
from certbot._internal import updater
from certbot._internal.plugins import disco
from certbot._internal.plugins import manual
from certbot._internal.plugins import null
from certbot.compat import filesystem
from certbot.compat import os
from certbot.plugins import enhancements
import certbot.tests.util as test_util
try:
import mock
except ImportError:
from unittest import mock
CERT_PATH = test_util.vector_path('cert_512.pem')
CERT = test_util.vector_path('cert_512.pem')
CSR = test_util.vector_path('csr_512.der')
KEY = test_util.vector_path('rsa256_key.pem')
JWK = jose.JWKRSA.load(test_util.load_vector('rsa512_key.pem'))
RSA2048_KEY_PATH = test_util.vector_path('rsa2048_key.pem')
SS_CERT_PATH = test_util.vector_path('cert_2048.pem')
class TestHandleCerts(unittest.TestCase):
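    """Tests for certbot._internal.main's certificate-request helper functions."""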
@mock.patch("certbot._internal.main._handle_unexpected_key_type_migration")
def test_handle_identical_cert_request_pending(self, mock_handle_migration):
mock_lineage = mock.Mock()
mock_lineage.ensure_deployed.return_value = False
ret = main._handle_identical_cert_request(mock.Mock(), mock_lineage)
self.assertEqual(ret, ("reinstall", mock_lineage))
self.assertTrue(mock_handle_migration.called)
@mock.patch("certbot._internal.main._handle_unexpected_key_type_migration")
def test_handle_subset_cert_request(self, mock_handle_migration):
mock_config = mock.Mock()
mock_config.expand = True
mock_lineage = mock.Mock()
mock_lineage.names.return_value = ["dummy1", "dummy2"]
ret = main._handle_subset_cert_request(mock_config, ["dummy1"], mock_lineage)
self.assertEqual(ret, ("renew", mock_lineage))
self.assertTrue(mock_handle_migration.called)
@mock.patch("certbot._internal.main.cli.set_by_cli")
def test_handle_unexpected_key_type_migration(self, mock_set):
config = mock.Mock()
config.key_type = "rsa"
cert = mock.Mock()
cert.private_key_type = "ecdsa"
mock_set.return_value = True
main._handle_unexpected_key_type_migration(config, cert)
mock_set.return_value = False
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
mock_set.side_effect = lambda var: var != "certname"
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
mock_set.side_effect = lambda var: var != "key_type"
with self.assertRaises(errors.Error) as raised:
main._handle_unexpected_key_type_migration(config, cert)
self.assertTrue("Please provide both --cert-name and --key-type" in str(raised.exception))
class RunTest(test_util.ConfigTestCase):
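    """Tests for certbot._internal.main.run."""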
def setUp(self):
super().setUp()
self.domain = 'example.org'
patches = [
mock.patch('certbot._internal.main._get_and_save_cert'),
mock.patch('certbot._internal.main.display_ops.success_installation'),
mock.patch('certbot._internal.main.display_ops.success_renewal'),
mock.patch('certbot._internal.main._init_le_client'),
mock.patch('certbot._internal.main._suggest_donation_if_appropriate'),
mock.patch('certbot._internal.main._report_new_cert'),
mock.patch('certbot._internal.main._find_cert'),
mock.patch('certbot._internal.eff.handle_subscription'),
]
self.mock_auth = patches[0].start()
self.mock_success_installation = patches[1].start()
self.mock_success_renewal = patches[2].start()
self.mock_init = patches[3].start()
self.mock_suggest_donation = patches[4].start()
self.mock_report_cert = patches[5].start()
self.mock_find_cert = patches[6].start()
self.mock_subscription = patches[7].start()
for patch in patches:
self.addCleanup(patch.stop)
def _call(self):
args = '-a webroot -i null -d {0}'.format(self.domain).split()
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
from certbot._internal.main import run
run(config, plugins)
def test_newcert_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = True, None
self._call()
self.mock_success_installation.assert_called_once_with([self.domain])
def test_reinstall_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = False, mock.Mock()
self._call()
self.mock_success_installation.assert_called_once_with([self.domain])
def test_renewal_success(self):
self.mock_auth.return_value = mock.Mock()
self.mock_find_cert.return_value = True, mock.Mock()
self._call()
self.mock_success_renewal.assert_called_once_with([self.domain])
@mock.patch('certbot._internal.main.plug_sel.choose_configurator_plugins')
def test_run_enhancement_not_supported(self, mock_choose):
mock_choose.return_value = (null.Installer(self.config, "null"), None)
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.assertRaises(errors.NotSupportedError,
main.run,
self.config, plugins)
class CertonlyTest(unittest.TestCase):
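    """Tests for certbot._internal.main.certonly."""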
def setUp(self):
self.get_utility_patch = test_util.patch_get_utility()
self.mock_get_utility = self.get_utility_patch.start()
def tearDown(self):
self.get_utility_patch.stop()
def _call(self, args):
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
with mock.patch('certbot._internal.main._suggest_donation_if_appropriate'):
with mock.patch('certbot._internal.eff.handle_subscription'):
main.certonly(config, plugins)
return mock_init()
@mock.patch('certbot._internal.main._find_cert')
@mock.patch('certbot._internal.main._get_and_save_cert')
@mock.patch('certbot._internal.main._report_new_cert')
def test_no_reinstall_text_pause(self, unused_report, mock_auth,
mock_find_cert):
mock_notification = self.mock_get_utility().notification
mock_notification.side_effect = self._assert_no_pause
mock_auth.return_value = mock.Mock()
mock_find_cert.return_value = False, None
self._call('certonly --webroot -d example.com'.split())
def _assert_no_pause(self, message, pause=True):
self.assertFalse(pause)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
@mock.patch('certbot._internal.renewal.renew_cert')
@mock.patch('certbot._internal.main._handle_unexpected_key_type_migration')
@mock.patch('certbot._internal.main._report_new_cert')
def test_find_lineage_for_domains_and_certname(self, mock_report_cert,
mock_handle_type, mock_renew_cert, mock_domains, mock_lineage):
domains = ['example.com', 'test.org']
mock_domains.return_value = domains
mock_lineage.names.return_value = domains
self._call(('certonly --webroot -d example.com -d test.org '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 1)
self.assertEqual(mock_domains.call_count, 1)
self.assertEqual(mock_renew_cert.call_count, 1)
self.assertEqual(mock_report_cert.call_count, 1)
self.assertEqual(mock_handle_type.call_count, 1)
self._call(('certonly --webroot -d example.com -d test.com '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 2)
self.assertEqual(mock_domains.call_count, 2)
self.assertEqual(mock_renew_cert.call_count, 2)
self.assertEqual(mock_report_cert.call_count, 2)
self.assertEqual(mock_handle_type.call_count, 2)
self.mock_get_utility().yesno.return_value = False
self.assertRaises(errors.ConfigurationError, self._call,
'certonly --webroot -d example.com -d test.com --cert-name example.com'.split())
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
@mock.patch('certbot.display.ops.choose_names')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main._report_new_cert')
def test_find_lineage_for_domains_new_certname(self, mock_report_cert,
mock_lineage, mock_choose_names, mock_domains_for_certname):
mock_lineage.return_value = None
self._call(('certonly --webroot -d example.com -d test.com '
'--cert-name example.com').split())
self.assertEqual(mock_lineage.call_count, 1)
self.assertEqual(mock_report_cert.call_count, 1)
mock_choose_names.return_value = ["somename"]
mock_domains_for_certname.return_value = None
self._call(('certonly --webroot --cert-name example.com').split())
self.assertIs(mock_choose_names.called, True)
class FindDomainsOrCertnameTest(unittest.TestCase):
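    """Tests for certbot._internal.main._find_domains_or_certname."""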
@mock.patch('certbot.display.ops.choose_names')
def test_display_ops(self, mock_choose_names):
mock_config = mock.Mock(domains=None, certname=None)
mock_choose_names.return_value = "domainname"
# pylint: disable=protected-access
self.assertEqual(main._find_domains_or_certname(mock_config, None),
("domainname", None))
@mock.patch('certbot.display.ops.choose_names')
def test_no_results(self, mock_choose_names):
mock_config = mock.Mock(domains=None, certname=None)
mock_choose_names.return_value = []
# pylint: disable=protected-access
self.assertRaises(errors.Error, main._find_domains_or_certname, mock_config, None)
@mock.patch('certbot._internal.cert_manager.domains_for_certname')
def test_grab_domains(self, mock_domains):
mock_config = mock.Mock(domains=None, certname="one.com")
mock_domains.return_value = ["one.com", "two.com"]
# pylint: disable=protected-access
self.assertEqual(main._find_domains_or_certname(mock_config, None),
(["one.com", "two.com"], "one.com"))
class RevokeTest(test_util.TempDirTestCase):
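    """Tests for certbot._internal.main.revoke."""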
def setUp(self):
super().setUp()
shutil.copy(CERT_PATH, self.tempdir)
self.tmp_cert_path = os.path.abspath(os.path.join(self.tempdir, 'cert_512.pem'))
patches = [
mock.patch('acme.client.BackwardsCompatibleClientV2'),
mock.patch('certbot._internal.client.Client'),
mock.patch('certbot._internal.main._determine_account'),
mock.patch('certbot._internal.main.display_ops.success_revocation')
]
self.mock_acme_client = patches[0].start()
patches[1].start()
self.mock_determine_account = patches[2].start()
self.mock_success_revoke = patches[3].start()
for patch in patches:
self.addCleanup(patch.stop)
from certbot._internal.account import Account
self.regr = mock.MagicMock()
self.meta = Account.Meta(
creation_host="test.certbot.org",
creation_dt=datetime.datetime(
2015, 7, 4, 14, 4, 10, tzinfo=pytz.UTC))
self.acc = Account(self.regr, JWK, self.meta)
self.mock_determine_account.return_value = (self.acc, None)
def _call(self, args=None):
if not args:
            args = 'revoke --cert-path={0} '
            args = args.format(self.tmp_cert_path).split()
        # set_by_cli caches a "detector" parser between invocations; reset it so
        # each run re-evaluates which flags were actually set on the command line.
        cli.set_by_cli.detector = None
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
from certbot._internal.main import revoke
revoke(config, plugins)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main.client.acme_client')
def test_revoke_with_reason(self, mock_acme_client,
mock_delete_if_appropriate):
mock_delete_if_appropriate.return_value = False
mock_revoke = mock_acme_client.BackwardsCompatibleClientV2().revoke
expected = []
for reason, code in constants.REVOCATION_REASONS.items():
args = 'revoke --cert-path={0} --reason {1}'.format(self.tmp_cert_path, reason).split()
self._call(args)
expected.append(mock.call(mock.ANY, code))
args = 'revoke --cert-path={0} --reason {1}'.format(self.tmp_cert_path,
reason.upper()).split()
self._call(args)
expected.append(mock.call(mock.ANY, code))
self.assertEqual(expected, mock_revoke.call_args_list)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path,
server="https://acme.example")
args = 'revoke --cert-name=example.com'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(mock.ANY, mock.ANY, 'https://acme.example')
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname_and_server(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path,
server="https://acme.example")
args = 'revoke --cert-name=example.com --server https://other.example'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(mock.ANY, mock.ANY, 'https://other.example')
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.storage.RenewableCert')
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
def test_revoke_by_certname_empty_server(self, unused_mock_renewal_file_for_certname,
mock_cert, mock_delete_if_appropriate):
mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path, server=None)
args = 'revoke --cert-name=example.com'.split()
mock_delete_if_appropriate.return_value = False
self._call(args)
self.mock_acme_client.assert_called_once_with(
mock.ANY, mock.ANY, constants.CLI_DEFAULTS['server'])
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
@mock.patch('certbot._internal.main._delete_if_appropriate')
def test_revocation_success(self, mock_delete_if_appropriate):
self._call()
mock_delete_if_appropriate.return_value = False
self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
def test_revocation_error(self):
from acme import errors as acme_errors
self.mock_acme_client.side_effect = acme_errors.ClientError()
self.assertRaises(acme_errors.ClientError, self._call)
self.mock_success_revoke.assert_not_called()
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_revocation_with_prompt(self, mock_get_utility,
mock_delete, mock_delete_if_appropriate):
mock_get_utility().yesno.return_value = False
mock_delete_if_appropriate.return_value = False
self._call()
self.assertFalse(mock_delete.called)
class DeleteIfAppropriateTest(test_util.ConfigTestCase):
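    """Tests for certbot._internal.main._delete_if_appropriate."""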
def _call(self, mock_config):
from certbot._internal.main import _delete_if_appropriate
_delete_if_appropriate(mock_config)
def _test_delete_opt_out_common(self):
with mock.patch('certbot._internal.cert_manager.delete') as mock_delete:
self._call(self.config)
mock_delete.assert_not_called()
@test_util.patch_get_utility()
def test_delete_flag_opt_out(self, unused_mock_get_utility):
self.config.delete_after_revoke = False
self._test_delete_opt_out_common()
@test_util.patch_get_utility()
def test_delete_prompt_opt_out(self, mock_get_utility):
util_mock = mock_get_utility()
util_mock.yesno.return_value = False
self._test_delete_opt_out_common()
@mock.patch("certbot._internal.main.logger.warning")
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.delete')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@test_util.patch_get_utility()
def test_overlapping_archive_dirs(self, mock_get_utility,
mock_cert_path_to_lineage, mock_archive,
mock_match_and_check_overlaps, mock_delete,
mock_renewal_file_for_certname, mock_warning):
# pylint: disable = unused-argument
config = self.config
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_match_and_check_overlaps.side_effect = errors.OverlappingMatchFound()
self._call(config)
mock_delete.assert_not_called()
self.assertEqual(mock_warning.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.delete')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@test_util.patch_get_utility()
def test_cert_path_only(self, mock_get_utility,
mock_cert_path_to_lineage, mock_delete, mock_archive,
mock_overlapping_archive_dirs, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_overlapping_archive_dirs.return_value = False
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_noninteractive_deletion(self, mock_get_utility, mock_delete,
mock_cert_path_to_lineage, mock_full_archive_dir,
mock_match_and_check_overlaps, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.namespace.noninteractive_mode = True
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_full_archive_dir.return_value = ""
mock_match_and_check_overlaps.return_value = ""
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
@mock.patch('certbot._internal.storage.renewal_file_for_certname')
@mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
@mock.patch('certbot._internal.storage.full_archive_path')
@mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
@mock.patch('certbot._internal.cert_manager.delete')
@test_util.patch_get_utility()
def test_opt_in_deletion(self, mock_get_utility, mock_delete,
mock_cert_path_to_lineage, mock_full_archive_dir,
mock_match_and_check_overlaps, mock_renewal_file_for_certname):
# pylint: disable = unused-argument
config = self.config
config.namespace.delete_after_revoke = True
config.cert_path = "/some/reasonable/path"
config.certname = ""
mock_cert_path_to_lineage.return_value = "example.com"
mock_full_archive_dir.return_value = ""
mock_match_and_check_overlaps.return_value = ""
self._call(config)
self.assertEqual(mock_delete.call_count, 1)
self.assertFalse(mock_get_utility().yesno.called)
class DetermineAccountTest(test_util.ConfigTestCase):
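    """Tests for certbot._internal.main._determine_account."""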
def setUp(self):
super().setUp()
self.config.account = None
self.config.email = None
self.config.register_unsafely_without_email = False
self.accs = [mock.MagicMock(id='x'), mock.MagicMock(id='y')]
self.account_storage = account.AccountMemoryStorage()
# For use in saving accounts: fake out the new_authz URL.
self.mock_client = mock.MagicMock()
self.mock_client.directory.new_authz = "hi"
def _call(self):
# pylint: disable=protected-access
from certbot._internal.main import _determine_account
with mock.patch('certbot._internal.main.account.AccountFileStorage') as mock_storage, \
test_util.patch_get_utility():
mock_storage.return_value = self.account_storage
return _determine_account(self.config)
def test_args_account_set(self):
self.account_storage.save(self.accs[1], self.mock_client)
self.config.account = self.accs[1].id
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
def test_single_account(self):
self.account_storage.save(self.accs[0], self.mock_client)
self.assertEqual((self.accs[0], None), self._call())
self.assertEqual(self.accs[0].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('certbot._internal.client.display_ops.choose_account')
def test_multiple_accounts(self, mock_choose_accounts):
for acc in self.accs:
self.account_storage.save(acc, self.mock_client)
mock_choose_accounts.return_value = self.accs[1]
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(
set(mock_choose_accounts.call_args[0][0]), set(self.accs))
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('certbot._internal.client.display_ops.get_email')
@mock.patch('certbot._internal.main.display_util.notify')
def test_no_accounts_no_email(self, mock_notify, mock_get_email):
mock_get_email.return_value = 'foo@bar.baz'
with mock.patch('certbot._internal.main.client') as client:
client.register.return_value = (
self.accs[0], mock.sentinel.acme)
self.assertEqual((self.accs[0], mock.sentinel.acme), self._call())
client.register.assert_called_once_with(
self.config, self.account_storage, tos_cb=mock.ANY)
self.assertEqual(self.accs[0].id, self.config.account)
self.assertEqual('foo@bar.baz', self.config.email)
mock_notify.assert_called_once_with('Account registered.')
def test_no_accounts_email(self):
self.config.email = 'other email'
with mock.patch('certbot._internal.main.client') as client:
client.register.return_value = (self.accs[1], mock.sentinel.acme)
self._call()
self.assertEqual(self.accs[1].id, self.config.account)
self.assertEqual('other email', self.config.email)
class MainTest(test_util.ConfigTestCase):
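    """End-to-end style tests that drive certbot._internal.main.main()."""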
def setUp(self):
super().setUp()
filesystem.mkdir(self.config.logs_dir)
self.standard_args = ['--config-dir', self.config.config_dir,
'--work-dir', self.config.work_dir,
'--logs-dir', self.config.logs_dir, '--text']
self.mock_sleep = mock.patch('time.sleep').start()
def tearDown(self):
# Reset globals in cli
reload_module(cli)
super().tearDown()
def _call(self, args, stdout=None, mockisfile=False):
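        """Run main.main() with the client module mocked out.

        With mockisfile=True, paths ending in cert/chain/privkey are treated
        as existing files. Returns (ret, stdout, stderr, client).
        """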
if mockisfile:
            orig_isfile = os.path.isfile  # keep a handle on the real os.path.isfile
def mock_isfile(fn, *args, **kwargs): # pylint: disable=unused-argument
if (fn.endswith("cert") or
fn.endswith("chain") or
fn.endswith("privkey")):
return True
                return orig_isfile(fn)
with mock.patch("certbot.compat.os.path.isfile") as mock_if:
mock_if.side_effect = mock_isfile
with mock.patch('certbot._internal.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
else:
with mock.patch('certbot._internal.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
def _call_no_clientmock(self, args, stdout=None):
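        """Run main.main() with stdout/stderr captured but without mocking the client."""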
args = self.standard_args + args
toy_stdout = stdout if stdout else io.StringIO()
with mock.patch('certbot._internal.main.sys.stdout', new=toy_stdout):
with mock.patch('certbot._internal.main.sys.stderr') as stderr:
with mock.patch("certbot.util.atexit"):
ret = main.main(args[:]) # NOTE: parser can alter its args!
return ret, toy_stdout, stderr
def test_no_flags(self):
with mock.patch('certbot._internal.main.run') as mock_run:
self._call([])
self.assertEqual(1, mock_run.call_count)
def test_version_string_program_name(self):
toy_out = io.StringIO()
toy_err = io.StringIO()
with mock.patch('certbot._internal.main.sys.stdout', new=toy_out):
with mock.patch('certbot._internal.main.sys.stderr', new=toy_err):
try:
main.main(["--version"])
except SystemExit:
pass
finally:
output = toy_out.getvalue() or toy_err.getvalue()
self.assertTrue("certbot" in output, "Output is {0}".format(output))
def _cli_missing_flag(self, args, message):
exc = None
try:
with mock.patch('certbot._internal.main.sys.stderr'):
main.main(self.standard_args + args[:]) # NOTE: parser can alter its args!
except errors.MissingCommandlineFlag as exc_:
exc = exc_
self.assertTrue(message in str(exc))
self.assertTrue(exc is not None)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_noninteractive(self, _):
args = ['-n', 'certonly']
self._cli_missing_flag(args, "specify a plugin")
args.extend(['--standalone', '-d', 'eg.is'])
self._cli_missing_flag(args, "register before running")
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._report_new_cert')
@mock.patch('certbot._internal.main.client.acme_client.Client')
@mock.patch('certbot._internal.main._determine_account')
@mock.patch('certbot._internal.main.client.Client.obtain_and_enroll_certificate')
@mock.patch('certbot._internal.main._get_and_save_cert')
def test_user_agent(self, gsc, _obt, det, _client, _, __, ___):
# Normally the client is totally mocked out, but here we need more
# arguments to automate it...
args = ["--standalone", "certonly", "-m", "none@none.com",
"-d", "example.com", '--agree-tos'] + self.standard_args
det.return_value = mock.MagicMock(), None
gsc.return_value = mock.MagicMock()
with mock.patch('certbot._internal.main.client.acme_client.ClientNetwork') as acme_net:
self._call_no_clientmock(args)
os_ver = util.get_os_info_ua()
ua = acme_net.call_args[1]["user_agent"]
self.assertTrue(os_ver in ua)
import platform
plat = platform.platform()
if "linux" in plat.lower():
self.assertTrue(util.get_os_info_ua() in ua)
with mock.patch('certbot._internal.main.client.acme_client.ClientNetwork') as acme_net:
ua = "bandersnatch"
args += ["--user-agent", ua]
self._call_no_clientmock(args)
acme_net.assert_called_once_with(mock.ANY, account=mock.ANY, verify_ssl=True,
user_agent=ua)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_selection(self, mock_pick_installer, _rec):
self._call(['install', '--domains', 'foo.bar', '--cert-path', 'cert',
'--key-path', 'privkey', '--chain-path', 'chain'], mockisfile=True)
self.assertEqual(mock_pick_installer.call_count, 1)
@mock.patch('certbot._internal.main._install_cert')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_certname(self, _inst, _rec, mock_install):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install', '--cert-name', 'whatever'], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('privkey'))
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._install_cert')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_param_override(self, _inst, _rec, mock_install, _):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install', '--cert-name', 'whatever',
'--key-path', test_util.temp_join('overriding_privkey')], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.chain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('overriding_privkey'))
            mock_install.reset_mock()  # MagicMock uses reset_mock(); plain reset() silently does nothing
self._call(['install', '--cert-name', 'whatever',
'--cert-path', test_util.temp_join('overriding_cert')], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('overriding_cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('privkey'))
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_param_error(self, _inst, _rec):
self.assertRaises(errors.ConfigurationError,
self._call,
['install', '--cert-name', 'notfound',
'--key-path', 'invalid'])
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.cert_manager.get_certnames')
@mock.patch('certbot._internal.main._install_cert')
def test_installer_select_cert(self, mock_inst, mock_getcert, _inst, _rec):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install'], mockisfile=True)
self.assertTrue(mock_getcert.called)
self.assertTrue(mock_inst.called)
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._report_new_cert')
@mock.patch('certbot.util.exe_exists')
def test_configurator_selection(self, mock_exe_exists, _, __, ___):
mock_exe_exists.return_value = True
real_plugins = disco.PluginsRegistry.find_all()
        args = ['--apache', '--authenticator', 'standalone']
        # This needed two calls to find_all(), which we're avoiding for now;
        # the args above are left unused and rebuilt below for the nginx check.
args = ["install", "--nginx", "--cert-path",
test_util.temp_join('blah'), "--key-path", test_util.temp_join('blah'),
"--nginx-server-root", "/nonexistent/thing", "-d",
"example.com", "--debug"]
if "nginx" in real_plugins:
ret, _, _, _ = self._call(args)
self.assertTrue("The nginx plugin is not working" in ret)
self.assertTrue("MisconfigurationError" in ret)
self._cli_missing_flag(["--standalone"], "With the standalone plugin, you probably")
with mock.patch("certbot._internal.main._init_le_client") as mock_init:
with mock.patch("certbot._internal.main._get_and_save_cert") as mock_gsc:
mock_gsc.return_value = mock.MagicMock()
self._call(["certonly", "--manual", "-d", "foo.bar"])
unused_config, auth, unused_installer = mock_init.call_args[0]
self.assertTrue(isinstance(auth, manual.Authenticator))
with mock.patch('certbot._internal.main.certonly') as mock_certonly:
self._call(["auth", "--standalone"])
self.assertEqual(1, mock_certonly.call_count)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_rollback(self, _):
_, _, _, client = self._call(['rollback'])
self.assertEqual(1, client.rollback.call_count)
_, _, _, client = self._call(['rollback', '--checkpoints', '123'])
client.rollback.assert_called_once_with(
mock.ANY, 123, mock.ANY, mock.ANY)
@mock.patch('certbot._internal.cert_manager.update_live_symlinks')
def test_update_symlinks(self, mock_cert_manager):
self._call_no_clientmock(['update_symlinks'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.cert_manager.certificates')
def test_certificates(self, mock_cert_manager):
self._call_no_clientmock(['certificates'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.cert_manager.delete')
def test_delete(self, mock_cert_manager):
self._call_no_clientmock(['delete'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_plugins(self, _, _det, mock_disco):
flags = ['--init', '--prepare', '--authenticators', '--installers']
for args in itertools.chain(
*(itertools.combinations(flags, r)
for r in range(len(flags)))):
self._call(['plugins'] + list(args))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_no_args(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(stdout.getvalue().strip(), str(filtered))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_no_args_unprivileged(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
def throw_error(directory, mode, strict):
_, _, _ = directory, mode, strict
raise errors.Error()
stdout = io.StringIO()
with mock.patch('certbot.util.set_up_core_dir') as mock_set_up_core_dir:
with test_util.patch_get_utility_with_stdout(stdout=stdout):
mock_set_up_core_dir.side_effect = throw_error
_, stdout, _, _ = self._call(['plugins'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(stdout.getvalue().strip(), str(filtered))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_init(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins', '--init'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
self.assertEqual(stdout.getvalue().strip(), str(verified))
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_prepare(self, _det, mock_disco):
ifaces: List[interfaces.IPlugin] = []
plugins = mock_disco.PluginsRegistry.find_all()
stdout = io.StringIO()
with test_util.patch_get_utility_with_stdout(stdout=stdout):
_, stdout, _, _ = self._call(['plugins', '--init', '--prepare'], stdout)
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
verified.prepare.assert_called_once_with()
verified.available.assert_called_once_with()
available = verified.available()
self.assertEqual(stdout.getvalue().strip(), str(available))
def test_certonly_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('certbot._internal.main.certonly') as mock_certonly:
self._call(['certonly', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
config, unused_plugins = mock_certonly.call_args[0]
self.assertEqual(config.cert_path, os.path.abspath(cert))
self.assertEqual(config.key_path, os.path.abspath(key))
self.assertEqual(config.chain_path, os.path.abspath(chain))
self.assertEqual(config.fullchain_path, os.path.abspath(fullchain))
def test_certonly_bad_args(self):
try:
self._call(['-a', 'bad_auth', 'certonly'])
assert False, "Exception should have been raised"
except errors.PluginSelectionError as e:
self.assertTrue('The requested bad_auth plugin does not appear' in str(e))
def test_check_config_sanity_domain(self):
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'a' * 64])
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', (('a' * 50) + '.') * 10])
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', '204.11.231.35'])
def test_csr_with_besteffort(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0} --allow-subset-of-names'.format(CSR).split())
def test_run_with_csr(self):
try:
self._call(['--csr', CSR])
except errors.Error as e:
assert "Please try the certonly" in repr(e)
return
assert False, "Expected supplying --csr to fail with default verb"
def test_csr_with_no_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0}'.format(
test_util.vector_path('csr-nonames_512.pem')).split())
def test_csr_with_inconsistent_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly -d example.org --csr {0}'.format(CSR).split())
def _certonly_new_request_common(self, mock_client, args=None):
with mock.patch('certbot._internal.main._find_lineage_for_domains_and_certname') \
as mock_renewal:
mock_renewal.return_value = ("newcert", None)
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
if args is None:
args = []
args += '-d foo.bar -a standalone certonly'.split()
self._call(args)
@test_util.patch_get_utility()
def test_certonly_dry_run_new_request_success(self, mock_get_utility):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = None
self._certonly_new_request_common(mock_client, ['--dry-run'])
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch('certbot._internal.eff.handle_subscription')
@mock.patch('certbot.crypto_util.notAfter')
@test_util.patch_get_utility()
def test_certonly_new_request_success(self, mock_get_utility, mock_notAfter, mock_subscription):
cert_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/foo.bar'))
key_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/baz.qux'))
date = '1970-01-01'
mock_notAfter().date.return_value = date
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=cert_path,
fullchain_path=cert_path, key_path=key_path)
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = mock_lineage
self._certonly_new_request_common(mock_client)
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue(cert_path in cert_msg)
self.assertTrue(date in cert_msg)
self.assertTrue(key_path in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
self.assertTrue(mock_subscription.called)
@mock.patch('certbot._internal.eff.handle_subscription')
def test_certonly_new_request_failure(self, mock_subscription):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = False
self.assertRaises(errors.Error,
self._certonly_new_request_common, mock_client)
self.assertFalse(mock_subscription.called)
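# Shared helper for the renewal tests below: exercises a full 'certonly'/
# 'renew' invocation with storage, ACME client and crypto mocked out, and
# returns the mocked lineage, the patched get_utility and captured stdout.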
def _test_renewal_common(self, due_for_renewal, extra_args, log_out=None,
args=None, should_renew=True, error_expected=False,
quiet_mode=False, expiry_date=datetime.datetime.now(),
reuse_key=False):
cert_path = test_util.vector_path('cert_512.pem')
chain_path = os.path.normpath(os.path.join(self.config.config_dir,
'live/foo.bar/fullchain.pem'))
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=chain_path,
cert_path=cert_path, fullchain_path=chain_path)
mock_lineage.should_autorenew.return_value = due_for_renewal
mock_lineage.has_pending_deployment.return_value = False
mock_lineage.names.return_value = ['isnot.org']
mock_lineage.private_key_type = 'RSA'
mock_certr = mock.MagicMock()
mock_key = mock.MagicMock(pem='pem_key')
mock_client = mock.MagicMock()
stdout = io.StringIO()
mock_client.obtain_certificate.return_value = (mock_certr, 'chain',
mock_key, 'csr')
def write_msg(message, *args, **kwargs): # pylint: disable=unused-argument
stdout.write(message)
try:
with mock.patch('certbot._internal.cert_manager.find_duplicative_certs') as mock_fdc:
mock_fdc.return_value = (mock_lineage, None)
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
with test_util.patch_get_utility() as mock_get_utility:
if not quiet_mode:
mock_get_utility().notification.side_effect = write_msg
with mock.patch('certbot._internal.main.renewal.OpenSSL') as mock_ssl:
mock_latest = mock.MagicMock()
mock_latest.get_issuer.return_value = "Artificial pretend"
mock_ssl.crypto.load_certificate.return_value = mock_latest
with mock.patch('certbot._internal.main.renewal.crypto_util') \
as mock_crypto_util:
mock_crypto_util.notAfter.return_value = expiry_date
with mock.patch('certbot._internal.eff.handle_subscription'):
if not args:
args = ['-d', 'isnot.org', '-a', 'standalone', 'certonly']
if extra_args:
args += extra_args
try:
ret, stdout, _, _ = self._call(args, stdout)
if ret:
print("Returned", ret)
raise AssertionError(ret)
assert not error_expected, "renewal should have errored"
except: # pylint: disable=bare-except
if not error_expected:
raise AssertionError(
"Unexpected renewal error:\n" +
traceback.format_exc())
if should_renew:
if reuse_key:
# The location of the previous live privkey.pem is passed
# to obtain_certificate
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'],
os.path.normpath(os.path.join(
self.config.config_dir, "live/sample-renewal/privkey.pem")))
else:
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'], None)
else:
self.assertEqual(mock_client.obtain_certificate.call_count, 0)
except:
self._dump_log()
raise
finally:
if log_out:
with open(os.path.join(self.config.logs_dir, "letsencrypt.log")) as lf:
self.assertTrue(log_out in lf.read())
return mock_lineage, mock_get_utility, stdout
@mock.patch('certbot.crypto_util.notAfter')
def test_certonly_renewal(self, _):
lineage, get_utility, _ = self._test_renewal_common(True, [])
self.assertEqual(lineage.save_successor.call_count, 1)
lineage.update_all_links_to.assert_called_once_with(
lineage.latest_common_version())
cert_msg = get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('fullchain.pem' in cert_msg)
self.assertTrue('donate' in get_utility().add_message.call_args[0][0])
@mock.patch('certbot._internal.log.logging.handlers.RotatingFileHandler.doRollover')
@mock.patch('certbot.crypto_util.notAfter')
def test_certonly_renewal_triggers(self, _, __):
# --dry-run should force renewal
_, get_utility, _ = self._test_renewal_common(False, ['--dry-run', '--keep'],
log_out="simulating renewal")
self.assertEqual(get_utility().add_message.call_count, 1)
self.assertTrue('dry run' in get_utility().add_message.call_args[0][0])
self._test_renewal_common(False, ['--renew-by-default', '-tvv', '--debug'],
log_out="Auto-renewal forced")
self.assertEqual(get_utility().add_message.call_count, 1)
self._test_renewal_common(False, ['-tvv', '--debug', '--keep'],
log_out="not yet due", should_renew=False)
def _dump_log(self):
print("Logs:")
log_path = os.path.join(self.config.logs_dir, "letsencrypt.log")
if os.path.exists(log_path):
with open(log_path) as lf:
print(lf.read())
def test_renew_verb(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
def test_reuse_key(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--reuse-key"]
self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True)
@mock.patch('certbot._internal.storage.RenewableCert.save_successor')
def test_reuse_key_no_dry_run(self, unused_save_successor):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--reuse-key"]
self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True)
@mock.patch('sys.stdin')
def test_noninteractive_renewal_delay(self, stdin):
stdin.isatty.return_value = False
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
self.assertEqual(self.mock_sleep.call_count, 1)
# in main.py:
# sleep_time = random.randint(1, 60*8)
sleep_call_arg = self.mock_sleep.call_args[0][0]
self.assertTrue(1 <= sleep_call_arg <= 60*8)
@mock.patch('sys.stdin')
def test_interactive_no_renewal_delay(self, stdin):
stdin.isatty.return_value = True
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
self.assertEqual(self.mock_sleep.call_count, 0)
@mock.patch('certbot._internal.renewal.should_renew')
def test_renew_skips_recent_certs(self, should_renew):
should_renew.return_value = False
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
expiry = datetime.datetime.now() + datetime.timedelta(days=90)
_, _, stdout = self._test_renewal_common(False, extra_args=None, should_renew=False,
args=['renew'], expiry_date=expiry)
self.assertTrue('No renewals were attempted.' in stdout.getvalue())
self.assertTrue('The following certificates are not due for renewal yet:' in stdout.getvalue())
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_quiet_renew(self, _):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run"]
_, _, stdout = self._test_renewal_common(True, [], args=args, should_renew=True)
out = stdout.getvalue()
self.assertTrue("renew" in out)
args = ["renew", "--dry-run", "-q"]
_, _, stdout = self._test_renewal_common(True, [], args=args,
should_renew=True, quiet_mode=True)
out = stdout.getvalue()
self.assertEqual("", out)
def test_renew_hook_validation(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--post-hook=no-such-command"]
self._test_renewal_common(True, [], args=args, should_renew=False,
error_expected=True)
def test_renew_no_hook_validation(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--post-hook=no-such-command",
"--disable-hook-validation"]
with mock.patch("certbot._internal.hooks.post_hook"):
self._test_renewal_common(True, [], args=args, should_renew=True,
error_expected=False)
def test_renew_verb_empty_config(self):
rd = os.path.join(self.config.config_dir, 'renewal')
if not os.path.exists(rd):
filesystem.makedirs(rd)
with open(os.path.join(rd, 'empty.conf'), 'w'):
pass # leave the file empty
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(False, [], args=args, should_renew=False, error_expected=True)
def test_renew_with_certname(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
self._test_renewal_common(True, [], should_renew=True,
args=['renew', '--dry-run', '--cert-name', 'sample-renewal'])
def test_renew_with_bad_certname(self):
self._test_renewal_common(True, [], should_renew=False,
args=['renew', '--dry-run', '--cert-name', 'sample-renewal'],
error_expected=True)
def _make_dummy_renewal_config(self):
renewer_configs_dir = os.path.join(self.config.config_dir, 'renewal')
filesystem.makedirs(renewer_configs_dir)
with open(os.path.join(renewer_configs_dir, 'test.conf'), 'w') as f:
f.write("My contents don't matter")
def _test_renew_common(self, renewalparams=None, names=None,
assert_oc_called=None, **kwargs):
self._make_dummy_renewal_config()
with mock.patch('certbot._internal.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somepath/fullchain.pem"
if renewalparams is not None:
mock_lineage.configuration = {'renewalparams': renewalparams}
if names is not None:
mock_lineage.names.return_value = names
mock_rc.return_value = mock_lineage
with mock.patch('certbot._internal.main.renew_cert') as mock_renew_cert:
kwargs.setdefault('args', ['renew'])
self._test_renewal_common(True, None, should_renew=False, **kwargs)
if assert_oc_called is not None:
if assert_oc_called:
self.assertTrue(mock_renew_cert.called)
else:
self.assertFalse(mock_renew_cert.called)
def test_renew_no_renewalparams(self):
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_no_authenticator(self):
self._test_renew_common(renewalparams={}, assert_oc_called=False,
error_expected=True)
def test_renew_with_bad_int(self):
renewalparams = {'authenticator': 'webroot',
'rsa_key_size': 'over 9000'}
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
assert_oc_called=False)
def test_renew_with_nonetype_http01(self):
renewalparams = {'authenticator': 'webroot',
'http01_port': 'None'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_bad_domain(self):
renewalparams = {'authenticator': 'webroot'}
names = ['uniçodé.com']
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
names=names, assert_oc_called=False)
@mock.patch('certbot._internal.plugins.selection.choose_configurator_plugins')
def test_renew_with_configurator(self, mock_sel):
mock_sel.return_value = (mock.MagicMock(), mock.MagicMock())
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args='renew --configurator apache'.split())
def test_renew_plugin_config_restoration(self):
renewalparams = {'authenticator': 'webroot',
'webroot_path': 'None',
'webroot_imaginary_flag': '42'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_webroot_map(self):
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args=['renew', '--webroot-map', json.dumps({'example.com': tempfile.gettempdir()})])
def test_renew_reconstitute_error(self):
with mock.patch('certbot._internal.main.renewal._reconstitute') as mock_reconstitute:
mock_reconstitute.side_effect = Exception
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_obtain_cert_error(self):
self._make_dummy_renewal_config()
with mock.patch('certbot._internal.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somewhere/fullchain.pem"
mock_rc.return_value = mock_lineage
mock_lineage.configuration = {
'renewalparams': {'authenticator': 'webroot'}}
with mock.patch('certbot._internal.main.renew_cert') as mock_renew_cert:
mock_renew_cert.side_effect = Exception
self._test_renewal_common(True, None, error_expected=True,
args=['renew'], should_renew=False)
def test_renew_with_bad_cli_args(self):
self._test_renewal_common(True, None, args='renew -d example.com'.split(),
should_renew=False, error_expected=True)
self._test_renewal_common(True, None, args='renew --csr {0}'.format(CSR).split(),
should_renew=False, error_expected=True)
def test_no_renewal_with_hooks(self):
_, _, stdout = self._test_renewal_common(
due_for_renewal=False, extra_args=None, should_renew=False,
args=['renew', '--post-hook',
'{0} -c "print(\'hello world\');"'
.format(sys.executable)])
self.assertTrue('No hooks were run.' in stdout.getvalue())
@test_util.patch_get_utility()
@mock.patch('certbot._internal.main._find_lineage_for_domains_and_certname')
@mock.patch('certbot._internal.main._init_le_client')
@mock.patch('certbot._internal.main._report_new_cert')
def test_certonly_reinstall(self, mock_report_new_cert, mock_init,
mock_renewal, mock_get_utility):
mock_renewal.return_value = ('reinstall', mock.MagicMock())
mock_init.return_value = mock_client = mock.MagicMock()
self._call(['-d', 'foo.bar', '-a', 'standalone', 'certonly'])
self.assertFalse(mock_client.obtain_certificate.called)
self.assertFalse(mock_client.obtain_and_enroll_certificate.called)
self.assertEqual(mock_get_utility().add_message.call_count, 0)
mock_report_new_cert.assert_not_called()
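# Shared helper for the '--csr' tests below: runs certonly with a CSR and
# checks that save_certificate() is called, or skipped under '--dry-run'.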
def _test_certonly_csr_common(self, extra_args=None):
certr = 'certr'
chain = 'chain'
mock_client = mock.MagicMock()
mock_client.obtain_certificate_from_csr.return_value = (certr, chain)
cert_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/cert_512.pem'))
full_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/fullchain.pem'))
mock_client.save_certificate.return_value = cert_path, None, full_path
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
with test_util.patch_get_utility() as mock_get_utility:
chain_path = os.path.normpath(os.path.join(
self.config.config_dir,
'live/example.com/chain.pem'))
args = ('-a standalone certonly --csr {0} --cert-path {1} '
'--chain-path {2} --fullchain-path {3}').format(
CSR, cert_path, chain_path, full_path).split()
if extra_args:
args += extra_args
with mock.patch('certbot._internal.main.crypto_util'):
self._call(args)
if '--dry-run' in args:
self.assertFalse(mock_client.save_certificate.called)
else:
mock_client.save_certificate.assert_called_once_with(
certr, chain, cert_path, chain_path, full_path)
return mock_get_utility
@mock.patch('certbot._internal.eff.handle_subscription')
def test_certonly_csr(self, mock_subscription):
mock_get_utility = self._test_certonly_csr_common()
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('fullchain.pem' in cert_msg)
self.assertFalse('Your key file has been saved at' in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
self.assertTrue(mock_subscription.called)
def test_certonly_csr_dry_run(self):
mock_get_utility = self._test_certonly_csr_common(['--dry-run'])
self.assertEqual(mock_get_utility().add_message.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main.client.acme_client')
def test_revoke_with_key(self, mock_acme_client,
mock_delete_if_appropriate):
mock_delete_if_appropriate.return_value = False
server = 'foo.bar'
self._call_no_clientmock(['--cert-path', SS_CERT_PATH, '--key-path', RSA2048_KEY_PATH,
'--server', server, 'revoke'])
with open(RSA2048_KEY_PATH, 'rb') as f:
mock_acme_client.BackwardsCompatibleClientV2.assert_called_once_with(
mock.ANY, jose.JWK.load(f.read()), server)
with open(SS_CERT_PATH, 'rb') as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = mock_acme_client.BackwardsCompatibleClientV2().revoke
mock_revoke.assert_called_once_with(
jose.ComparableX509(cert),
mock.ANY)
def test_revoke_with_key_mismatch(self):
server = 'foo.bar'
self.assertRaises(errors.Error, self._call_no_clientmock,
['--cert-path', CERT, '--key-path', KEY,
'--server', server, 'revoke'])
@mock.patch('certbot._internal.main._delete_if_appropriate')
@mock.patch('certbot._internal.main._determine_account')
def test_revoke_without_key(self, mock_determine_account,
mock_delete_if_appropriate):
mock_delete_if_appropriate.return_value = False
mock_determine_account.return_value = (mock.MagicMock(), None)
_, _, _, client = self._call(['--cert-path', CERT, 'revoke'])
with open(CERT) as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = client.acme_from_config_key().revoke
mock_revoke.assert_called_once_with(
jose.ComparableX509(cert),
mock.ANY)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_register(self, _):
with mock.patch('certbot._internal.main.client') as mocked_client:
acc = mock.MagicMock()
acc.id = "imaginary_account"
mocked_client.register.return_value = (acc, "worked")
self._call_no_clientmock(["register", "--email", "user@example.org"])
with mock.patch('certbot._internal.main.account') as mocked_account:
mocked_storage = mock.MagicMock()
mocked_account.AccountFileStorage.return_value = mocked_storage
mocked_storage.find_all.return_value = ["an account"]
x = self._call_no_clientmock(["register", "--email", "user@example.org"])
self.assertTrue("There is an existing account" in x[0])
@mock.patch('certbot._internal.plugins.selection.choose_configurator_plugins')
@mock.patch('certbot._internal.updater._run_updaters')
def test_plugin_selection_error(self, mock_run, mock_choose):
mock_choose.side_effect = errors.PluginSelectionError
self.assertRaises(errors.PluginSelectionError, main.renew_cert,
None, None, None)
self.config.dry_run = False
updater.run_generic_updaters(self.config, None, None)
# without installer
self.assertFalse(mock_run.called)
class UnregisterTest(unittest.TestCase):
def setUp(self):
self.patchers = {
'_determine_account': mock.patch('certbot._internal.main._determine_account'),
'account': mock.patch('certbot._internal.main.account'),
'client': mock.patch('certbot._internal.main.client'),
'get_utility': test_util.patch_get_utility()}
self.mocks = {k: v.start() for k, v in self.patchers.items()}
def tearDown(self):
for patch in self.patchers.values():
patch.stop()
def test_abort_unregister(self):
self.mocks['account'].AccountFileStorage.return_value = mock.Mock()
util_mock = self.mocks['get_utility']()
util_mock.yesno.return_value = False
config = mock.Mock()
unused_plugins = mock.Mock()
res = main.unregister(config, unused_plugins)
self.assertEqual(res, "Deactivation aborted.")
@mock.patch("certbot._internal.main.display_util.notify")
def test_unregister(self, mock_notify):
mocked_storage = mock.MagicMock()
mocked_storage.find_all.return_value = ["an account"]
self.mocks['account'].AccountFileStorage.return_value = mocked_storage
self.mocks['_determine_account'].return_value = (mock.MagicMock(), "foo")
cb_client = mock.MagicMock()
self.mocks['client'].Client.return_value = cb_client
config = mock.MagicMock()
unused_plugins = mock.MagicMock()
res = main.unregister(config, unused_plugins)
self.assertTrue(res is None)
mock_notify.assert_called_once_with("Account deactivated.")
def test_unregister_no_account(self):
mocked_storage = mock.MagicMock()
mocked_storage.find_all.return_value = []
self.mocks['account'].AccountFileStorage.return_value = mocked_storage
cb_client = mock.MagicMock()
self.mocks['client'].Client.return_value = cb_client
config = mock.MagicMock()
unused_plugins = mock.MagicMock()
res = main.unregister(config, unused_plugins)
m = "Could not find existing account to deactivate."
self.assertEqual(res, m)
self.assertFalse(cb_client.acme.deactivate_registration.called)
class MakeOrVerifyNeededDirs(test_util.ConfigTestCase):
@mock.patch("certbot._internal.main.util")
def test_it(self, mock_util):
main.make_or_verify_needed_dirs(self.config)
for core_dir in (self.config.config_dir, self.config.work_dir,):
mock_util.set_up_core_dir.assert_any_call(
core_dir, constants.CONFIG_DIRS_MODE,
self.config.strict_permissions
)
hook_dirs = (self.config.renewal_pre_hooks_dir,
self.config.renewal_deploy_hooks_dir,
self.config.renewal_post_hooks_dir,)
for hook_dir in hook_dirs:
# default mode of 755 is used
mock_util.make_or_verify_dir.assert_any_call(
hook_dir, strict=self.config.strict_permissions)
class EnhanceTest(test_util.ConfigTestCase):
def setUp(self):
super().setUp()
self.get_utility_patch = test_util.patch_get_utility()
self.mock_get_utility = self.get_utility_patch.start()
self.mockinstaller = mock.MagicMock(spec=enhancements.AutoHSTSEnhancement)
def tearDown(self):
self.get_utility_patch.stop()
def _call(self, args):
plugins = disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(
cli.prepare_and_parse_args(plugins, args))
with mock.patch('certbot._internal.cert_manager.get_certnames') as mock_certs:
mock_certs.return_value = ['example.com']
with mock.patch('certbot._internal.cert_manager.domains_for_certname') as mock_dom:
mock_dom.return_value = ['example.com']
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_client = mock.MagicMock()
mock_client.config = config
mock_init.return_value = mock_client
main.enhance(config, plugins)
return mock_client # returns the client
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main._find_domains_or_certname')
def test_selection_question(self, mock_find, mock_choose, mock_lineage, _rec):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ['example.com']
mock_find.return_value = (None, None)
with mock.patch('certbot._internal.main.plug_sel.pick_installer') as mock_pick:
self._call(['enhance', '--redirect'])
self.assertTrue(mock_pick.called)
# Check that the message includes "enhancements"
self.assertTrue("enhancements" in mock_pick.call_args[0][3])
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main._find_domains_or_certname')
def test_selection_auth_warning(self, mock_find, mock_choose, mock_lineage, _rec):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
mock_find.return_value = (None, None)
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
with mock.patch('certbot._internal.main.plug_sel.logger.warning') as mock_log:
mock_client = self._call(['enhance', '-a', 'webroot', '--redirect'])
self.assertTrue(mock_log.called)
self.assertTrue("make sense" in mock_log.call_args[0][0])
self.assertTrue(mock_client.enhance_config.called)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_enhance_config_call(self, _rec, mock_choose, mock_lineage):
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
mock_client = self._call(['enhance', '--redirect', '--hsts'])
req_enh = ["redirect", "hsts"]
not_req_enh = ["uir"]
self.assertTrue(mock_client.enhance_config.called)
self.assertTrue(
all(getattr(mock_client.config, e) for e in req_enh))
self.assertFalse(
any(getattr(mock_client.config, e) for e in not_req_enh))
self.assertTrue(
"example.com" in mock_client.enhance_config.call_args[0][0])
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_enhance_noninteractive(self, _rec, mock_choose, mock_lineage):
mock_lineage.return_value = mock.MagicMock(
chain_path="/tmp/nonexistent")
mock_choose.return_value = ["example.com"]
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
mock_client = self._call(['enhance', '--redirect',
'--hsts', '--non-interactive'])
self.assertTrue(mock_client.enhance_config.called)
self.assertFalse(mock_choose.called)
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_user_abort_domains(self, _rec, mock_choose):
mock_choose.return_value = []
with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
self.assertRaises(errors.Error,
self._call,
['enhance', '--redirect', '--hsts'])
def test_no_enhancements_defined(self):
self.assertRaises(errors.MisconfigurationError,
self._call, ['enhance', '-a', 'null'])
@mock.patch('certbot._internal.main.plug_sel.choose_configurator_plugins')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
def test_plugin_selection_error(self, _rec, mock_choose, mock_pick):
mock_choose.return_value = ["example.com"]
mock_pick.return_value = (None, None)
mock_pick.side_effect = errors.PluginSelectionError()
mock_client = self._call(['enhance', '--hsts'])
self.assertFalse(mock_client.enhance_config.called)
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@test_util.patch_get_utility()
def test_enhancement_enable(self, _, _rec, mock_inst, mock_choose, mock_lineage):
mock_inst.return_value = self.mockinstaller
mock_choose.return_value = ["example.com", "another.tld"]
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
self._call(['enhance', '--auto-hsts'])
self.assertTrue(self.mockinstaller.enable_autohsts.called)
self.assertEqual(self.mockinstaller.enable_autohsts.call_args[0][1],
["example.com", "another.tld"])
@mock.patch('certbot._internal.cert_manager.lineage_for_certname')
@mock.patch('certbot._internal.main.display_ops.choose_values')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@test_util.patch_get_utility()
def test_enhancement_enable_not_supported(self, _, _rec, mock_inst, mock_choose, mock_lineage):
mock_inst.return_value = null.Installer(self.config, "null")
mock_choose.return_value = ["example.com", "another.tld"]
mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
self.assertRaises(
errors.NotSupportedError,
self._call, ['enhance', '--auto-hsts'])
def test_enhancement_enable_conflict(self):
self.assertRaises(
errors.Error,
self._call, ['enhance', '--auto-hsts', '--hsts'])
class InstallTest(test_util.ConfigTestCase):
def setUp(self):
super().setUp()
self.mockinstaller = mock.MagicMock(spec=enhancements.AutoHSTSEnhancement)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_install_enhancement_not_supported(self, mock_inst, _rec):
mock_inst.return_value = null.Installer(self.config, "null")
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.config.certname = "nonexistent"
self.assertRaises(errors.NotSupportedError,
main.install,
self.config, plugins)
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_install_enhancement_no_certname(self, mock_inst, _rec):
mock_inst.return_value = self.mockinstaller
plugins = disco.PluginsRegistry.find_all()
self.config.auto_hsts = True
self.config.certname = None
self.config.key_path = "/tmp/nonexistent"
self.config.cert_path = "/tmp/nonexistent"
self.assertRaises(errors.ConfigurationError,
main.install,
self.config, plugins)
class UpdateAccountTest(test_util.ConfigTestCase):
def setUp(self):
patches = {
'account': mock.patch('certbot._internal.main.account'),
'atexit': mock.patch('certbot.util.atexit'),
'client': mock.patch('certbot._internal.main.client'),
'determine_account': mock.patch('certbot._internal.main._determine_account'),
'notify': mock.patch('certbot._internal.main.display_util.notify'),
'prepare_sub': mock.patch('certbot._internal.eff.prepare_subscription'),
'util': test_util.patch_get_utility()
}
self.mocks = {k: v.start() for k, v in patches.items()}
for patch in patches.values():
self.addCleanup(patch.stop)
return super().setUp()
def _call(self, args):
with mock.patch('certbot._internal.main.sys.stdout'), \
mock.patch('certbot._internal.main.sys.stderr'):
args = ['--config-dir', self.config.config_dir,
'--work-dir', self.config.work_dir,
'--logs-dir', self.config.logs_dir, '--text'] + args
return main.main(args[:]) # NOTE: parser can alter its args!
def _prepare_mock_account(self):
mock_storage = mock.MagicMock()
mock_account = mock.MagicMock()
mock_regr = mock.MagicMock()
mock_storage.find_all.return_value = [mock_account]
self.mocks['account'].AccountFileStorage.return_value = mock_storage
mock_account.regr.body = mock_regr.body
self.mocks['determine_account'].return_value = (mock_account, mock.MagicMock())
return (mock_account, mock_storage, mock_regr)
def _test_update_no_contact(self, args):
(_, mock_storage, mock_regr) = self._prepare_mock_account()
result = self._call(args)
# When update succeeds, the return value of update_account() is None
self.assertIsNone(result)
# We submitted a registration to the server
self.assertEqual(self.mocks['client'].Client().acme.update_registration.call_count, 1)
mock_regr.body.update.assert_called_with(contact=())
# We got an update from the server and persisted it
self.assertEqual(mock_storage.update_regr.call_count, 1)
# We should have notified the user
self.mocks['notify'].assert_called_with(
'Any contact information associated with this account has been removed.'
)
# We should not have called subscription because there's no email
self.mocks['prepare_sub'].assert_not_called()
def test_no_existing_accounts(self):
mock_storage = mock.MagicMock()
mock_storage.find_all.return_value = []
self.mocks['account'].AccountFileStorage.return_value = mock_storage
self.assertEqual(self._call(['update_account', '--email', 'user@example.org']),
'Could not find an existing account to update.')
def test_update_account_remove_email(self):
self._test_update_no_contact(['update_account', '--register-unsafely-without-email'])
def test_update_account_empty_email(self):
self._test_update_no_contact(['update_account', '-m', ''])
@mock.patch('certbot._internal.main.display_ops.get_email')
def test_update_account_with_email(self, mock_email):
mock_email.return_value = 'user@example.com'
(_, mock_storage, _) = self._prepare_mock_account()
mock_client = mock.MagicMock()
self.mocks['client'].Client.return_value = mock_client
result = self._call(['update_account'])
self.assertIsNone(result)
self.assertEqual(mock_client.acme.update_registration.call_count, 1)
self.assertEqual(mock_storage.update_regr.call_count, 1)
self.assertEqual(self.mocks['prepare_sub'].call_count, 1)
self.mocks['notify'].assert_called_with(
'Your e-mail address was updated to user@example.com.')
def test_update_account_with_multiple_emails(self):
(_, mock_storage, mock_regr) = self._prepare_mock_account()
self.assertIsNone(
self._call(['update_account', '-m', 'user@example.com,user@example.org'])
)
mock_regr.body.update.assert_called_with(
contact=['mailto:user@example.com', 'mailto:user@example.org']
)
self.assertEqual(mock_storage.update_regr.call_count, 1)
self.mocks['notify'].assert_called_with(
'Your e-mail address was updated to user@example.com,user@example.org.')
if __name__ == '__main__':
unittest.main()
7908cff36e5246ae97dc46db575d03d36ff29e4a | 3,622 | py | Python | devp2p/tests/test_crypto.py | anshulkusa/pyquarkchain | af80b6fdd331c69ce1bc801caf7c2cdd1e82a435 | ["MIT"]
# -*- coding: utf-8 -*-
from devp2p import crypto
from quarkchain.rlp.utils import decode_hex
import random
import pytest
def get_ecc(secret=b''):
return crypto.ECCx(raw_privkey=crypto.mk_privkey(secret))
def test_valid_ecc():
for i in range(100):
e = get_ecc()
assert len(e.raw_pubkey) == 64
assert e.is_valid_key(e.raw_pubkey)
assert e.is_valid_key(e.raw_pubkey, e.raw_privkey)
pubkey = b'\x00' * 64
assert not e.is_valid_key(pubkey)
def test_asymmetric():
bob = get_ecc(b'secret2')
# enc / dec
plaintext = b"Hello Bob"
ciphertext = crypto.encrypt(plaintext, bob.raw_pubkey)
assert bob.decrypt(ciphertext) == plaintext
def test_signature():
bob = get_ecc(b'secret2')
# sign
message = crypto.sha3(b"Hello Alice")
signature = bob.sign(message)
# verify signature
assert crypto.verify(bob.raw_pubkey, signature, message) is True
assert crypto.ECCx(raw_pubkey=bob.raw_pubkey).verify(signature, message) is True
# wrong signature
message = crypto.sha3(b"Hello Alicf")
assert crypto.ECCx(raw_pubkey=bob.raw_pubkey).verify(signature, message) is False
assert crypto.verify(bob.raw_pubkey, signature, message) is False
def test_recover():
alice = get_ecc(b'secret1')
message = crypto.sha3(b'hello bob')
signature = alice.sign(message)
assert len(signature) == 65
assert crypto.verify(alice.raw_pubkey, signature, message) is True
recovered_pubkey = crypto.ecdsa_recover(message, signature)
assert len(recovered_pubkey) == 64
assert alice.raw_pubkey == recovered_pubkey
def test_get_ecdh_key():
privkey = decode_hex("332143e9629eedff7d142d741f896258f5a1bfab54dab2121d3ec5000093d74b")
remote_pubkey = decode_hex("f0d2b97981bd0d415a843b5dfe8ab77a30300daab3658c578f2340308a2da1a07f0821367332598b6aa4e180a41e92f4ebbae3518da847f0b1c0bbfe20bcf4e1")
agree_expected = decode_hex("ee1418607c2fcfb57fda40380e885a707f49000a5dda056d828b7d9bd1f29a08")
e = crypto.ECCx(raw_privkey=privkey)
agree = e.get_ecdh_key(remote_pubkey)
assert agree == agree_expected
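# Illustrative sketch (not part of the original suite): the fixed vector
# above holds because ECDH key agreement is symmetric -- both parties derive
# the same shared secret from their own private key and the peer's pubkey.
def test_ecdh_key_is_symmetric():
    alice = get_ecc(b'secret1')
    bob = get_ecc(b'secret2')
    assert alice.get_ecdh_key(bob.raw_pubkey) == bob.get_ecdh_key(alice.raw_pubkey)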
def test_en_decrypt():
alice = crypto.ECCx()
bob = crypto.ECCx()
msg = b'test'
ciphertext = alice.encrypt(msg, bob.raw_pubkey)
assert bob.decrypt(ciphertext) == msg
def test_en_decrypt_shared_mac_data():
alice, bob = crypto.ECCx(), crypto.ECCx()
ciphertext = alice.encrypt(b'test', bob.raw_pubkey, shared_mac_data=b'shared mac data')
assert bob.decrypt(ciphertext, shared_mac_data=b'shared mac data') == b'test'
@pytest.mark.xfail(raises=crypto.ECIESDecryptionError)
def test_en_decrypt_shared_mac_data_fail():
alice, bob = crypto.ECCx(), crypto.ECCx()
ciphertext = alice.encrypt(b'test', bob.raw_pubkey, shared_mac_data=b'shared mac data')
bob.decrypt(ciphertext, shared_mac_data=b'wrong')
def test_privtopub():
priv = crypto.mk_privkey(b'test')
pub = crypto.privtopub(priv)
pub2 = crypto.ECCx(raw_privkey=priv).raw_pubkey
assert pub == pub2
def recover_1kb(times=1000):
alice = get_ecc(b'secret1')
message = ''.join(chr(random.randrange(0, 256)) for i in range(1024))
message = crypto.sha3(message.encode('utf-8'))
signature = alice.sign(message)
for i in range(times):
recovered_pubkey = crypto.ecdsa_recover(message, signature)
assert recovered_pubkey == alice.raw_pubkey
def test_recover2():
recover_1kb(times=1)
if __name__ == '__main__':
import time
st = time.time()
times = 100
recover_1kb(times=times)
print('took %.5f per recovery' % ((time.time() - st) / times))
7908d0d264a16291805144397c065b26fa2f7b36 | 17,246 | py | Python | quex/engine/state_machine/transformation/state_split.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | ["MIT"]
"""State-Split Transformation
-----------------------------
(C) Frank-Rene Schaefer
The 'State-Split' is a procedure that transforms a state machine that triggers
on some 'pure' values (e.g. Unicode Characters) into a state machine that
triggers on the code unit sequences (e.g. UTF8 Code Units) that correspond to
the original values. For example, a state transition on a Unicode Character
'0x1329D' as shown below,
[ A ]--->( 0x1329D )---->[ B ]
is translated into a sequence of UTF16 transitions with a new intermediate
state 'i' as follows.
[ A ]--( 0xD80C )-->[ i ]-->( 0xDE9D )-->[ B ]
This is so, since the character 0x1329D in Unicode is represented by the
UTF16 sequence 0xD80C, 0xDE9D. The present algorithm exploits the fact that
translations of adjacent characters result in sequences of adjacent intervals.
.----------------------------------------------------------------------------.
| This procedure is to be used for encodings of dynamic size, i.e. where the |
| number of code units to represent a 'pure' value changes depending on the |
| value itself (e.g. UTF8, UTF16). |
'----------------------------------------------------------------------------'
PRINCIPLE:
A state transition is described by a 'trigger set' and a target state. If an
input occurs that belongs to the 'trigger set', the state machine transits into
the specific target state. Trigger sets are composed of one or more intervals
of adjacent values. If the encoding has some type of continuity, it can be
assumed that an interval in the pure values can be represented by a sequence of
intervals in the transformed state machine. This is indeed true for the
encodings UTF8 and UTF16.
The algorithm below considers intervals of pure values and translates them
into interval sequences. All interval sequences of a trigger set that
triggers to a target state are then combined into a set of state transitions.
A Unicode transition from state A to state B:
[ A ]-->(x0, x1)-->[ B ]
is translated into a chain of utf8-byte sequence transitions that might look
like this
[ A ]-->(b0)-->[ 1 ]-->(c0,c1)-->[ B ]
\ /
`->(d1)-->[ 2 ]---(e0,e1)---'
That means that intermediate states may be introduced to reflect the different
byte sequences that represent the original interval.
IDEAS:
In a simple approach one would translate each element of an interval into a
utf8-byte sequence and generate state transitions between A and B. Such an
approach, however, produces a huge computational overhead and charges the later
Hopcroft Minimization with a huge state machine.
To avoid such a huge computational effort, the Hopcroft Minimization can be
prepared on the basis of transition intervals.
(A) Backwards: In somewhat greater intervals, the following might occur:
.-->(d1)-->[ 1 ]---(A3,BF)---.
/ \
/ ,->(d1)-->[ 2 ]---(80,BF)--. \
/ / \ \
[ A ]-->(b0)-->[ 3 ]-->(80,BF)-->[ B ]
\ /
`->(d1)-->[ 4 ]---(80,81)---'
That means, that for states 2 and 3 the last transition is on [80, BF]
to state B. Thus, the intermediate states 2 and 3 are equivalent. Both
can be replaced by a single state.
(B) Forwards: The first couple of bytes in the correspondent utf8 sequences
might be the same. Then, no branch is required until the first differing
byte.
PROCESS:
(1) The original interval is translated into a list of interval sequences
that represent the values in the target encoding.
(2) The interval sequences are plugged in between the state A and B
of the state machine.
"""
from quex.engine.state_machine.state.core import DFA_State
import quex.engine.state_machine.transformation.base as base
import quex.engine.state_machine.index as state_machine_index
from quex.engine.misc.interval_handling import NumberSet
from quex.engine.misc.tools import flatten_list_of_lists
from collections import defaultdict
class EncodingTrafoBySplit(base.EncodingTrafo):
"""Transformation that takes a lexatom and produces a lexatom sequence.
"""
def __init__(self, Name, ErrorRangeByCodeUnitDb):
base.EncodingTrafo.__init__(self, Name,
NumberSet.from_range(0, 0x110000),
ErrorRangeByCodeUnitDb)
def do_transition(self, from_target_map, FromSi, ToSi, BadLexatomSi):
"""Translates to transition 'FromSi' --> 'ToSi' inside the state
machine according to the specific coding (see derived class, i.e.
UTF8 or UTF16).
'BadLexatomSi' is None => no bad lexatom detection.
else, transitions to 'bad lexatom state' are added
on invalid code units.
RETURNS: [0] True if complete, False else.
[1] StateDb of newly generated states.
"""
number_set = from_target_map[ToSi]
# Check whether a modification is necessary
if number_set.least_greater_bound() <= self.UnchangedRange:
# 'UnchangedRange' => No change to numerical values.
return True, None
if not self.cut_forbidden_range(number_set):
# 'number_set' solely contains forbidden elements.
del from_target_map[ToSi]
return False, None
transformed_interval_sequence_list = flatten_list_of_lists(
self.get_interval_sequences(interval)
for interval in number_set.get_intervals(PromiseToTreatWellF=True)
)
# Second, enter the new transitions.
new_target_map, \
new_state_db = self.plug_interval_sequences(FromSi, ToSi,
transformed_interval_sequence_list,
BadLexatomSi)
# Absorb new transitions into the target map of the 'from state'.
del from_target_map[ToSi]
from_target_map.update(new_target_map)
return True, new_state_db
def _do_single(self, Code):
number_set = NumberSet.from_range(Code, Code+1)
if number_set.is_empty():
return -1
interval_list = number_set.get_intervals(PromiseToTreatWellF=True)
assert len(interval_list) == 1
interval_sequence_list = self.get_interval_sequences(interval_list[0])
# A single code element can only produce a single interval sequence!
assert len(interval_sequence_list) == 1
assert all(x.size() == 1 for x in interval_sequence_list[0])
return [x.begin for x in interval_sequence_list[0]]
def variable_character_sizes_f(self):
return True
def lexatom_n_per_character_in_state_machine(self, SM):
lexatom_n = None
for state in SM.states.itervalues():
for number_set in state.target_map.get_map().itervalues():
candidate_lexatom_n = self.lexatom_n_per_character(number_set)
if candidate_lexatom_n is None: return None
elif lexatom_n is None: lexatom_n = candidate_lexatom_n
elif lexatom_n != candidate_lexatom_n: return None
return lexatom_n
def hopcroft_minimization_always_makes_sense(self):
return True
def plug_interval_sequences(self, FromSi, ToSi, IntervalSequenceList,
BadLexatomSi):
"""Transform the list of interval sequences into intermediate state
transitions.
'BadLexatomSi' is None => no bad lexatom detection.
else, transitions to 'bad lexatom state' are added
on invalid code units.
RETURNS: [0] Target map update for the first state.
[1] State Db update for intermediate states.
"""
def simplify(tm_db, tm_end_inv, ToSi):
"""Those states which trigger on the same intervals to 'ToSi' are
equivalent, i.e. they can be replaced by one state.
"""
# Find the states that trigger on the same interval list to the
# terminal 'ToSi'.
equivalence_db = {}
replacement_db = {}
for from_si, interval_list in tm_end_inv.iteritems():
key = tuple(sorted(interval_list))
equivalent_si = equivalence_db.get(key)
if equivalent_si is None: equivalence_db[key] = from_si
else: replacement_db[from_si] = equivalent_si
# Replace target states which are equivalent
result = {}
for from_si, tm in tm_db.iteritems():
new_tm = defaultdict(NumberSet)
for target_si, interval in tm.iteritems():
replacement_si = replacement_db.get(target_si)
if replacement_si is not None: target_si = replacement_si
new_tm[target_si].quick_append_interval(interval)
# Debug aid: an empty interval set in a transition map indicates an error.
if any(number_set.is_empty() for number_set in new_tm.itervalues()):
for si, number_set in new_tm.iteritems():
print "#sim", si, number_set
if from_si in tm_end_inv:
for interval in tm_end_inv[from_si]:
new_tm[ToSi].quick_append_interval(interval)
result[from_si] = new_tm
return result
tm_db, \
tm_end_inv, \
position_db = _get_intermediate_transition_maps(FromSi, ToSi,
IntervalSequenceList)
result_tm_db = simplify(tm_db, tm_end_inv, ToSi)
if BadLexatomSi is not None:
for si, position in position_db.iteritems():
# The 'position 0' is done by 'do_state_machine'. It is concerned
# with the first state's transition.
assert position != 0
self._add_transition_to_bad_lexatom_detector(result_tm_db[si],
BadLexatomSi,
position)
for tm in result_tm_db.itervalues():
assert not any(number_set.is_empty() for number_set in tm.itervalues())
# Generate the target map to be inserted into state 'FromSi'.
# Generate list of intermediate states that implement the sequence
# of intervals.
first_tm = result_tm_db.pop(FromSi)
new_state_db = dict(
(si, DFA_State.from_TargetMap(tm)) for si, tm in result_tm_db.iteritems()
)
return first_tm, new_state_db
def __bunch_iterable(IntervalSequenceList, Index):
"""Iterate over sub-bunches of sequence in 'IntervalSequenceList' which are
the same at the given 'Position'. The 'IntervalSequenceList' must be sorted!
That is, same intervals must be adjacent.
EXAMPLE:
Index = 1
IntervalSequenceList = [
[ interval01, interval12, interval21, ],
[ interval01, interval12, interval21, ],
[ interval02, interval12, interval22, interval30 ],
[ interval02, interval13, interval22, interval30 ],
[ interval02, interval13, interval23, ] ]
That is, the interval sequences are grouped according to groups where the
second interval (Index=1) is equal, the yields are as follows:
(1) [ [ interval01, interval12, interval21, ],
[ interval01, interval12, interval21, ] ]
(2) [ [ interval02, interval12, interval22, interval30 ] ]
(3) [ [ interval02, interval13, interval22, interval30 ],
[ interval02, interval13, interval23, ] ]
NOTE: Two sequences of different lengths are *never* grouped together
-- on purpose.
The index is provided in order to avoid the creation of shortened sub-
sequences. Instead, the caller focusses on sub-sequences behind 'Index'.
Obviously, this function only makes sense if the intervals before 'Index'
are all the same.
YIELDS: [0] Interval which is the same for the group of sequences at 'Index'.
[1] Group of sequences.
[2] 'LastF' -- telling whether the interval is the last in the
sequence.
"""
prev_interval = None
prev_i = -1
prev_last_f = False
for i, sequence in enumerate(IntervalSequenceList):
interval = sequence[Index]
if interval.is_empty(): print "#bu:", interval; assert False
L = len(sequence)
last_f = L == Index + 1
if interval != prev_interval or last_f != prev_last_f:
if prev_i != -1:
yield prev_interval, IntervalSequenceList[prev_i:i], prev_last_f
prev_i = i
prev_interval = interval
prev_last_f = last_f
yield prev_interval, IntervalSequenceList[prev_i:], prev_last_f
def _get_intermediate_transition_maps(FromSi, ToSi, interval_sequence_list):
"""Several transitions are to be inserted in between state 'FromSi' and
'ToSi'. The transitions result from the list of sequences in
'interval_sequence_list'. This function develops the transition maps
of the states involved. Also, it notifies about the 'position' of each
state in the code unit sequence. Thus, the caller may insert error-detectors
on invalid code units.
FORBIDDEN: There cannot be a sequence that starts with the exact intervals
of a shorter sequence. Example:
[ (0, 1), (0, 2), (0, 3) ] #
[ (0, 1), (0, 2) ] # Bad, very bad!
This would mean that after (0, 1), (0, 2) the 'ToSi' is reached, but then
after (0, 3) again. The result is an *iteration* on 'ToSi'
--(0, 1)-->( A )--(0, 2)-->( ToSi )---->
| |
'-<-(0, 3)--'
Consequently, such a list of interval sequences cannot represent a linear
transition.
RETURNS: [0] Transition Map DB: state_index --> 'TransitionMap'
with TransitionMap: target_state_index --> Interval
That is 'TransitionMap[target_state_index]' tells through which
intervals the 'state_index' triggers to 'target_states'
The 'Transition Map DB' does not contain transitions to the
'ToSi'--the end state.
[1] Inverse End Transition Map:
Transitions to the end state are stored inversely:
from_state_index --> list of Interval-s
The end state can be reached by more than one interval, so a
list of Interval-s is associated with the transition
'from_state_index' to 'ToSi'.
[2] PositionDB: state_index --> position in code unit sequence.
"""
# Sort the list of sequences, so that adjacent intervals are listed one
# after the other. This is necessary for '__bunch_iterable()' to function.
interval_sequence_list.sort()
worklist = [
# The state at 'FromSi' is concerned with the intervals
# at position '0' in the 'interval_sequence_list'. The list needs to
# be grouped according to the first interval, and for each distinct
# interval a transition to another state must be generated.
(FromSi, interval_sequence_list, 0)
]
tm_db = defaultdict(dict)
tm_end_inv = defaultdict(list)
position_db = {}
while worklist:
si, sequence_group, index = worklist.pop()
# -- State 'si' triggers on intervals at 'index' in 'sequence_group'.
tm = tm_db[si]
# -- State 'si' comes at position 'index' in a sequence of code units.
# (position of 'FromSi' shall not appear in the 'position_db' since
# the error detection of the first state is done in the caller.)
if si != FromSi: position_db[si] = index
# Group the sequences according to the interval at position 'index'.
for interval, sub_group, last_f in __bunch_iterable(sequence_group, index):
# Transit to new state for the given sub-group of sequences.
if not last_f:
# For each 'interval' a deliberate target state is generated.
# => each target state is only reached by a single Interval.
new_si = state_machine_index.get()
tm[new_si] = interval
worklist.append((new_si, sub_group, index+1))
else:
# If the 'interval' is the last in the sequence, the 'ToSi' is
# reached. Obviously this may/should happen more than once.
tm_end_inv[si].append(interval)
return tm_db, tm_end_inv, position_db
| 43.550505
| 91
| 0.606923
|
"""State-Split Transformation
-----------------------------
(C) Frank-Rene Schaefer
The 'State-Split' is a procedure transforms a state machine that triggers on
some 'pure' values (e.g. Unicode Characters) into a state machine that triggers
on the code unit sequences (e.g. UTF8 Code Units) that correspond to the
original values. For example, a state transition on a Unicode Character
'0x1329D' as shown below,
[ A ]--->( 0x1329D )---->[ B ]
is translated into a sequence of UTF16 transitions with a new intermediate
state 'i' as follows.
[ A ]--( 0xD80C )-->[ i ]-->( 0xDE9E )-->[ B ]
This is so, since the character 0x1329D in Unicode is represented as the
sequence 0xD80C, 0xDE9E. The present algorithm exploits the fact that
translations of adjacent character result in sequences of adjacent intervals.
.----------------------------------------------------------------------------.
| This procedure is to be used for encodings of dynamic size, i.e. where the |
| number of code units to represent a 'pure' value changes depending on the |
| value itself (e.g. UTF8, UTF16). |
'----------------------------------------------------------------------------'
PRINCIPLE:
A state transition is described by a 'trigger set' and a target state. If an
input occurs that belongs to the 'trigger set' the state machine transits into
the specific target state. Trigger sets are composed of one ore more intervals
of adjacent values. If the encoding has some type of continuity, it can be
assumed that an interval in the pure values can be represented by a sequence of
intervals in the transformed state machine. This is, indeed true for the
encodings UTF8 and UTF16.
The algorithm below considers intervals of pure values and translates them
into interval sequences. All interval sequences of a triggger set that
triggers to a target state are then combined into a set of state transitions.
A unicode transition from state A to state B:
[ A ]-->(x0, x1)-->[ B ]
is translated into a chain of utf8-byte sequence transitions that might look
like this
[ A ]-->(b0)-->[ 1 ]-->(c0,c1)-->[ B ]
\ /
`->(d1)-->[ 2 ]---(e0,e1)---'
That means that intermediate states may be introduced to reflect the different
byte sequences that represent the original interval.
IDEAS:
In a simple approach one would translate each element of a interval into an
utf8-byte sequence and generate state transitions between A and B. Such an
approach, however, produces a huge computational overhead and charges the later
Hopcroft Minimization with a huge state machine.
To avoid such an hughe computational effort, the Hopcroft Minimzation can be
prepared on the basis of transition intervals.
(A) Backwards: In somewhat greater intervals, the following might occur:
.-->(d1)-->[ 1 ]---(A3,BF)---.
/ \
/ ,->(d1)-->[ 2 ]---(80,BF)--. \
/ / \ \
[ A ]-->(b0)-->[ 3 ]-->(80,BF)-->[ B ]
\ /
`->(d1)-->[ 4 ]---(80,81)---'
That means, that for states 2 and 3 the last transition is on [80, BF]
to state B. Thus, the intermediate states 2 and 3 are equivalent. Both
can be replaced by a single state.
(B) Forwards: The first couple of bytes in the correspondent utf8 sequences
might be the same. Then, no branch is required until the first differing
byte.
PROCESS:
    (1) The original interval is translated into a list of interval sequences
        that represent the values in the target encoding.
    (2) The interval sequences are plugged in between states A and B
of the state machine.
"""
from quex.engine.state_machine.state.core import DFA_State
import quex.engine.state_machine.transformation.base as base
import quex.engine.state_machine.index as state_machine_index
from quex.engine.misc.interval_handling import NumberSet
from quex.engine.misc.tools import flatten_list_of_lists
from collections import defaultdict
class EncodingTrafoBySplit(base.EncodingTrafo):
"""Transformation that takes a lexatom and produces a lexatom sequence.
"""
def __init__(self, Name, ErrorRangeByCodeUnitDb):
base.EncodingTrafo.__init__(self, Name,
NumberSet.from_range(0, 0x110000),
ErrorRangeByCodeUnitDb)
def do_transition(self, from_target_map, FromSi, ToSi, BadLexatomSi):
"""Translates to transition 'FromSi' --> 'ToSi' inside the state
machine according to the specific coding (see derived class, i.e.
UTF8 or UTF16).
'BadLexatomSi' is None => no bad lexatom detection.
else, transitions to 'bad lexatom state' are added
on invalid code units.
RETURNS: [0] True if complete, False else.
[1] StateDb of newly generated states.
"""
number_set = from_target_map[ToSi]
if number_set.least_greater_bound() <= self.UnchangedRange:
return True, None
if not self.cut_forbidden_range(number_set):
del from_target_map[ToSi]
return False, None
transformed_interval_sequence_list = flatten_list_of_lists(
self.get_interval_sequences(interval)
for interval in number_set.get_intervals(PromiseToTreatWellF=True)
)
new_target_map, \
new_state_db = self.plug_interval_sequences(FromSi, ToSi,
transformed_interval_sequence_list,
BadLexatomSi)
del from_target_map[ToSi]
from_target_map.update(new_target_map)
return True, new_state_db
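    # Editor's note, a hedged summary of 'do_transition()' above: a transition
    # [FromSi]--(number_set)-->[ToSi] whose values exceed 'UnchangedRange' is
    # cut to the legal range, each interval is expanded into code-unit interval
    # sequences via 'get_interval_sequences()' (derived class), and the entry
    # for 'ToSi' in 'from_target_map' is replaced by transitions into freshly
    # generated intermediate states.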
def _do_single(self, Code):
number_set = NumberSet.from_range(Code, Code+1)
if number_set.is_empty():
return -1
interval_list = number_set.get_intervals(PromiseToTreatWellF=True)
assert len(interval_list) == 1
interval_sequence_list = self.get_interval_sequences(interval_list[0])
assert len(interval_sequence_list) == 1
assert all(x.size() == 1 for x in interval_sequence_list[0])
return [x.begin for x in interval_sequence_list[0]]
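    # Editor's note (hedged example, assuming the UTF-8 derived class): a
    # single code point is mapped by '_do_single()' to the list of its code
    # units, e.g.
    #
    #     _do_single(0x20AC) --> [0xE2, 0x82, 0xAC]   # EURO SIGN in UTF-8
    #
    # while the guard above returns -1 for an empty number set.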
def variable_character_sizes_f(self):
return True
def lexatom_n_per_character_in_state_machine(self, SM):
lexatom_n = None
for state in SM.states.itervalues():
for number_set in state.target_map.get_map().itervalues():
candidate_lexatom_n = self.lexatom_n_per_character(number_set)
if candidate_lexatom_n is None: return None
elif lexatom_n is None: lexatom_n = candidate_lexatom_n
elif lexatom_n != candidate_lexatom_n: return None
return lexatom_n
def hopcroft_minimization_always_makes_sense(self):
return True
def plug_interval_sequences(self, FromSi, ToSi, IntervalSequenceList,
BadLexatomSi):
"""Transform the list of interval sequences into intermediate state
transitions.
'BadLexatomSi' is None => no bad lexatom detection.
else, transitions to 'bad lexatom state' are added
on invalid code units.
RETURN: [0] Target map update for the first state.
[1] State Db update for intermediate states.
"""
def simplify(tm_db, tm_end_inv, ToSi):
"""Those states which trigger on the same intervals to 'ToSi' are
            equivalent, i.e. they can be replaced by one state.
"""
equivalence_db = {}
replacement_db = {}
for from_si, interval_list in tm_end_inv.iteritems():
key = tuple(sorted(interval_list))
equivalent_si = equivalence_db.get(key)
if equivalent_si is None: equivalence_db[key] = from_si
else: replacement_db[from_si] = equivalent_si
result = {}
for from_si, tm in tm_db.iteritems():
new_tm = defaultdict(NumberSet)
for target_si, interval in tm.iteritems():
replacement_si = replacement_db.get(target_si)
if replacement_si is not None: target_si = replacement_si
new_tm[target_si].quick_append_interval(interval)
if any(number_set.is_empty() for si, number_set in new_tm.items()):
for si, number_set in new_tm.iteritems():
print "#sim", si, number_set
if from_si in tm_end_inv:
for interval in tm_end_inv[from_si]:
new_tm[ToSi].quick_append_interval(interval)
result[from_si] = new_tm
return result
tm_db, \
tm_end_inv, \
position_db = _get_intermediate_transition_maps(FromSi, ToSi,
IntervalSequenceList)
result_tm_db = simplify(tm_db, tm_end_inv, ToSi)
if BadLexatomSi is not None:
for si, position in position_db.iteritems():
assert position != 0
self._add_transition_to_bad_lexatom_detector(result_tm_db[si],
BadLexatomSi,
position)
for tm in result_tm_db.itervalues():
assert not any(number_set.is_empty() for number_set in tm.itervalues())
# Generate the target map to be inserted into state 'FromSi'.
# Generate list of intermediate states that implement the sequence
# of intervals.
first_tm = result_tm_db.pop(FromSi)
new_state_db = dict(
(si, DFA_State.from_TargetMap(tm)) for si, tm in result_tm_db.iteritems()
)
return first_tm, new_state_db
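# Editor's illustration -- hypothetical values mirroring case (A) of the
# module docstring: if two intermediate states both reach 'ToSi' through the
# single interval [0x80, 0xBF], 'simplify()' above keys them by the same
# sorted interval tuple, keeps the first state as representative, and
# redirects every transition into the second state to the representative.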
def __bunch_iterable(IntervalSequenceList, Index):
"""Iterate over sub-bunches of sequence in 'IntervalSequenceList' which are
the same at the given 'Position'. The 'IntervalSequenceList' must be sorted!
That is, same intervals must be adjacent.
EXAMPLE:
Index = 1
IntervalSequenceList = [
[ interval01, interval12, interval21, ],
[ interval01, interval12, interval21, ],
[ interval02, interval12, interval22, interval30 ],
[ interval02, interval13, interval22, interval30 ],
[ interval02, interval13, interval23, ] ]
    That is, the interval sequences are grouped so that sequences whose
    second interval (Index=1) is equal fall into one group; the yields are
    as follows:
(1) [ [ interval01, interval12, interval21, ],
[ interval01, interval12, interval21, ] ]
(2) [ [ interval02, interval12, interval22, interval30 ] ]
(3) [ [ interval02, interval13, interval22, interval30 ],
[ interval02, interval13, interval23, ] ]
    NOTE: Two sequences of different lengths are *never* grouped together
          -- on purpose.
    The index is provided in order to avoid the creation of shortened sub-
    sequences. Instead, the caller focuses on the sub-sequences behind 'Index'.
Obviously, this function only makes sense if the intervals before 'Index'
are all the same.
    YIELDS: [0] Interval which is the same for the group of sequences at 'Index'.
[1] Group of sequences.
[2] 'LastF' -- telling whether the interval is the last in the
sequence.
"""
prev_interval = None
prev_i = -1
prev_last_f = False
for i, sequence in enumerate(IntervalSequenceList):
interval = sequence[Index]
if interval.is_empty(): print "#bu:", interval; assert False
L = len(sequence)
last_f = L == Index + 1
if interval != prev_interval or last_f != prev_last_f:
if prev_i != -1:
yield prev_interval, IntervalSequenceList[prev_i:i], prev_last_f
prev_i = i
prev_interval = interval
prev_last_f = last_f
yield prev_interval, IntervalSequenceList[prev_i:], prev_last_f
def _get_intermediate_transition_maps(FromSi, ToSi, interval_sequence_list):
"""Several transitions are to be inserted in between state 'FromSi' and
'ToSi'. The transitions result from the list of sequences in
'interval_sequence_list'. This function develops the transition maps
    of the states involved. Also, it records the 'position' of each
state in the code unit sequence. Thus, the caller may insert error-detectors
on invalid code units.
FORBIDDEN: There cannot be a sequence that starts with the exact intervals
               as a shorter sequence. Example:
[ (0, 1), (0, 2), (0, 3) ] #
[ (0, 1), (0, 2) ] # Bad, very bad!
This would mean that after (0, 1), (0, 2) the 'ToSi' is reached, but then
after (0, 3) again. The result is an *iteration* on 'ToSi'
--(0, 1)-->( A )--(0, 2)-->( ToSi )---->
| |
'-<-(0, 3)--'
Consequently, such a list of interval sequences cannot represent a linear
transition.
RETURNS: [0] Transition Map DB: state_index --> 'TransitionMap'
with TransitionMap: target_state_index --> Interval
That is 'TransitionMap[target_state_index]' tells through which
intervals the 'state_index' triggers to 'target_states'
The 'Transition Map DB' does not contain transitions to the
'ToSi'--the end state.
[1] Inverse End Transition Map:
Transitions to the end state are stored inversely:
from_state_index --> list of Interval-s
The end state can be reached by more than one interval, so a
list of Interval-s is associated with the transition
'from_state_index' to 'ToSi'.
             [2] PositionDB: state_index --> position in code unit sequence.
"""
# Sort the list of sequences, so that adjacent intervals are listed one
# after the other. This is necessary for '__bunch_iterable()' to function.
interval_sequence_list.sort()
worklist = [
        # The state at 'FromSi' is concerned with the intervals
# at position '0' in the 'interval_sequence_list'. The list needs to
# be grouped according to the first interval, and for each distinct
# interval a transition to another state must be generated.
(FromSi, interval_sequence_list, 0)
]
tm_db = defaultdict(dict)
tm_end_inv = defaultdict(list)
position_db = {}
while worklist:
si, sequence_group, index = worklist.pop()
# -- State 'si' triggers on intervals at 'index' in 'sequence_group'.
tm = tm_db[si]
# -- State 'si' comes at position 'index' in a sequence of code units.
# (position of 'FromSi' shall not appear in the 'position_db' since
# the error detection of the first state is done in the caller.)
if si != FromSi: position_db[si] = index
# Group the sequences according to the interval at position 'index'.
for interval, sub_group, last_f in __bunch_iterable(sequence_group, index):
# Transit to new state for the given sub-group of sequences.
if not last_f:
# For each 'interval' a deliberate target state is generated.
# => each target state is only reached by a single Interval.
new_si = state_machine_index.get()
tm[new_si] = interval
worklist.append((new_si, sub_group, index+1))
else:
# If the 'interval' is the last in the sequence, the 'ToSi' is
# reached. Obviously this may/should happen more than once.
tm_end_inv[si].append(interval)
return tm_db, tm_end_inv, position_db
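# Worked example (editor's note): for the single sequence from the module
# docstring, [Interval(0xD80C), Interval(0xDE9D)] with FromSi = A and
# ToSi = B, one intermediate state 'i' is generated and the function returns
#     tm_db       = { A: { i: Interval(0xD80C) } }
#     tm_end_inv  = { i: [Interval(0xDE9D)] }
#     position_db = { i: 1 }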
| false
| true
|
7908d2573074e550da58f0b678637c9c8a53bac2
| 2,436
|
py
|
Python
|
src/visualization_simulator/src/ui/ui_birdview.py
|
AndyYangjd/data_fuse_demo
|
3e19e42c2e02795e8b11aa60e5310c02a3e04316
|
[
"BSD-3-Clause"
] | 8
|
2020-10-09T13:43:51.000Z
|
2022-01-17T06:18:52.000Z
|
src/visualization_simulator/src/ui/ui_birdview.py
|
AndyYangjd/data_fuse_demo
|
3e19e42c2e02795e8b11aa60e5310c02a3e04316
|
[
"BSD-3-Clause"
] | null | null | null |
src/visualization_simulator/src/ui/ui_birdview.py
|
AndyYangjd/data_fuse_demo
|
3e19e42c2e02795e8b11aa60e5310c02a3e04316
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'birdview.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_birdview(object):
def setupUi(self, birdview):
birdview.setObjectName("birdview")
birdview.resize(552, 551)
self.verticalLayout = QtWidgets.QVBoxLayout(birdview)
self.verticalLayout.setContentsMargins(5, 5, 5, 5)
self.verticalLayout.setSpacing(2)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSpacing(5)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btnOpenFile = QtWidgets.QPushButton(birdview)
self.btnOpenFile.setObjectName("btnOpenFile")
self.horizontalLayout.addWidget(self.btnOpenFile)
self.lab_file_name = QtWidgets.QLabel(birdview)
self.lab_file_name.setAlignment(QtCore.Qt.AlignCenter)
self.lab_file_name.setObjectName("lab_file_name")
self.horizontalLayout.addWidget(self.lab_file_name)
self.horizontalLayout.setStretch(0, 1)
self.horizontalLayout.setStretch(1, 4)
self.verticalLayout.addLayout(self.horizontalLayout)
self.vbox_bd = QtWidgets.QVBoxLayout()
self.vbox_bd.setObjectName("vbox_bd")
self.verticalLayout.addLayout(self.vbox_bd)
self.hbox_btn_slider = QtWidgets.QHBoxLayout()
self.hbox_btn_slider.setObjectName("hbox_btn_slider")
self.media_grid = QtWidgets.QGridLayout()
self.media_grid.setObjectName("media_grid")
self.hbox_btn_slider.addLayout(self.media_grid)
self.verticalLayout.addLayout(self.hbox_btn_slider)
self.verticalLayout.setStretch(0, 1)
self.verticalLayout.setStretch(1, 20)
self.verticalLayout.setStretch(2, 1)
self.retranslateUi(birdview)
QtCore.QMetaObject.connectSlotsByName(birdview)
def retranslateUi(self, birdview):
_translate = QtCore.QCoreApplication.translate
birdview.setWindowTitle(_translate("birdview", "BirdView"))
self.btnOpenFile.setText(_translate("birdview", "Open xls"))
self.lab_file_name.setText(_translate("birdview", "xls_name"))
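# Editor's sketch -- not part of the pyuic5 output: a generated Ui_* class is
# typically applied to a plain QWidget as shown below.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    widget = QtWidgets.QWidget()
    ui = Ui_birdview()
    ui.setupUi(widget)
    widget.show()
    sys.exit(app.exec_())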
| 43.5
| 75
| 0.71798
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_birdview(object):
def setupUi(self, birdview):
birdview.setObjectName("birdview")
birdview.resize(552, 551)
self.verticalLayout = QtWidgets.QVBoxLayout(birdview)
self.verticalLayout.setContentsMargins(5, 5, 5, 5)
self.verticalLayout.setSpacing(2)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSpacing(5)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btnOpenFile = QtWidgets.QPushButton(birdview)
self.btnOpenFile.setObjectName("btnOpenFile")
self.horizontalLayout.addWidget(self.btnOpenFile)
self.lab_file_name = QtWidgets.QLabel(birdview)
self.lab_file_name.setAlignment(QtCore.Qt.AlignCenter)
self.lab_file_name.setObjectName("lab_file_name")
self.horizontalLayout.addWidget(self.lab_file_name)
self.horizontalLayout.setStretch(0, 1)
self.horizontalLayout.setStretch(1, 4)
self.verticalLayout.addLayout(self.horizontalLayout)
self.vbox_bd = QtWidgets.QVBoxLayout()
self.vbox_bd.setObjectName("vbox_bd")
self.verticalLayout.addLayout(self.vbox_bd)
self.hbox_btn_slider = QtWidgets.QHBoxLayout()
self.hbox_btn_slider.setObjectName("hbox_btn_slider")
self.media_grid = QtWidgets.QGridLayout()
self.media_grid.setObjectName("media_grid")
self.hbox_btn_slider.addLayout(self.media_grid)
self.verticalLayout.addLayout(self.hbox_btn_slider)
self.verticalLayout.setStretch(0, 1)
self.verticalLayout.setStretch(1, 20)
self.verticalLayout.setStretch(2, 1)
self.retranslateUi(birdview)
QtCore.QMetaObject.connectSlotsByName(birdview)
def retranslateUi(self, birdview):
_translate = QtCore.QCoreApplication.translate
birdview.setWindowTitle(_translate("birdview", "BirdView"))
self.btnOpenFile.setText(_translate("birdview", "Open xls"))
self.lab_file_name.setText(_translate("birdview", "xls_name"))
| true
| true
|