hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72ee556b494b4e5facd04b0cd2dbe2c186e4740 | 1,156 | py | Python | plugins/math/komand_math/actions/calculate/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/math/komand_math/actions/calculate/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/math/komand_math/actions/calculate/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import insightconnect_plugin_runtime
from .schema import CalculateInput, CalculateOutput, Input, Output, Component
from insightconnect_plugin_runtime.exceptions import PluginException
from simpleeval import simple_eval
class Calculate(insightconnect_plugin_runtime.Action):
    """Action that evaluates a basic arithmetic equation string safely."""

    _result = None

    # Characters permitted in an equation besides decimal digits.  The
    # exponent operator "**" still works because its two "*" characters are
    # individually allowed; the original list also contained a literal "**"
    # entry that was dead code (a single character can never equal it).
    _ALLOWED_CHARS = frozenset("+-*/%().")

    def __init__(self):
        # NOTE: use the class name explicitly instead of self.__class__ so
        # that a future subclass does not recurse infinitely in __init__.
        super(Calculate, self).__init__(
            name="calculate",
            description=Component.DESCRIPTION,
            input=CalculateInput(),
            output=CalculateOutput(),
        )

    def run(self, params=None):
        """Evaluate the equation from the action input.

        :param params: input dict containing ``Input.EQUATION``
        :raises PluginException: if the equation could not be evaluated
        :return: dict mapping ``Output.RESULT`` to the computed value
        """
        if params is None:  # avoid a shared mutable default argument
            params = {}
        equation = params.get(Input.EQUATION)
        result = Calculate.execute_equation(equation)
        if result is None:
            raise PluginException(
                cause="Calculation error",
                assistance="Error occurred while calculating the equation. Check to make sure it is valid and try "
                "again. ",
            )
        return {Output.RESULT: result}

    @staticmethod
    def execute_equation(eq):
        """Strip disallowed characters from ``eq`` and evaluate the rest.

        Only decimal digits and basic arithmetic symbols survive the filter;
        the sanitized string is then handed to ``simpleeval.simple_eval``.
        """
        sanitized = "".join(
            c for c in eq if c.isdecimal() or c in Calculate._ALLOWED_CHARS
        )
        return simple_eval(sanitized)
| 33.028571 | 115 | 0.622837 | import insightconnect_plugin_runtime
from .schema import CalculateInput, CalculateOutput, Input, Output, Component
from insightconnect_plugin_runtime.exceptions import PluginException
from simpleeval import simple_eval
class Calculate(insightconnect_plugin_runtime.Action):
_result = None
def __init__(self):
super(self.__class__, self).__init__(
name="calculate",
description=Component.DESCRIPTION,
input=CalculateInput(),
output=CalculateOutput(),
)
def run(self, params={}):
equation = params.get(Input.EQUATION)
result = Calculate.execute_equation(equation)
if result is None:
raise PluginException(
cause="Calculation error",
assistance="Error occurred while calculating the equation. Check to make sure it is valid and try "
"again. ",
)
return {Output.RESULT: result}
@staticmethod
def execute_equation(eq):
eq = str().join([c for c in eq if (c.isdecimal() or c in ["+", "-", "*", "/", "**", "%", "(", ")", "."])])
return simple_eval(eq)
| true | true |
f72ee7470b394ffe1d61f50214d906f893d3278c | 937 | py | Python | server/api/urls.py | di-unipi-socc/MicroAnalyserServer | aa066df5cf329328e82c1d72cf09ddc249887a64 | [
"MIT"
] | 13 | 2019-10-01T20:23:38.000Z | 2021-11-24T09:14:04.000Z | server/api/urls.py | di-unipi-socc/microFreshener | aa066df5cf329328e82c1d72cf09ddc249887a64 | [
"MIT"
] | 21 | 2019-11-04T15:29:23.000Z | 2022-02-26T10:41:20.000Z | server/api/urls.py | di-unipi-socc/MicroAnalyserServer | aa066df5cf329328e82c1d72cf09ddc249887a64 | [
"MIT"
] | 2 | 2021-10-11T07:48:36.000Z | 2021-12-14T21:02:03.000Z | # api/urls.py
from django.contrib import admin
from django.urls import include, path
from rest_framework.urlpatterns import format_suffix_patterns
from api import view
# URL routing table for the microTOSCA API.  Each route maps a plain path to
# a function-based view in ``api.view``: model access, analysis, refinement
# (generic and Istio-specific), import/export, examples and team details.
urlpatterns = [
    path('model', view.graph),
    path('analyse', view.graph_analysis),
    path('export', view.graph_export),
    path('import', view.graph_import),
    path('refine/istio', view.graph_refine_istio),
    path('refine', view.graph_refine),
    path('example', view.graph_examples),
    # team api
    # path('team/', view.team, name='microtosca-team'),
    path('team/<str:team_name>', view.team_detail, name='microtosca-team-get'),
]
# Let clients append a format suffix (e.g. ``.json`` or ``.api``) to any of
# the routes above to request a specific representation.
urlpatterns = format_suffix_patterns(urlpatterns)
# We don't necessarily need to add these extra url patterns in,
# but it gives us a simple, clean way of referring to a specific format.
# http http://127.0.0.1:8000/snippets.json # JSON suffix
# http http://127.0.0.1:8000/snippets.api # Browsable API suffix
| 34.703704 | 79 | 0.721451 |
from django.contrib import admin
from django.urls import include, path
from rest_framework.urlpatterns import format_suffix_patterns
from api import view
urlpatterns = [
path('model', view.graph),
path('analyse', view.graph_analysis),
path('export', view.graph_export),
path('import', view.graph_import),
path('refine/istio', view.graph_refine_istio),
path('refine', view.graph_refine),
path('example', view.graph_examples),
path('team/<str:team_name>', view.team_detail, name='microtosca-team-get'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
# but it gives us a simple, clean way of referring to a specific format.
# http http://127.0.0.1:8000/snippets.json # JSON suffix
# http http://127.0.0.1:8000/snippets.api # Browsable API suffix
| true | true |
f72ee8622daf69c1cb94611e88480ed29b8c08ab | 4,390 | py | Python | paddlers/models/ppdet/utils/profiler.py | Xiaracto/PaddleRS | 6e9b99a79fe740775daff310bd1ca3e6fbc86f80 | [
"Apache-2.0"
] | 1 | 2022-03-30T10:59:58.000Z | 2022-03-30T10:59:58.000Z | paddlers/models/ppdet/utils/profiler.py | wondering516/PaddleRS | b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f | [
"Apache-2.0"
] | null | null | null | paddlers/models/ppdet/utils/profiler.py | wondering516/PaddleRS | b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
# A global variable to record the number of calling times for profiler
# functions. It is used to specify the tracing range of training steps.
_profiler_step_id = 0
# A global variable to avoid parsing from string every time.
_profiler_options = None
class ProfilerOptions(object):
    '''
    Profiler configuration parsed from a string of the form
    "key1=value1;key2=value2;key3=value3".

    For example:
      "profile_path=model.profile"
      "batch_range=[50, 60]; profile_path=model.profile"
      "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile"

    Supported key-value pairs:
      batch_range      - an integer list, e.g. [100, 110].
      state            - one of 'CPU', 'GPU' or 'All'.
      sorted_key       - one of 'calls', 'total', 'max', 'min' or 'ave'.
      tracer_option    - one of 'Default', 'OpDetail' or 'AllOpDetail'.
      profile_path     - path used to save the serialized profile data,
                         which can be used to generate a timeline.
      exit_on_finished - a boolean.
    '''

    def __init__(self, options_str):
        assert isinstance(options_str, str)
        # Defaults are kept for any key that is absent or fails validation.
        self._options = {
            'batch_range': [10, 20],
            'state': 'All',
            'sorted_key': 'total',
            'tracer_option': 'Default',
            'profile_path': '/tmp/profile',
            'exit_on_finished': True
        }
        self._parse_from_string(options_str)

    def _parse_from_string(self, options_str):
        # Whitespace is insignificant; pairs are ';'-separated "key=value".
        compact = options_str.replace(' ', '')
        for pair in compact.split(';'):
            key, value = pair.split('=')
            if key == 'batch_range':
                bounds = [int(v) for v in
                          value.replace('[', '').replace(']', '').split(',')]
                # Accept only a non-negative, strictly increasing range;
                # otherwise the default batch_range is preserved.
                if len(bounds) >= 2 and bounds[0] >= 0 and bounds[1] > bounds[0]:
                    self._options[key] = bounds
            elif key == 'exit_on_finished':
                self._options[key] = value.lower() in ("yes", "true", "t", "1")
            elif key in ('state', 'sorted_key', 'tracer_option', 'profile_path'):
                self._options[key] = value
            # Unknown keys are silently ignored.

    def __getitem__(self, name):
        option = self._options.get(name)
        if option is None:
            raise ValueError(
                "ProfilerOptions does not have an option named %s." % name)
        return option
def add_profiler_step(options_str=None):
    '''
    Count one training step and drive PaddlePaddle's operator-level profiler.

    Each call to this function is treated as a single profiler step; a
    module-level counter tracks how many steps have elapsed, and profiling is
    started/stopped at the step boundaries given by ``batch_range``.

    Args:
        options_str - string used to build the shared ProfilerOptions.
            When None the profiler stays disabled and this call is a no-op.
    '''
    if options_str is None:
        return

    global _profiler_step_id
    global _profiler_options

    # Parse the option string only once; reuse it on every later call.
    if _profiler_options is None:
        _profiler_options = ProfilerOptions(options_str)

    start_step, stop_step = _profiler_options['batch_range'][:2]
    if _profiler_step_id == start_step:
        paddle.utils.profiler.start_profiler(
            _profiler_options['state'], _profiler_options['tracer_option'])
    elif _profiler_step_id == stop_step:
        paddle.utils.profiler.stop_profiler(
            _profiler_options['sorted_key'], _profiler_options['profile_path'])
        if _profiler_options['exit_on_finished']:
            sys.exit(0)

    _profiler_step_id += 1
| 39.196429 | 81 | 0.628246 |
import sys
import paddle
_profiler_step_id = 0
_profiler_options = None
class ProfilerOptions(object):
def __init__(self, options_str):
assert isinstance(options_str, str)
self._options = {
'batch_range': [10, 20],
'state': 'All',
'sorted_key': 'total',
'tracer_option': 'Default',
'profile_path': '/tmp/profile',
'exit_on_finished': True
}
self._parse_from_string(options_str)
def _parse_from_string(self, options_str):
for kv in options_str.replace(' ', '').split(';'):
key, value = kv.split('=')
if key == 'batch_range':
value_list = value.replace('[', '').replace(']', '').split(',')
value_list = list(map(int, value_list))
if len(value_list) >= 2 and value_list[0] >= 0 and value_list[
1] > value_list[0]:
self._options[key] = value_list
elif key == 'exit_on_finished':
self._options[key] = value.lower() in ("yes", "true", "t", "1")
elif key in [
'state', 'sorted_key', 'tracer_option', 'profile_path'
]:
self._options[key] = value
def __getitem__(self, name):
if self._options.get(name, None) is None:
raise ValueError(
"ProfilerOptions does not have an option named %s." % name)
return self._options[name]
def add_profiler_step(options_str=None):
if options_str is None:
return
global _profiler_step_id
global _profiler_options
if _profiler_options is None:
_profiler_options = ProfilerOptions(options_str)
if _profiler_step_id == _profiler_options['batch_range'][0]:
paddle.utils.profiler.start_profiler(
_profiler_options['state'], _profiler_options['tracer_option'])
elif _profiler_step_id == _profiler_options['batch_range'][1]:
paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'],
_profiler_options['profile_path'])
if _profiler_options['exit_on_finished']:
sys.exit(0)
_profiler_step_id += 1
| true | true |
f72ee96b3252f32009b52b97e42cb9a9308172b4 | 2,405 | py | Python | tools/train.py | yoxu515/aot-benchmark | 5a7665fc8e0f0e64bc8ba6028b15d9ab32f4c56a | [
"BSD-3-Clause"
] | 105 | 2021-11-16T12:43:59.000Z | 2022-03-31T08:05:11.000Z | tools/train.py | lingyunwu14/aot-benchmark | 99f74f051c91ac221e44f3edab3534ae4dd233f7 | [
"BSD-3-Clause"
] | 14 | 2021-11-18T09:52:36.000Z | 2022-03-31T16:26:32.000Z | tools/train.py | lingyunwu14/aot-benchmark | 99f74f051c91ac221e44f3edab3534ae4dd233f7 | [
"BSD-3-Clause"
] | 17 | 2021-11-16T13:28:29.000Z | 2022-03-29T02:14:48.000Z | import importlib
import random
import sys
sys.setrecursionlimit(10000)
sys.path.append('.')
sys.path.append('..')
import torch.multiprocessing as mp
from networks.managers.trainer import Trainer
def main_worker(gpu, cfg, enable_amp=True):
    """Per-process training entry point used with torch.multiprocessing.spawn.

    :param gpu: index of the spawned process, passed to Trainer as its rank
    :param cfg: engine configuration object for the selected training stage
    :param enable_amp: whether automatic mixed precision is enabled
    """
    # Initiate a training manager
    trainer = Trainer(rank=gpu, cfg=cfg, enable_amp=enable_amp)
    # Start Training
    trainer.sequential_training()
def main():
    """Parse command-line options, build the stage config, launch training.

    Command-line values override the defaults of the stage configuration
    module ``configs.<stage>``; sentinel defaults (``-1`` or an empty
    string) mean "keep the config's own value".
    """
    import argparse
    parser = argparse.ArgumentParser(description="Train VOS")
    parser.add_argument('--exp_name', type=str, default='')
    parser.add_argument('--stage', type=str, default='pre')
    parser.add_argument('--model', type=str, default='aott')

    parser.add_argument('--start_gpu', type=int, default=0)
    parser.add_argument('--gpu_num', type=int, default=-1)
    parser.add_argument('--batch_size', type=int, default=-1)
    parser.add_argument('--dist_url', type=str, default='')
    parser.add_argument('--amp', action='store_true')
    parser.set_defaults(amp=False)

    parser.add_argument('--pretrained_path', type=str, default='')

    parser.add_argument('--datasets', nargs='+', type=str, default=[])
    parser.add_argument('--lr', type=float, default=-1.)
    # Integer options: the original used float defaults (-1.), which made the
    # unset value a float even though type=int only converts supplied args.
    parser.add_argument('--total_step', type=int, default=-1)
    parser.add_argument('--start_step', type=int, default=-1)

    args = parser.parse_args()

    # The stage name selects which EngineConfig module is used.
    engine_config = importlib.import_module('configs.' + args.stage)
    cfg = engine_config.EngineConfig(args.exp_name, args.model)

    if len(args.datasets) > 0:
        cfg.DATASETS = args.datasets

    cfg.DIST_START_GPU = args.start_gpu
    if args.gpu_num > 0:
        cfg.TRAIN_GPUS = args.gpu_num
    if args.batch_size > 0:
        cfg.TRAIN_BATCH_SIZE = args.batch_size

    if args.pretrained_path != '':
        cfg.PRETRAIN_MODEL = args.pretrained_path

    if args.lr > 0:
        cfg.TRAIN_LR = args.lr
    if args.total_step > 0:
        cfg.TRAIN_TOTAL_STEPS = args.total_step
    if args.start_step > 0:
        cfg.TRAIN_START_STEP = args.start_step

    if args.dist_url == '':
        # Pick a pseudo-random port suffix (1230x .. 1239x range) to reduce
        # collisions when several jobs run on the same host.
        cfg.DIST_URL = 'tcp://127.0.0.1:123' + str(random.randint(0, 9)) + str(
            random.randint(0, 9))
    else:
        cfg.DIST_URL = args.dist_url

    # Use torch.multiprocessing.spawn to launch distributed processes
    mp.spawn(main_worker, nprocs=cfg.TRAIN_GPUS, args=(cfg, args.amp))


if __name__ == '__main__':
    main()
| 30.0625 | 79 | 0.675676 | import importlib
import random
import sys
sys.setrecursionlimit(10000)
sys.path.append('.')
sys.path.append('..')
import torch.multiprocessing as mp
from networks.managers.trainer import Trainer
def main_worker(gpu, cfg, enable_amp=True):
trainer = Trainer(rank=gpu, cfg=cfg, enable_amp=enable_amp)
trainer.sequential_training()
def main():
import argparse
parser = argparse.ArgumentParser(description="Train VOS")
parser.add_argument('--exp_name', type=str, default='')
parser.add_argument('--stage', type=str, default='pre')
parser.add_argument('--model', type=str, default='aott')
parser.add_argument('--start_gpu', type=int, default=0)
parser.add_argument('--gpu_num', type=int, default=-1)
parser.add_argument('--batch_size', type=int, default=-1)
parser.add_argument('--dist_url', type=str, default='')
parser.add_argument('--amp', action='store_true')
parser.set_defaults(amp=False)
parser.add_argument('--pretrained_path', type=str, default='')
parser.add_argument('--datasets', nargs='+', type=str, default=[])
parser.add_argument('--lr', type=float, default=-1.)
parser.add_argument('--total_step', type=int, default=-1.)
parser.add_argument('--start_step', type=int, default=-1.)
args = parser.parse_args()
engine_config = importlib.import_module('configs.' + args.stage)
cfg = engine_config.EngineConfig(args.exp_name, args.model)
if len(args.datasets) > 0:
cfg.DATASETS = args.datasets
cfg.DIST_START_GPU = args.start_gpu
if args.gpu_num > 0:
cfg.TRAIN_GPUS = args.gpu_num
if args.batch_size > 0:
cfg.TRAIN_BATCH_SIZE = args.batch_size
if args.pretrained_path != '':
cfg.PRETRAIN_MODEL = args.pretrained_path
if args.lr > 0:
cfg.TRAIN_LR = args.lr
if args.total_step > 0:
cfg.TRAIN_TOTAL_STEPS = args.total_step
if args.start_step > 0:
cfg.TRAIN_START_STEP = args.start_step
if args.dist_url == '':
cfg.DIST_URL = 'tcp://127.0.0.1:123' + str(random.randint(0, 9)) + str(
random.randint(0, 9))
else:
cfg.DIST_URL = args.dist_url
mp.spawn(main_worker, nprocs=cfg.TRAIN_GPUS, args=(cfg, args.amp))
if __name__ == '__main__':
main()
| true | true |
f72eea59e503d70ee01dac7d729bac23d7d76c56 | 1,283 | py | Python | alipay/aop/api/response/AlipayInsUnderwriteUserPolicyQueryResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayInsUnderwriteUserPolicyQueryResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayInsUnderwriteUserPolicyQueryResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.InsPolicy import InsPolicy
class AlipayInsUnderwriteUserPolicyQueryResponse(AlipayResponse):
    """Response wrapper for the insurance underwrite user-policy query API."""

    def __init__(self):
        super(AlipayInsUnderwriteUserPolicyQueryResponse, self).__init__()
        # Both fields stay None until parse_response_content() fills them in.
        self._policys = None
        self._total = None

    @property
    def policys(self):
        # List of InsPolicy objects, or None before parsing.
        return self._policys

    @policys.setter
    def policys(self, value):
        # Only list payloads are accepted; every entry is coerced to an
        # InsPolicy instance (dict entries go through from_alipay_dict).
        if isinstance(value, list):
            self._policys = [
                item if isinstance(item, InsPolicy)
                else InsPolicy.from_alipay_dict(item)
                for item in value
            ]

    @property
    def total(self):
        # Value of the response's 'total' field.
        return self._total

    @total.setter
    def total(self, value):
        self._total = value

    def parse_response_content(self, response_content):
        """Populate this object's fields from the raw response content."""
        response = super(AlipayInsUnderwriteUserPolicyQueryResponse, self).parse_response_content(response_content)
        if 'policys' in response:
            self.policys = response['policys']
        if 'total' in response:
            self.total = response['total']
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.InsPolicy import InsPolicy
class AlipayInsUnderwriteUserPolicyQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayInsUnderwriteUserPolicyQueryResponse, self).__init__()
self._policys = None
self._total = None
@property
def policys(self):
return self._policys
@policys.setter
def policys(self, value):
if isinstance(value, list):
self._policys = list()
for i in value:
if isinstance(i, InsPolicy):
self._policys.append(i)
else:
self._policys.append(InsPolicy.from_alipay_dict(i))
@property
def total(self):
return self._total
@total.setter
def total(self, value):
self._total = value
def parse_response_content(self, response_content):
response = super(AlipayInsUnderwriteUserPolicyQueryResponse, self).parse_response_content(response_content)
if 'policys' in response:
self.policys = response['policys']
if 'total' in response:
self.total = response['total']
| true | true |
f72eead581d421c3d44843fa5cbdc580e431a508 | 9,270 | py | Python | Manuscript-figures/xcorr_lag_composite.py | ehultee/helheim-fiesta | ed4e3f4ceac58137c1a4066941783a5c38d29c9b | [
"MIT"
] | null | null | null | Manuscript-figures/xcorr_lag_composite.py | ehultee/helheim-fiesta | ed4e3f4ceac58137c1a4066941783a5c38d29c9b | [
"MIT"
] | null | null | null | Manuscript-figures/xcorr_lag_composite.py | ehultee/helheim-fiesta | ed4e3f4ceac58137c1a4066941783a5c38d29c9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Six-panel xcorr *and* lag
Created on Thu Feb 4 18:21:08 2021
@author: lizz
"""
## Colormap settings: divergent map with symmetric limits for the signed
## cross-correlation panels, sequential map for the lag (in days) panels.
div_colors = 'RdBu' # choose divergent colormap for xcorr
# lag_colors = 'PiYG' # choose divergent colormap for lag
corrnorm_min, corrnorm_max = -0.3, 0.3
# lagnorm_min, lagnorm_max = -365, 365
lag_colors = 'Greens'
lagnorm_min, lagnorm_max = 0, 365
## set matplotlib font size defaults
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
## marker style per point: first entry = significant, second = insignificant
sig_markers = ['o', 'x']
## black-white hillshade topo underneath
rgb2 = ls.shade(np.asarray(b_hel), cmap=plt.get_cmap('gray'), blend_mode='overlay',
                dx=np.mean(np.diff(x_hel)), dy=np.mean(np.diff(y_hel)), vert_exag=5.)
## 2x3 grid: top row = amplitude of max xcorr, bottom row = lag at max xcorr;
## columns = SMB, runoff, terminus position.
fig, ((ax1, ax2, ax3), (ax4,ax5,ax6)) = plt.subplots(nrows=2,ncols=3, figsize=(12, 8),
                                  # constrained_layout=True,
                                  sharex=True, sharey=True,
                                  gridspec_kw={'wspace':0.01})
## Panel (top-left): xcorr amplitude vs catchment SMB
ax1.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc1 = ax1.scatter(np.asarray(xys)[smb_significance,0], np.asarray(xys)[smb_significance,1],
                  c=np.asarray(smb_corr_amax)[smb_significance], cmap=div_colors, marker=sig_markers[0],
                  vmin=corrnorm_min, vmax=corrnorm_max)
ax1.scatter(np.asarray(xys)[np.invert(smb_significance),0], np.asarray(xys)[np.invert(smb_significance),1],
            c=np.asarray(smb_corr_amax)[np.invert(smb_significance)], cmap=div_colors, marker=sig_markers[1],
            vmin=corrnorm_min, vmax=corrnorm_max) #different marker for insig values
# sc1 = ax1.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=smb_corr_amax, cmap=div_colors,
#                   vmin=corrnorm_min, vmax=corrnorm_max)
# ## set up correctly scaled colorbar
# div1 = make_axes_locatable(ax1)
# cax1 = div1.append_axes("right", size="5%", pad=0.1)
# plt.colorbar(sc1, cax=cax1)
# cb1.ax.set_title('AMax. xcorr')
## Axis limits/labels in metres; tick labels rescaled to km.
ax1.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        ylabel='Northing [km]', title='Catchment SMB')
## Panel (top-middle): xcorr amplitude vs catchment runoff
ax2.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc2 = ax2.scatter(np.asarray(xys)[runoff_significance,0], np.asarray(xys)[runoff_significance,1],
                  c=np.asarray(runoff_corr_amax)[runoff_significance], cmap=div_colors, marker=sig_markers[0],
                  vmin=corrnorm_min, vmax=corrnorm_max)
ax2.scatter(np.asarray(xys)[np.invert(runoff_significance),0], np.asarray(xys)[np.invert(runoff_significance),1],
            c=np.asarray(runoff_corr_amax)[np.invert(runoff_significance)], cmap=div_colors, marker=sig_markers[1],
            vmin=corrnorm_min, vmax=corrnorm_max) # distinguish insig values
# sc2 = ax2.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=runoff_corr_amax, cmap=div_colors,
#                   vmin=corrnorm_min, vmax=corrnorm_max)
# ## set up correctly scaled colorbar
# div2 = make_axes_locatable(ax2)
# cax2 = div2.append_axes("right", size="5%", pad=0.1)
# fig.colorbar(sc2, cax=cax2)
# cb2.ax.set_title('AMax. xcorr')
ax2.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        title='Catchment runoff')
## Panel (top-right): xcorr amplitude vs terminus position; carries the
## shared colorbar for all three xcorr panels.
ax3.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc3 = ax3.scatter(np.asarray(xys)[terminus_significance,0], np.asarray(xys)[terminus_significance,1],
                  c=np.asarray(terminus_corr_amax)[terminus_significance], cmap=div_colors, marker=sig_markers[0],
                  vmin=corrnorm_min, vmax=corrnorm_max)
ax3.scatter(np.asarray(xys)[np.invert(terminus_significance),0], np.asarray(xys)[np.invert(terminus_significance),1],
            c=np.asarray(terminus_corr_amax)[np.invert(terminus_significance)], cmap=div_colors, marker=sig_markers[1],
            vmin=corrnorm_min, vmax=corrnorm_max)
# sc3 = ax3.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=terminus_corr_amax, cmap=div_colors,
#                   vmin=corrnorm_min, vmax=corrnorm_max)
## set up correctly scaled colorbar - one for all xcorr plots
div3 = make_axes_locatable(ax3)
cax3 = div3.append_axes("right", size="5%", pad=0.1)
cb3 = fig.colorbar(sc3, cax=cax3)
cb3.ax.set_ylabel('AMax. xcorr')
ax3.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        title='Terminus position', aspect=1.)
## SECOND ROW
## Panel (bottom-left): lag at peak xcorr vs catchment SMB
ax4.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc4 = ax4.scatter(np.asarray(xys)[smb_significance,0], np.asarray(xys)[smb_significance,1],
                  c=np.asarray(smb_lag_amax)[smb_significance], cmap=lag_colors, marker=sig_markers[0],
                  vmin=lagnorm_min, vmax=lagnorm_max)
ax4.scatter(np.asarray(xys)[np.invert(smb_significance),0], np.asarray(xys)[np.invert(smb_significance),1],
            c=np.asarray(smb_lag_amax)[np.invert(smb_significance)], cmap=lag_colors, marker=sig_markers[1],
            vmin=lagnorm_min, vmax=lagnorm_max)
# sc4 = ax4.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=smb_lag_amax, cmap=lag_colors,
#                   vmin=lagnorm_min, vmax=lagnorm_max)
# ## set up correctly scaled colorbar
# div4 = make_axes_locatable(ax4)
# cax4 = div4.append_axes("right", size="5%", pad=0.1)
# plt.colorbar(sc4, cax=cax4)
# cb1.ax.set_title('Lag [d] at peak xcorr')
ax4.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        xlabel='Easting [km]', ylabel='Northing [km]')
## Panel (bottom-middle): lag at peak xcorr vs catchment runoff
ax5.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc5 = ax5.scatter(np.asarray(xys)[runoff_significance,0], np.asarray(xys)[runoff_significance,1],
                  c=np.asarray(runoff_lag_amax)[runoff_significance], cmap=lag_colors, marker=sig_markers[0],
                  vmin=lagnorm_min, vmax=lagnorm_max)
ax5.scatter(np.asarray(xys)[np.invert(runoff_significance),0], np.asarray(xys)[np.invert(runoff_significance),1],
            c=np.asarray(runoff_lag_amax)[np.invert(runoff_significance)], cmap=lag_colors, marker=sig_markers[1],
            vmin=lagnorm_min, vmax=lagnorm_max)
# sc5 = ax5.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=runoff_lag_amax, cmap=lag_colors,
#                   vmin=lagnorm_min, vmax=lagnorm_max)
# ## set up correctly scaled colorbar
# div5 = make_axes_locatable(ax5)
# cax5 = div5.append_axes("right", size="5%", pad=0.1)
# fig.colorbar(sc5, cax=cax5)
# cb2.ax.set_title('Lag [d] at peak xcorr')
ax5.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        xlabel='Easting [km]')
## Panel (bottom-right): lag at peak xcorr vs terminus position; carries the
## shared colorbar for all three lag panels.
ax6.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc6 = ax6.scatter(np.asarray(xys)[terminus_significance,0], np.asarray(xys)[terminus_significance,1],
                  c=np.asarray(terminus_lag_amax)[terminus_significance], cmap=lag_colors, marker=sig_markers[0],
                  vmin=lagnorm_min, vmax=lagnorm_max)
ax6.scatter(np.asarray(xys)[np.invert(terminus_significance),0], np.asarray(xys)[np.invert(terminus_significance),1],
            c=np.asarray(terminus_lag_amax)[np.invert(terminus_significance)], cmap=lag_colors, marker=sig_markers[1],
            vmin=lagnorm_min, vmax=lagnorm_max)
# sc6 = ax6.scatter(np.asarray(xys)[:,0], np.asarray(xys)[:,1], c=terminus_lag_amax, cmap=lag_colors,
#                   vmin=lagnorm_min, vmax=lagnorm_max)
## set up correctly scaled colorbar
div6 = make_axes_locatable(ax6)
cax6 = div6.append_axes("right", size="5%", pad=0.1)
cb6 = fig.colorbar(sc6, cax=cax6)
cb6.ax.set_ylabel('Lag [d] at peak xcorr')
cb6.set_ticks([0, 60, 120, 180, 240, 300, 360])
ax6.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        xlabel='Easting [km]', aspect=1.)
# plt.tight_layout()
# plt.show()
# plt.savefig('/Users/lizz/Desktop/20210204-helheim-xcorr_lag_composite')
div_colors = 'RdBu'
0.3
lag_colors = 'Greens'
lagnorm_min, lagnorm_max = 0, 365
IGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE)
plt.rc('axes', titlesize=BIGGER_SIZE)
plt.rc('axes', labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=SMALL_SIZE)
plt.rc('figure', titlesize=BIGGER_SIZE)
sig_markers = ['o', 'x']
p=plt.get_cmap('gray'), blend_mode='overlay',
dx=np.mean(np.diff(x_hel)), dy=np.mean(np.diff(y_hel)), vert_exag=5.)
fig, ((ax1, ax2, ax3), (ax4,ax5,ax6)) = plt.subplots(nrows=2,ncols=3, figsize=(12, 8),
sharex=True, sharey=True,
gridspec_kw={'wspace':0.01})
ax1.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc1 = ax1.scatter(np.asarray(xys)[smb_significance,0], np.asarray(xys)[smb_significance,1],
c=np.asarray(smb_corr_amax)[smb_significance], cmap=div_colors, marker=sig_markers[0],
vmin=corrnorm_min, vmax=corrnorm_max)
ax1.scatter(np.asarray(xys)[np.invert(smb_significance),0], np.asarray(xys)[np.invert(smb_significance),1],
c=np.asarray(smb_corr_amax)[np.invert(smb_significance)], cmap=div_colors, marker=sig_markers[1],
vmin=corrnorm_min, vmax=corrnorm_max)
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
ylabel='Northing [km]', title='Catchment SMB')
ax2.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc2 = ax2.scatter(np.asarray(xys)[runoff_significance,0], np.asarray(xys)[runoff_significance,1],
c=np.asarray(runoff_corr_amax)[runoff_significance], cmap=div_colors, marker=sig_markers[0],
vmin=corrnorm_min, vmax=corrnorm_max)
ax2.scatter(np.asarray(xys)[np.invert(runoff_significance),0], np.asarray(xys)[np.invert(runoff_significance),1],
c=np.asarray(runoff_corr_amax)[np.invert(runoff_significance)], cmap=div_colors, marker=sig_markers[1],
vmin=corrnorm_min, vmax=corrnorm_max)
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
title='Catchment runoff')
ax3.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc3 = ax3.scatter(np.asarray(xys)[terminus_significance,0], np.asarray(xys)[terminus_significance,1],
c=np.asarray(terminus_corr_amax)[terminus_significance], cmap=div_colors, marker=sig_markers[0],
vmin=corrnorm_min, vmax=corrnorm_max)
ax3.scatter(np.asarray(xys)[np.invert(terminus_significance),0], np.asarray(xys)[np.invert(terminus_significance),1],
c=np.asarray(terminus_corr_amax)[np.invert(terminus_significance)], cmap=div_colors, marker=sig_markers[1],
vmin=corrnorm_min, vmax=corrnorm_max)
ght", size="5%", pad=0.1)
cb3 = fig.colorbar(sc3, cax=cax3)
cb3.ax.set_ylabel('AMax. xcorr')
ax3.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
title='Terminus position', aspect=1.)
rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc4 = ax4.scatter(np.asarray(xys)[smb_significance,0], np.asarray(xys)[smb_significance,1],
c=np.asarray(smb_lag_amax)[smb_significance], cmap=lag_colors, marker=sig_markers[0],
vmin=lagnorm_min, vmax=lagnorm_max)
ax4.scatter(np.asarray(xys)[np.invert(smb_significance),0], np.asarray(xys)[np.invert(smb_significance),1],
c=np.asarray(smb_lag_amax)[np.invert(smb_significance)], cmap=lag_colors, marker=sig_markers[1],
vmin=lagnorm_min, vmax=lagnorm_max)
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
xlabel='Easting [km]', ylabel='Northing [km]')
ax5.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc5 = ax5.scatter(np.asarray(xys)[runoff_significance,0], np.asarray(xys)[runoff_significance,1],
c=np.asarray(runoff_lag_amax)[runoff_significance], cmap=lag_colors, marker=sig_markers[0],
vmin=lagnorm_min, vmax=lagnorm_max)
ax5.scatter(np.asarray(xys)[np.invert(runoff_significance),0], np.asarray(xys)[np.invert(runoff_significance),1],
c=np.asarray(runoff_lag_amax)[np.invert(runoff_significance)], cmap=lag_colors, marker=sig_markers[1],
vmin=lagnorm_min, vmax=lagnorm_max)
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
xlabel='Easting [km]')
ax6.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc6 = ax6.scatter(np.asarray(xys)[terminus_significance,0], np.asarray(xys)[terminus_significance,1],
c=np.asarray(terminus_lag_amax)[terminus_significance], cmap=lag_colors, marker=sig_markers[0],
vmin=lagnorm_min, vmax=lagnorm_max)
ax6.scatter(np.asarray(xys)[np.invert(terminus_significance),0], np.asarray(xys)[np.invert(terminus_significance),1],
c=np.asarray(terminus_lag_amax)[np.invert(terminus_significance)], cmap=lag_colors, marker=sig_markers[1],
vmin=lagnorm_min, vmax=lagnorm_max)
ax6 = div6.append_axes("right", size="5%", pad=0.1)
cb6 = fig.colorbar(sc6, cax=cax6)
cb6.ax.set_ylabel('Lag [d] at peak xcorr')
cb6.set_ticks([0, 60, 120, 180, 240, 300, 360])
ax6.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
xlabel='Easting [km]', aspect=1.)
| true | true |
f72eeb8fcef6921a2eaeb55802122ae8eb9fcfcd | 159,724 | py | Python | tests/test_mishchenko_refllib.py | ghislainp/mishchenko_brf | de7fe70730b53f17fb7e7aa9a45f08bf7d97abd1 | [
"MIT"
] | 2 | 2021-12-19T17:43:48.000Z | 2022-01-30T21:08:54.000Z | tests/test_mishchenko_refllib.py | ghislainp/mishchenko_brf | de7fe70730b53f17fb7e7aa9a45f08bf7d97abd1 | [
"MIT"
] | null | null | null | tests/test_mishchenko_refllib.py | ghislainp/mishchenko_brf | de7fe70730b53f17fb7e7aa9a45f08bf7d97abd1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `mishchenko_brf` package."""
import numpy as np
from mishchenko_brf.lib.refl import brf
def test_brf():
    """Check brf() against the stored reference outputs.

    Inputs (single-scattering albedo and zero-padded Legendre expansion of
    the phase function) come from setup(); expected values come from a
    reference run stored in results().
    """
    ssalb, _, legendre = setup()
    # NOTE(review): len(legendre) is the zero-padded length (700), not the
    # true coefficient count (642) that setup() also returns; the trailing
    # zero coefficients are assumed harmless to brf() -- confirm against
    # the underlying Fortran routine.
    _, spherical_albedo, albedo, _, r = brf(ssalb, len(legendre), legendre)

    expected_spherical_albedo, expected_albedo, expected_r1 = results()

    # The spherical-albedo expectation was unpacked (with a typo) but never
    # asserted in the original test; complete the obviously intended check.
    np.testing.assert_allclose(
        spherical_albedo, expected_spherical_albedo, atol=1e-6, rtol=0
    )
    np.testing.assert_allclose(albedo, expected_albedo, atol=1e-6, rtol=0)

    # Flatten the lower triangle of r[1] row by row to match the layout of
    # the reference values in results().
    r1 = np.concatenate([r[1, i, : i + 1] for i in range(r.shape[1])])
    np.testing.assert_allclose(r1, expected_r1, atol=1e-5, rtol=0)
def setup():
    """Build the single-scattering inputs for the brf() reference test.

    Returns a 3-tuple:
      - the single-scattering albedo (float),
      - the number of non-zero Legendre coefficients (642),
      - the coefficients zero-padded to length 700, the fixed array size
        the brf() routine expects (trailing entries are zero).
    """
    # Single-scattering albedo of the test case.
    ssalb = 0.85404045e00
    # 642 Legendre expansion coefficients of the scattering phase function.
    Legendre_coef = [
        0.1000000e01,
        0.2512562e01,
        0.3759305e01,
        0.4408389e01,
        0.5536463e01,
        0.6260982e01,
        0.7525636e01,
        0.8312788e01,
        0.9542491e01,
        0.1040885e02,
        0.1151645e02,
        0.1244280e02,
        0.1343854e02,
        0.1442002e02,
        0.1533074e02,
        0.1628946e02,
        0.1717182e02,
        0.1807816e02,
        0.1898665e02,
        0.1978204e02,
        0.2073036e02,
        0.2142049e02,
        0.2241713e02,
        0.2301598e02,
        0.2401247e02,
        0.2456069e02,
        0.2552589e02,
        0.2607232e02,
        0.2695832e02,
        0.2752722e02,
        0.2831653e02,
        0.2892665e02,
        0.2962000e02,
        0.3025463e02,
        0.3086891e02,
        0.3150598e02,
        0.3207453e02,
        0.3268248e02,
        0.3323146e02,
        0.3378450e02,
        0.3433640e02,
        0.3482245e02,
        0.3538333e02,
        0.3580127e02,
        0.3636525e02,
        0.3672771e02,
        0.3728034e02,
        0.3760455e02,
        0.3812729e02,
        0.3843143e02,
        0.3890899e02,
        0.3920687e02,
        0.3962926e02,
        0.3992750e02,
        0.4029233e02,
        0.4059085e02,
        0.4090206e02,
        0.4119521e02,
        0.4146075e02,
        0.4174035e02,
        0.4196960e02,
        0.4222747e02,
        0.4242858e02,
        0.4265858e02,
        0.4283710e02,
        0.4303618e02,
        0.4319451e02,
        0.4336267e02,
        0.4350045e02,
        0.4364008e02,
        0.4375514e02,
        0.4386992e02,
        0.4395939e02,
        0.4405318e02,
        0.4411450e02,
        0.4419058e02,
        0.4422209e02,
        0.4428264e02,
        0.4428387e02,
        0.4432995e02,
        0.4430155e02,
        0.4433324e02,
        0.4427669e02,
        0.4429349e02,
        0.4421068e02,
        0.4421187e02,
        0.4410481e02,
        0.4408970e02,
        0.4396023e02,
        0.4392847e02,
        0.4377812e02,
        0.4372965e02,
        0.4355963e02,
        0.4349478e02,
        0.4330600e02,
        0.4322534e02,
        0.4301853e02,
        0.4292279e02,
        0.4269857e02,
        0.4258853e02,
        0.4234756e02,
        0.4222396e02,
        0.4196694e02,
        0.4183048e02,
        0.4155822e02,
        0.4140946e02,
        0.4112286e02,
        0.4096230e02,
        0.4066235e02,
        0.4049042e02,
        0.4017813e02,
        0.3999523e02,
        0.3967166e02,
        0.3947818e02,
        0.3914435e02,
        0.3894071e02,
        0.3859761e02,
        0.3838426e02,
        0.3803282e02,
        0.3781024e02,
        0.3745135e02,
        0.3722008e02,
        0.3685457e02,
        0.3661513e02,
        0.3624382e02,
        0.3599677e02,
        0.3562045e02,
        0.3536629e02,
        0.3498576e02,
        0.3472502e02,
        0.3434105e02,
        0.3407421e02,
        0.3368756e02,
        0.3341511e02,
        0.3302651e02,
        0.3274894e02,
        0.3235911e02,
        0.3207689e02,
        0.3168649e02,
        0.3140011e02,
        0.3100977e02,
        0.3071973e02,
        0.3033004e02,
        0.3003681e02,
        0.2964833e02,
        0.2935240e02,
        0.2896567e02,
        0.2866749e02,
        0.2828303e02,
        0.2798304e02,
        0.2760134e02,
        0.2729996e02,
        0.2692148e02,
        0.2661913e02,
        0.2624432e02,
        0.2594138e02,
        0.2557065e02,
        0.2526752e02,
        0.2490123e02,
        0.2459831e02,
        0.2423680e02,
        0.2393445e02,
        0.2357803e02,
        0.2327663e02,
        0.2292556e02,
        0.2262546e02,
        0.2228000e02,
        0.2198153e02,
        0.2164193e02,
        0.2134540e02,
        0.2101185e02,
        0.2071756e02,
        0.2039027e02,
        0.2009849e02,
        0.1977763e02,
        0.1948862e02,
        0.1917433e02,
        0.1888834e02,
        0.1858075e02,
        0.1829802e02,
        0.1799722e02,
        0.1771798e02,
        0.1742405e02,
        0.1714851e02,
        0.1686151e02,
        0.1658986e02,
        0.1630983e02,
        0.1604226e02,
        0.1576923e02,
        0.1550590e02,
        0.1523988e02,
        0.1498093e02,
        0.1472193e02,
        0.1446749e02,
        0.1421550e02,
        0.1396569e02,
        0.1372068e02,
        0.1347561e02,
        0.1323754e02,
        0.1299730e02,
        0.1276613e02,
        0.1253080e02,
        0.1230645e02,
        0.1207611e02,
        0.1185852e02,
        0.1163322e02,
        0.1142231e02,
        0.1120210e02,
        0.1099778e02,
        0.1078270e02,
        0.1058487e02,
        0.1037494e02,
        0.1018351e02,
        0.9978738e01,
        0.9793600e01,
        0.9593997e01,
        0.9415044e01,
        0.9220600e01,
        0.9047715e01,
        0.8858418e01,
        0.8691482e01,
        0.8507312e01,
        0.8346198e01,
        0.8167129e01,
        0.8011710e01,
        0.7837708e01,
        0.7687854e01,
        0.7518876e01,
        0.7374456e01,
        0.7210453e01,
        0.7071336e01,
        0.6912254e01,
        0.6778307e01,
        0.6624084e01,
        0.6495174e01,
        0.6345745e01,
        0.6221738e01,
        0.6077033e01,
        0.5957794e01,
        0.5817740e01,
        0.5703134e01,
        0.5567654e01,
        0.5457548e01,
        0.5326563e01,
        0.5220821e01,
        0.5094248e01,
        0.4992739e01,
        0.4870492e01,
        0.4773085e01,
        0.4655075e01,
        0.4561642e01,
        0.4447778e01,
        0.4358190e01,
        0.4248381e01,
        0.4162514e01,
        0.4056666e01,
        0.3974395e01,
        0.3872413e01,
        0.3793618e01,
        0.3695406e01,
        0.3619966e01,
        0.3525429e01,
        0.3453228e01,
        0.3362271e01,
        0.3293193e01,
        0.3205718e01,
        0.3139651e01,
        0.3055564e01,
        0.2992397e01,
        0.2911601e01,
        0.2851228e01,
        0.2773628e01,
        0.2715944e01,
        0.2641446e01,
        0.2586348e01,
        0.2514857e01,
        0.2462248e01,
        0.2393671e01,
        0.2343453e01,
        0.2277698e01,
        0.2229778e01,
        0.2166754e01,
        0.2121041e01,
        0.2060659e01,
        0.2017065e01,
        0.1959237e01,
        0.1917674e01,
        0.1862314e01,
        0.1822700e01,
        0.1769722e01,
        0.1731977e01,
        0.1681298e01,
        0.1645344e01,
        0.1596882e01,
        0.1562643e01,
        0.1516319e01,
        0.1483723e01,
        0.1439458e01,
        0.1408435e01,
        0.1366152e01,
        0.1336633e01,
        0.1296260e01,
        0.1268180e01,
        0.1229642e01,
        0.1202937e01,
        0.1166165e01,
        0.1140775e01,
        0.1105699e01,
        0.1081566e01,
        0.1048119e01,
        0.1025186e01,
        0.9933033e00,
        0.9715168e00,
        0.9411347e00,
        0.9204422e00,
        0.8914999e00,
        0.8718511e00,
        0.8442892e00,
        0.8256361e00,
        0.7993970e00,
        0.7816934e00,
        0.7567216e00,
        0.7399231e00,
        0.7161648e00,
        0.7002287e00,
        0.6776319e00,
        0.6625175e00,
        0.6410319e00,
        0.6267001e00,
        0.6062772e00,
        0.5926905e00,
        0.5732835e00,
        0.5604061e00,
        0.5419698e00,
        0.5297674e00,
        0.5122584e00,
        0.5006981e00,
        0.4840745e00,
        0.4731249e00,
        0.4573463e00,
        0.4469774e00,
        0.4320051e00,
        0.4221882e00,
        0.4079849e00,
        0.3986924e00,
        0.3852225e00,
        0.3764283e00,
        0.3636572e00,
        0.3553362e00,
        0.3432310e00,
        0.3353594e00,
        0.3238883e00,
        0.3164434e00,
        0.3055761e00,
        0.2985361e00,
        0.2882435e00,
        0.2815877e00,
        0.2718419e00,
        0.2655505e00,
        0.2563248e00,
        0.2503791e00,
        0.2416479e00,
        0.2360299e00,
        0.2277687e00,
        0.2224615e00,
        0.2146470e00,
        0.2096341e00,
        0.2022440e00,
        0.1975101e00,
        0.1905229e00,
        0.1860533e00,
        0.1794487e00,
        0.1752294e00,
        0.1689879e00,
        0.1650056e00,
        0.1591086e00,
        0.1553506e00,
        0.1497804e00,
        0.1462348e00,
        0.1409744e00,
        0.1376297e00,
        0.1326632e00,
        0.1295086e00,
        0.1248204e00,
        0.1218456e00,
        0.1174212e00,
        0.1146165e00,
        0.1104419e00,
        0.1077980e00,
        0.1038600e00,
        0.1013680e00,
        0.9765404e-01,
        0.9530568e-01,
        0.9180371e-01,
        0.8959107e-01,
        0.8628968e-01,
        0.8420526e-01,
        0.8109362e-01,
        0.7913032e-01,
        0.7619812e-01,
        0.7434919e-01,
        0.7158666e-01,
        0.6984571e-01,
        0.6724356e-01,
        0.6560456e-01,
        0.6315396e-01,
        0.6161117e-01,
        0.5930377e-01,
        0.5785177e-01,
        0.5567961e-01,
        0.5431328e-01,
        0.5226885e-01,
        0.5098332e-01,
        0.4905947e-01,
        0.4785016e-01,
        0.4604013e-01,
        0.4490269e-01,
        0.4320006e-01,
        0.4213037e-01,
        0.4052908e-01,
        0.3952327e-01,
        0.3801755e-01,
        0.3707194e-01,
        0.3565637e-01,
        0.3476748e-01,
        0.3343689e-01,
        0.3260145e-01,
        0.3135097e-01,
        0.3056588e-01,
        0.2939089e-01,
        0.2865322e-01,
        0.2754936e-01,
        0.2685635e-01,
        0.2581950e-01,
        0.2516853e-01,
        0.2419479e-01,
        0.2358340e-01,
        0.2266910e-01,
        0.2209496e-01,
        0.2123660e-01,
        0.2069752e-01,
        0.1989183e-01,
        0.1938574e-01,
        0.1862960e-01,
        0.1815454e-01,
        0.1744504e-01,
        0.1699918e-01,
        0.1633353e-01,
        0.1591513e-01,
        0.1529073e-01,
        0.1489815e-01,
        0.1431255e-01,
        0.1394424e-01,
        0.1339511e-01,
        0.1304962e-01,
        0.1253477e-01,
        0.1221073e-01,
        0.1172810e-01,
        0.1142421e-01,
        0.1097185e-01,
        0.1068691e-01,
        0.1026299e-01,
        0.9995839e-02,
        0.9598632e-02,
        0.9348197e-02,
        0.8976075e-02,
        0.8741341e-02,
        0.8392774e-02,
        0.8172784e-02,
        0.7846332e-02,
        0.7640186e-02,
        0.7334491e-02,
        0.7141344e-02,
        0.6855129e-02,
        0.6674184e-02,
        0.6406246e-02,
        0.6236754e-02,
        0.5985964e-02,
        0.5827218e-02,
        0.5592512e-02,
        0.5443850e-02,
        0.5224227e-02,
        0.5085025e-02,
        0.4879544e-02,
        0.4749216e-02,
        0.4556995e-02,
        0.4434989e-02,
        0.4255197e-02,
        0.4140997e-02,
        0.3972854e-02,
        0.3865974e-02,
        0.3708747e-02,
        0.3608730e-02,
        0.3461733e-02,
        0.3368151e-02,
        0.3230736e-02,
        0.3143185e-02,
        0.3014746e-02,
        0.2932847e-02,
        0.2812813e-02,
        0.2736210e-02,
        0.2624045e-02,
        0.2552404e-02,
        0.2447605e-02,
        0.2380611e-02,
        0.2282708e-02,
        0.2220067e-02,
        0.2128618e-02,
        0.2070056e-02,
        0.1984648e-02,
        0.1929906e-02,
        0.1850152e-02,
        0.1798987e-02,
        0.1724522e-02,
        0.1676708e-02,
        0.1607190e-02,
        0.1562511e-02,
        0.1497620e-02,
        0.1455875e-02,
        0.1395308e-02,
        0.1356308e-02,
        0.1299785e-02,
        0.1263353e-02,
        0.1210611e-02,
        0.1176583e-02,
        0.1127375e-02,
        0.1095598e-02,
        0.1049696e-02,
        0.1020025e-02,
        0.9772121e-03,
        0.9495118e-03,
        0.9095846e-03,
        0.8837259e-03,
        0.8464937e-03,
        0.8223557e-03,
        0.7876397e-03,
        0.7651098e-03,
        0.7327438e-03,
        0.7117181e-03,
        0.6815480e-03,
        0.6619300e-03,
        0.6338120e-03,
        0.6155113e-03,
        0.5893102e-03,
        0.5722408e-03,
        0.5478283e-03,
        0.5319081e-03,
        0.5091634e-03,
        0.4943152e-03,
        0.4731258e-03,
        0.4592787e-03,
        0.4395410e-03,
        0.4266303e-03,
        0.4082493e-03,
        0.3962155e-03,
        0.3791019e-03,
        0.3678880e-03,
        0.3519565e-03,
        0.3415071e-03,
        0.3266762e-03,
        0.3169383e-03,
        0.3031319e-03,
        0.2940574e-03,
        0.2812067e-03,
        0.2727531e-03,
        0.2607960e-03,
        0.2529247e-03,
        0.2418025e-03,
        0.2344753e-03,
        0.2241307e-03,
        0.2173091e-03,
        0.2076865e-03,
        0.2013343e-03,
        0.1923831e-03,
        0.1864691e-03,
        0.1781456e-03,
        0.1726433e-03,
        0.1649075e-03,
        0.1597912e-03,
        0.1526032e-03,
        0.1478454e-03,
        0.1411648e-03,
        0.1367381e-03,
        0.1305278e-03,
        0.1264096e-03,
        0.1206392e-03,
        0.1168121e-03,
        0.1114548e-03,
        0.1079011e-03,
        0.1029286e-03,
        0.9962750e-04,
        0.9500928e-04,
        0.9194008e-04,
        0.8765030e-04,
        0.8479853e-04,
        0.8081775e-04,
        0.7817267e-04,
        0.7448255e-04,
        0.7203030e-04,
        0.6860758e-04,
        0.6633051e-04,
        0.6315326e-04,
        0.6103854e-04,
        0.5809158e-04,
        0.5613237e-04,
        0.5340456e-04,
        0.5159216e-04,
        0.4906537e-04,
        0.4738409e-04,
        0.4504024e-04,
        0.4348066e-04,
        0.4130949e-04,
        0.3986734e-04,
        0.3786116e-04,
        0.3653038e-04,
        0.3467413e-04,
        0.3343939e-04,
        0.3171734e-04,
        0.3057505e-04,
        0.2898660e-04,
        0.2793625e-04,
        0.2646920e-04,
        0.2549635e-04,
        0.2413735e-04,
        0.2323900e-04,
        0.2198645e-04,
        0.2116049e-04,
        0.2000504e-04,
        0.1924253e-04,
        0.1817554e-04,
        0.1747326e-04,
        0.1649066e-04,
        0.1584529e-04,
        0.1494019e-04,
        0.1434660e-04,
        0.1351308e-04,
        0.1296814e-04,
        0.1220153e-04,
        0.1170191e-04,
        0.1099720e-04,
        0.1053953e-04,
        0.9892289e-05,
    ]
    return (
        ssalb,
        len(Legendre_coef),
        # Zero-pad to the fixed array length (700) expected by brf().
        np.pad(Legendre_coef, (0, 700 - len(Legendre_coef))),
    )
def results():
spherical_albedo = 0.1400516239529828
albedo = [
0.57934552e00,
0.55945677e00,
0.53431237e00,
0.50788230e00,
0.48296762e00,
0.46127653e00,
0.44329438e00,
0.42849159e00,
0.41589457e00,
0.40462923e00,
0.39412692e00,
0.38407087e00,
0.37428829e00,
0.36468229e00,
0.35519615e00,
0.34579977e00,
0.33647874e00,
0.32722980e00,
0.31805637e00,
0.30896705e00,
0.29997292e00,
0.29108667e00,
0.28232241e00,
0.27369434e00,
0.26521713e00,
0.25690463e00,
0.24876949e00,
0.24082196e00,
0.23306957e00,
0.22551830e00,
0.21817389e00,
0.21104220e00,
0.20412904e00,
0.19744009e00,
0.19098036e00,
0.18475346e00,
0.17876221e00,
0.17300782e00,
0.16749054e00,
0.16220950e00,
0.15716265e00,
0.15234718e00,
0.14775957e00,
0.14339539e00,
0.13924994e00,
0.13531761e00,
0.13159263e00,
0.12806895e00,
0.12473993e00,
0.12159910e00,
0.11863959e00,
0.11585440e00,
0.11323670e00,
0.11077949e00,
0.10847593e00,
0.10631904e00,
0.10430222e00,
0.10241879e00,
0.10066233e00,
0.99026598e-01,
0.97505502e-01,
0.96093059e-01,
0.94783649e-01,
0.93571737e-01,
0.92452131e-01,
0.91419615e-01,
0.90469383e-01,
0.89596771e-01,
0.88797286e-01,
0.88066630e-01,
0.87400697e-01,
0.86795583e-01,
0.86247541e-01,
0.85752994e-01,
0.85308485e-01,
0.84910698e-01,
0.84556349e-01,
0.84242381e-01,
0.83965667e-01,
0.83723314e-01,
0.83512425e-01,
0.83330259e-01,
0.83174184e-01,
0.83041623e-01,
0.82930155e-01,
0.82837544e-01,
0.82761563e-01,
0.82700156e-01,
0.82651392e-01,
0.82613394e-01,
0.82584500e-01,
0.82563184e-01,
0.82548007e-01,
0.82537644e-01,
0.82530975e-01,
0.82526997e-01,
0.82524881e-01,
0.82523920e-01,
0.82523584e-01,
0.82523517e-01,
]
expected_r1 = np.array(
[
0.38368369e03,
0.25770578e03,
0.23945151e03,
0.16855780e03,
0.18244296e03,
0.16236093e03,
0.10911653e03,
0.12755070e03,
0.12647316e03,
0.10830920e03,
0.67582947e02,
0.83225288e02,
0.89081558e02,
0.82212189e02,
0.66570000e02,
0.39153576e02,
0.50326321e02,
0.57166462e02,
0.56031170e02,
0.47971886e02,
0.36434990e02,
0.21253523e02,
0.28237167e02,
0.33642113e02,
0.34688950e02,
0.31254200e02,
0.25037840e02,
0.18253815e02,
0.11248275e02,
0.15133494e02,
0.18625698e02,
0.20033745e02,
0.18957489e02,
0.16075739e02,
0.12519300e02,
0.92340946e01,
0.62269855e01,
0.82350597e01,
0.10240828e02,
0.11357998e02,
0.11247568e02,
0.10105079e02,
0.84183950e01,
0.66705170e01,
0.51578894e01,
0.37987945e01,
0.48435707e01,
0.59637489e01,
0.67243404e01,
0.69045143e01,
0.65221829e01,
0.57613211e01,
0.48524532e01,
0.39752564e01,
0.32225568e01,
0.25505664e01,
0.31431477e01,
0.38052323e01,
0.43142323e01,
0.45352106e01,
0.44408669e01,
0.40962334e01,
0.36127684e01,
0.30965683e01,
0.26170671e01,
0.22043598e01,
0.18349921e01,
0.22099471e01,
0.26385496e01,
0.29933913e01,
0.31896119e01,
0.31966636e01,
0.30375271e01,
0.27696035e01,
0.24563296e01,
0.21455364e01,
0.18630074e01,
0.16174023e01,
0.13863298e01,
0.16447055e01,
0.19445310e01,
0.22046304e01,
0.23685496e01,
0.24110959e01,
0.23400669e01,
0.21870027e01,
0.19907905e01,
0.17838671e01,
0.15866874e01,
0.14085795e01,
0.12514458e01,
0.10881330e01,
0.12770401e01,
0.14988452e01,
0.16977799e01,
0.18342333e01,
0.18884524e01,
0.18623250e01,
0.17742110e01,
0.16486713e01,
0.15075257e01,
0.13662242e01,
0.12339157e01,
0.11142954e01,
0.10071722e01,
0.88128895e00,
0.10257436e01,
0.11969687e01,
0.13544649e01,
0.14691297e01,
0.15254281e01,
0.15228883e01,
0.14728941e01,
0.13916924e01,
0.12941229e01,
0.11911522e01,
0.10903227e01,
0.99648142e00,
0.91146982e00,
0.83437926e00,
0.73236907e00,
0.84691751e00,
0.98372436e00,
0.11120189e01,
0.12095475e01,
0.12638915e01,
0.12736813e01,
0.12464422e01,
0.11935405e01,
0.11255139e01,
0.10501394e01,
0.97279239e00,
0.89753741e00,
0.82755452e00,
0.76412642e00,
0.70607662e00,
0.62143141e00,
0.71498531e00,
0.82739562e00,
0.93439400e00,
0.10183749e01,
0.10692183e01,
0.10852710e01,
0.10717980e01,
0.10371528e01,
0.98918498e00,
0.93369889e00,
0.87458736e00,
0.81457925e00,
0.75610143e00,
0.70160747e00,
0.65229672e00,
0.60690910e00,
0.53620493e00,
0.61444676e00,
0.70891893e00,
0.79989344e00,
0.87304217e00,
0.91996312e00,
0.93897974e00,
0.93395931e00,
0.91129071e00,
0.87703383e00,
0.83570266e00,
0.79033947e00,
0.74295175e00,
0.69498348e00,
0.64796978e00,
0.60397410e00,
0.56429613e00,
0.52768981e00,
0.46893141e00,
0.53562319e00,
0.61647099e00,
0.69504291e00,
0.75940472e00,
0.80246264e00,
0.82262319e00,
0.82284969e00,
0.80819505e00,
0.78347129e00,
0.75227189e00,
0.71705294e00,
0.67947024e00,
0.64061898e00,
0.60127056e00,
0.56240505e00,
0.52581406e00,
0.49295956e00,
0.46267310e00,
0.41456842e00,
0.47228998e00,
0.54249328e00,
0.61122215e00,
0.66834646e00,
0.70778871e00,
0.72807664e00,
0.73156416e00,
0.72236371e00,
0.70439237e00,
0.68056595e00,
0.65288788e00,
0.62274611e00,
0.59110469e00,
0.55858999e00,
0.52560019e00,
0.49273926e00,
0.46156633e00,
0.43370983e00,
0.40812615e00,
0.36973703e00,
0.42031151e00,
0.48198968e00,
0.54273206e00,
0.59380746e00,
0.62994283e00,
0.64979416e00,
0.65528655e00,
0.64984703e00,
0.63673460e00,
0.61836016e00,
0.59637630e00,
0.57194269e00,
0.54589856e00,
0.51883745e00,
0.49112943e00,
0.46296096e00,
0.43467191e00,
0.40760121e00,
0.38351870e00,
0.36154264e00,
0.33207551e00,
0.37681752e00,
0.43150941e00,
0.48563948e00,
0.53159100e00,
0.56474036e00,
0.58386314e00,
0.59056675e00,
0.58775848e00,
0.57819819e00,
0.56391406e00,
0.54628950e00,
0.52629930e00,
0.50466305e00,
0.48191690e00,
0.45844156e00,
0.43446112e00,
0.41004205e00,
0.38533735e00,
0.36147287e00,
0.34032100e00,
0.32119045e00,
0.29983068e00,
0.33969635e00,
0.38853076e00,
0.43707687e00,
0.47863159e00,
0.50910699e00,
0.52738410e00,
0.53481984e00,
0.53390729e00,
0.52700996e00,
0.51585591e00,
0.50161958e00,
0.48513207e00,
0.46701470e00,
0.44774175e00,
0.42767024e00,
0.40705225e00,
0.38602614e00,
0.36459178e00,
0.34277132e00,
0.32148623e00,
0.30266759e00,
0.28583673e00,
0.27165741e00,
0.30735224e00,
0.35116890e00,
0.39490715e00,
0.43263298e00,
0.46071306e00,
0.47812337e00,
0.48601636e00,
0.48654056e00,
0.48172772e00,
0.47305444e00,
0.46152285e00,
0.44784895e00,
0.43257853e00,
0.41613755e00,
0.39885530e00,
0.38097894e00,
0.36268044e00,
0.34404564e00,
0.32503796e00,
0.30558982e00,
0.28643203e00,
0.26951250e00,
0.25458133e00,
0.24664548e00,
0.27872956e00,
0.31819224e00,
0.35773003e00,
0.39206341e00,
0.41795400e00,
0.43447414e00,
0.44261932e00,
0.44425684e00,
0.44113833e00,
0.43451858e00,
0.42523941e00,
0.41390744e00,
0.40100044e00,
0.38690761e00,
0.37194157e00,
0.35634446e00,
0.34029481e00,
0.32391262e00,
0.30725011e00,
0.29025167e00,
0.27279079e00,
0.25542563e00,
0.24008393e00,
0.22675417e00,
0.22445151e00,
0.25343820e00,
0.28914347e00,
0.32500827e00,
0.35629919e00,
0.38011837e00,
0.39564186e00,
0.40376574e00,
0.40615430e00,
0.40434766e00,
0.39941984e00,
0.39206272e00,
0.38276103e00,
0.37190381e00,
0.35982931e00,
0.34683278e00,
0.33315977e00,
0.31900054e00,
0.30449098e00,
0.28971705e00,
0.27470860e00,
0.25940111e00,
0.24363182e00,
0.22780687e00,
0.21380231e00,
0.20184751e00,
0.20519748e00,
0.23159876e00,
0.26413625e00,
0.29684672e00,
0.32543322e00,
0.34727564e00,
0.36164755e00,
0.36939174e00,
0.37204832e00,
0.37103242e00,
0.36731219e00,
0.36147776e00,
0.35390341e00,
0.34486532e00,
0.33460709e00,
0.32336712e00,
0.31137651e00,
0.29884037e00,
0.28591970e00,
0.27272525e00,
0.25932097e00,
0.24572186e00,
0.23185994e00,
0.21755250e00,
0.20307408e00,
0.19022347e00,
0.17946769e00,
0.18898228e00,
0.21327148e00,
0.24319792e00,
0.27327064e00,
0.29953399e00,
0.31957966e00,
0.33274972e00,
0.33984205e00,
0.34230980e00,
0.34150216e00,
0.33835727e00,
0.33344826e00,
0.32711285e00,
0.31955174e00,
0.31089956e00,
0.30128181e00,
0.29084933e00,
0.27977982e00,
0.26825124e00,
0.25640994e00,
0.24435396e00,
0.23213391e00,
0.21975470e00,
0.20714773e00,
0.19412118e00,
0.18083785e00,
0.16899987e00,
0.15930425e00,
0.17541476e00,
0.19795303e00,
0.22571321e00,
0.25359881e00,
0.27793473e00,
0.29647639e00,
0.30860057e00,
0.31503823e00,
0.31714112e00,
0.31618607e00,
0.31309542e00,
0.30847403e00,
0.30271024e00,
0.29603517e00,
0.28855911e00,
0.28031746e00,
0.27133235e00,
0.26166755e00,
0.25144571e00,
0.24082130e00,
0.22993420e00,
0.21887848e00,
0.20769787e00,
0.19639082e00,
0.18488961e00,
0.17300032e00,
0.16079190e00,
0.14985578e00,
0.14110740e00,
0.16366631e00,
0.18467116e00,
0.21054901e00,
0.23656210e00,
0.25929046e00,
0.27663431e00,
0.28799024e00,
0.29400581e00,
0.29590416e00,
0.29484844e00,
0.29169577e00,
0.28704572e00,
0.28133944e00,
0.27490255e00,
0.26794240e00,
0.26054066e00,
0.25267535e00,
0.24428359e00,
0.23534042e00,
0.22590491e00,
0.21610361e00,
0.20607288e00,
0.19590905e00,
0.18565454e00,
0.17530420e00,
0.16479163e00,
0.15392394e00,
0.14269111e00,
0.13256522e00,
0.12466694e00,
0.15291582e00,
0.17249423e00,
0.19663572e00,
0.22094680e00,
0.24225558e00,
0.25860691e00,
0.26942277e00,
0.27527362e00,
0.27725279e00,
0.27639005e00,
0.27343193e00,
0.26890206e00,
0.26321408e00,
0.25673062e00,
0.24976483e00,
0.24254556e00,
0.23517576e00,
0.22762197e00,
0.21976374e00,
0.21149129e00,
0.20278960e00,
0.19374785e00,
0.18449736e00,
0.17514092e00,
0.16572388e00,
0.15623912e00,
0.14661992e00,
0.13667518e00,
0.12632957e00,
0.11693286e00,
0.10979707e00,
0.14267196e00,
0.16087982e00,
0.18335804e00,
0.20604582e00,
0.22601375e00,
0.24145372e00,
0.25182438e00,
0.25763780e00,
0.25987828e00,
0.25945812e00,
0.25701439e00,
0.25296855e00,
0.24764267e00,
0.24133593e00,
0.23435625e00,
0.22701317e00,
0.21957387e00,
0.21219650e00,
0.20488037e00,
0.19748402e00,
0.18982655e00,
0.18181197e00,
0.17347980e00,
0.16495080e00,
0.15633532e00,
0.14768384e00,
0.13898759e00,
0.13017787e00,
0.12106603e00,
0.11152479e00,
0.10278418e00,
0.96333064e-01,
0.13278867e00,
0.14967601e00,
0.17054874e00,
0.19166234e00,
0.21031891e00,
0.22485405e00,
0.23476954e00,
0.24053511e00,
0.24305005e00,
0.24313784e00,
0.24135487e00,
0.23804264e00,
0.23343392e00,
0.22772875e00,
0.22114034e00,
0.21392128e00,
0.20636588e00,
0.19877388e00,
0.19137226e00,
0.18422794e00,
0.17721902e00,
0.17011781e00,
0.16274700e00,
0.15508358e00,
0.14722840e00,
0.13929746e00,
0.13134745e00,
0.12336826e00,
0.11528943e00,
0.10692697e00,
0.98114364e-01,
0.89966424e-01,
0.84133029e-01,
0.12330588e00,
0.13893479e00,
0.15827183e00,
0.17786814e00,
0.19524175e00,
0.20886324e00,
0.21827731e00,
0.22391967e00,
0.22662000e00,
0.22713451e00,
0.22596700e00,
0.22341314e00,
0.21965274e00,
0.21481460e00,
0.20901735e00,
0.20240258e00,
0.19516377e00,
0.18756257e00,
0.17991112e00,
0.17249928e00,
0.16548070e00,
0.15879016e00,
0.15218471e00,
0.14541364e00,
0.13838096e00,
0.13115591e00,
0.12385615e00,
0.11654575e00,
0.10921578e00,
0.10179584e00,
0.94108447e-01,
0.85958004e-01,
0.78349575e-01,
0.73076993e-01,
0.11430455e00,
0.12874915e00,
0.14663576e00,
0.16478880e00,
0.18092515e00,
0.19363941e00,
0.20251557e00,
0.20795847e00,
0.21073578e00,
0.21154700e00,
0.21085797e00,
0.20893978e00,
0.20595059e00,
0.20198931e00,
0.19712524e00,
0.19142093e00,
0.18495877e00,
0.17787308e00,
0.17037868e00,
0.16277534e00,
0.15539503e00,
0.14847951e00,
0.14204761e00,
0.13587566e00,
0.12965593e00,
0.12321350e00,
0.11657458e00,
0.10985404e00,
0.10312499e00,
0.96382633e-01,
0.89558579e-01,
0.82482606e-01,
0.74937083e-01,
0.67823343e-01,
0.63062489e-01,
0.10584079e00,
0.11918116e00,
0.13571160e00,
0.15250790e00,
0.16746905e00,
0.17930275e00,
0.18762796e00,
0.19281991e00,
0.19558841e00,
0.19658093e00,
0.19623034e00,
0.19479063e00,
0.19241145e00,
0.18918501e00,
0.18516850e00,
0.18039672e00,
0.17489515e00,
0.16870056e00,
0.16189243e00,
0.15463088e00,
0.14718156e00,
0.13989125e00,
0.13307634e00,
0.12685405e00,
0.12105249e00,
0.11533057e00,
0.10943508e00,
0.10333905e00,
0.97149357e-01,
0.90949543e-01,
0.84741533e-01,
0.78459755e-01,
0.71940817e-01,
0.64950287e-01,
0.58292422e-01,
0.53999700e-01,
0.97934157e-01,
0.11025076e00,
0.12552127e00,
0.14105226e00,
0.15490949e00,
0.16590366e00,
0.17368492e00,
0.17860012e00,
0.18130451e00,
0.18239634e00,
0.18227696e00,
0.18118428e00,
0.17926148e00,
0.17660023e00,
0.17325978e00,
0.16927499e00,
0.16466121e00,
0.15942113e00,
0.15355882e00,
0.14710733e00,
0.14017075e00,
0.13296603e00,
0.12582819e00,
0.11912578e00,
0.11307607e00,
0.10758535e00,
0.10230618e00,
0.96913703e-01,
0.91320075e-01,
0.85618503e-01,
0.79903029e-01,
0.74183889e-01,
0.68398476e-01,
0.62389236e-01,
0.55908926e-01,
0.49671900e-01,
0.45807466e-01,
0.90577021e-01,
0.10194721e00,
0.11605130e00,
0.13040775e00,
0.14323507e00,
0.15343815e00,
0.16069512e00,
0.16532598e00,
0.16793491e00,
0.16907367e00,
0.16911317e00,
0.16827461e00,
0.16669342e00,
0.16445951e00,
0.16163501e00,
0.15826119e00,
0.15436088e00,
0.14993919e00,
0.14498582e00,
0.13948333e00,
0.13342866e00,
0.12687427e00,
0.11998184e00,
0.11305442e00,
0.10648688e00,
0.10058656e00,
0.95354967e-01,
0.90465494e-01,
0.85533582e-01,
0.80405675e-01,
0.75155161e-01,
0.69885492e-01,
0.64615801e-01,
0.59286319e-01,
0.53744264e-01,
0.47732841e-01,
0.41884389e-01,
0.38411867e-01,
0.83746620e-01,
0.94243065e-01,
0.10726915e00,
0.12053798e00,
0.13240825e00,
0.14187106e00,
0.14862999e00,
0.15297991e00,
0.15547749e00,
0.15663114e00,
0.15678266e00,
0.15613645e00,
0.15481880e00,
0.15291539e00,
0.15048827e00,
0.14758278e00,
0.14422987e00,
0.14044581e00,
0.13623075e00,
0.13156785e00,
0.12642694e00,
0.12077967e00,
0.11463551e00,
0.10810074e00,
0.10143317e00,
0.95029272e-01,
0.89268476e-01,
0.84254339e-01,
0.79705626e-01,
0.75194120e-01,
0.70498869e-01,
0.65667070e-01,
0.60809318e-01,
0.55953730e-01,
0.51043399e-01,
0.45929730e-01,
0.40349029e-01,
0.34859274e-01,
0.31745121e-01,
0.77413671e-01,
0.87103941e-01,
0.99134557e-01,
0.11139757e00,
0.12238043e00,
0.13115339e00,
0.13744320e00,
0.14152151e00,
0.14390105e00,
0.14504991e00,
0.14528263e00,
0.14478727e00,
0.14368038e00,
0.14204189e00,
0.13993120e00,
0.13739403e00,
0.13446525e00,
0.13116941e00,
0.12751934e00,
0.12351336e00,
0.11913250e00,
0.11434042e00,
0.10909266e00,
0.10336579e00,
0.97215243e-01,
0.90846524e-01,
0.84636919e-01,
0.79015903e-01,
0.74187510e-01,
0.69935963e-01,
0.65807395e-01,
0.61514482e-01,
0.57072140e-01,
0.52595474e-01,
0.48121743e-01,
0.43596964e-01,
0.38876079e-01,
0.33690531e-01,
0.28531600e-01,
0.25744777e-01,
0.71547434e-01,
0.80494061e-01,
0.91605820e-01,
0.10293934e00,
0.11310040e00,
0.12123200e00,
0.12708212e00,
0.13090093e00,
0.13316067e00,
0.13429219e00,
0.13458471e00,
0.13421088e00,
0.13327757e00,
0.13185826e00,
0.13000821e00,
0.12777114e00,
0.12518245e00,
0.12227035e00,
0.11905541e00,
0.11554869e00,
0.11174847e00,
0.10763626e00,
0.10317403e00,
0.98308414e-01,
0.92992358e-01,
0.87237559e-01,
0.81194960e-01,
0.75207628e-01,
0.69733076e-01,
0.65067738e-01,
0.61075501e-01,
0.57294834e-01,
0.53375702e-01,
0.49295910e-01,
0.45172136e-01,
0.41050550e-01,
0.36880266e-01,
0.32519296e-01,
0.27695602e-01,
0.22840958e-01,
0.20352198e-01,
0.66117376e-01,
0.74378133e-01,
0.84641933e-01,
0.95116824e-01,
0.10451740e00,
0.11205351e00,
0.11749266e00,
0.12106522e00,
0.12320609e00,
0.12431186e00,
0.12464833e00,
0.12437376e00,
0.12358582e00,
0.12235164e00,
0.12072182e00,
0.11873700e00,
0.11643104e00,
0.11383259e00,
0.11096542e00,
0.10784754e00,
0.10448926e00,
0.10088971e00,
0.97032204e-01,
0.92879705e-01,
0.88374905e-01,
0.83454721e-01,
0.78095064e-01,
0.72394073e-01,
0.66651307e-01,
0.61332978e-01,
0.56815393e-01,
0.53050254e-01,
0.49585145e-01,
0.46012942e-01,
0.42270541e-01,
0.38473442e-01,
0.34676433e-01,
0.30831696e-01,
0.26799770e-01,
0.22306219e-01,
0.17730433e-01,
0.15513073e-01,
0.61093956e-01,
0.68722166e-01,
0.78203514e-01,
0.87885372e-01,
0.96582450e-01,
0.10356604e00,
0.10862161e00,
0.11196126e00,
0.11398555e00,
0.11505950e00,
0.11542739e00,
0.11523422e00,
0.11456916e00,
0.11349328e00,
0.11205266e00,
0.11028446e00,
0.10822011e00,
0.10588704e00,
0.10330941e00,
0.10050797e00,
0.97499035e-01,
0.94292536e-01,
0.90888672e-01,
0.87273069e-01,
0.83411098e-01,
0.79244599e-01,
0.74700132e-01,
0.69725469e-01,
0.64370766e-01,
0.58887802e-01,
0.53736102e-01,
0.49357101e-01,
0.45792568e-01,
0.42613342e-01,
0.39362472e-01,
0.35933696e-01,
0.32438610e-01,
0.28940061e-01,
0.25393331e-01,
0.21661280e-01,
0.17468622e-01,
0.13150477e-01,
0.11185951e-01,
0.56448560e-01,
0.63493401e-01,
0.72252735e-01,
0.81202179e-01,
0.89248493e-01,
0.95719531e-01,
0.10041723e00,
0.10353691e00,
0.10544755e00,
0.10648517e00,
0.10687432e00,
0.10674787e00,
0.10618718e00,
0.10524774e00,
0.10397130e00,
0.10239167e00,
0.10053762e00,
0.98434702e-01,
0.96106045e-01,
0.93572617e-01,
0.90852953e-01,
0.87962106e-01,
0.84909752e-01,
0.81697099e-01,
0.78312054e-01,
0.74722745e-01,
0.70871852e-01,
0.66679642e-01,
0.62072858e-01,
0.57060491e-01,
0.51846057e-01,
0.46870694e-01,
0.42625420e-01,
0.39239943e-01,
0.36319576e-01,
0.33365458e-01,
0.30227283e-01,
0.27010450e-01,
0.23785481e-01,
0.20511542e-01,
0.17054949e-01,
0.13141878e-01,
0.90698684e-02,
0.73469649e-02,
0.52153420e-01,
0.58660157e-01,
0.66753164e-01,
0.75026073e-01,
0.82470380e-01,
0.88465959e-01,
0.92829920e-01,
0.95742144e-01,
0.97542584e-01,
0.98540656e-01,
0.98942772e-01,
0.98870747e-01,
0.98398849e-01,
0.97577512e-01,
0.96444599e-01,
0.95030688e-01,
0.93361929e-01,
0.91461726e-01,
0.89351647e-01,
0.87051816e-01,
0.84580876e-01,
0.81955560e-01,
0.79189852e-01,
0.76293178e-01,
0.73267482e-01,
0.70102490e-01,
0.66768855e-01,
0.63210987e-01,
0.59346184e-01,
0.55086352e-01,
0.50406374e-01,
0.45463238e-01,
0.40671837e-01,
0.36558144e-01,
0.33334181e-01,
0.30647837e-01,
0.27966481e-01,
0.25096513e-01,
0.22136096e-01,
0.19164244e-01,
0.16145656e-01,
0.12950187e-01,
0.93025165e-02,
0.54607159e-02,
0.39474810e-02,
0.48182234e-01,
0.54192506e-01,
0.61670437e-01,
0.69318332e-01,
0.76205671e-01,
0.81760220e-01,
0.85813068e-01,
0.88529900e-01,
0.90223983e-01,
0.91180287e-01,
0.91588661e-01,
0.91560833e-01,
0.91164641e-01,
0.90445958e-01,
0.89439072e-01,
0.88171646e-01,
0.86667374e-01,
0.84947526e-01,
0.83031908e-01,
0.80939271e-01,
0.78687482e-01,
0.76293327e-01,
0.73772058e-01,
0.71136616e-01,
0.68396017e-01,
0.65552600e-01,
0.62597387e-01,
0.59503239e-01,
0.56216817e-01,
0.52654829e-01,
0.48719283e-01,
0.44357602e-01,
0.39683431e-01,
0.35080492e-01,
0.31097105e-01,
0.28020034e-01,
0.25545072e-01,
0.23114407e-01,
0.20494236e-01,
0.17775815e-01,
0.15046233e-01,
0.12272255e-01,
0.93193343e-02,
0.59020361e-02,
0.22407323e-02,
0.87492354e-03,
0.44510506e-01,
0.50062627e-01,
0.56972671e-01,
0.64043038e-01,
0.70415020e-01,
0.75560495e-01,
0.79323418e-01,
0.81856459e-01,
0.83448350e-01,
0.84361628e-01,
0.84770963e-01,
0.84778860e-01,
0.84447332e-01,
0.83818108e-01,
0.82922280e-01,
0.81784874e-01,
0.80427296e-01,
0.78868859e-01,
0.77127583e-01,
0.75220726e-01,
0.73164918e-01,
0.70976183e-01,
0.68669744e-01,
0.66259526e-01,
0.63757502e-01,
0.61172180e-01,
0.58505908e-01,
0.55750374e-01,
0.52880324e-01,
0.49845133e-01,
0.46562638e-01,
0.42928446e-01,
0.38867969e-01,
0.34455679e-01,
0.30041935e-01,
0.26187586e-01,
0.23246434e-01,
0.20965882e-01,
0.18771386e-01,
0.16391607e-01,
0.13906728e-01,
0.11403601e-01,
0.88423118e-02,
0.60800756e-02,
0.28280553e-02,
-0.70986536e-03,
-0.19648359e-02,
0.41115671e-01,
0.46244897e-01,
0.52630525e-01,
0.59167176e-01,
0.65062307e-01,
0.69828428e-01,
0.73321380e-01,
0.75681835e-01,
0.77175975e-01,
0.78045711e-01,
0.78451805e-01,
0.78488372e-01,
0.78212120e-01,
0.77661060e-01,
0.76863378e-01,
0.75841703e-01,
0.74615397e-01,
0.73201917e-01,
0.71617633e-01,
0.69878295e-01,
0.67999192e-01,
0.65995254e-01,
0.63880973e-01,
0.61670251e-01,
0.59375945e-01,
0.57009004e-01,
0.54577064e-01,
0.52082047e-01,
0.49516134e-01,
0.46855822e-01,
0.44053324e-01,
0.41028392e-01,
0.37672661e-01,
0.33894073e-01,
0.29732887e-01,
0.25506891e-01,
0.21783372e-01,
0.18975813e-01,
0.16882956e-01,
0.14915733e-01,
0.12760971e-01,
0.10479322e-01,
0.81534600e-02,
0.57432470e-02,
0.31131236e-02,
-0.13105665e-04,
-0.34361165e-02,
-0.45660972e-02,
0.37977196e-01,
0.42716030e-01,
0.48617344e-01,
0.54660756e-01,
0.60114693e-01,
0.64529106e-01,
0.67770794e-01,
0.69969401e-01,
0.71370378e-01,
0.72196685e-01,
0.72596297e-01,
0.72655670e-01,
0.72426699e-01,
0.71944013e-01,
0.71233213e-01,
0.70314772e-01,
0.69206156e-01,
0.67923151e-01,
0.66480570e-01,
0.64892717e-01,
0.63173585e-01,
0.61336990e-01,
0.59396494e-01,
0.57365343e-01,
0.55256244e-01,
0.53080887e-01,
0.50849102e-01,
0.48567444e-01,
0.46237227e-01,
0.43851011e-01,
0.41386928e-01,
0.38799874e-01,
0.36011849e-01,
0.32912444e-01,
0.29396715e-01,
0.25476838e-01,
0.21440787e-01,
0.17856820e-01,
0.15185956e-01,
0.13268581e-01,
0.11497635e-01,
0.95190108e-02,
0.73810630e-02,
0.51771011e-02,
0.28821086e-02,
0.37416635e-03,
-0.26154167e-02,
-0.58998531e-02,
-0.68843709e-02,
0.35076261e-01,
0.39454699e-01,
0.44908728e-01,
0.50496329e-01,
0.55542119e-01,
0.59630550e-01,
0.62638551e-01,
0.64685628e-01,
0.65998107e-01,
0.66781543e-01,
0.67172192e-01,
0.67249492e-01,
0.67060962e-01,
0.66638172e-01,
0.66004358e-01,
0.65178059e-01,
0.64175054e-01,
0.63009582e-01,
0.61695036e-01,
0.60244419e-01,
0.58670532e-01,
0.56986067e-01,
0.55203587e-01,
0.53335473e-01,
0.51393870e-01,
0.49390342e-01,
0.47335327e-01,
0.45237295e-01,
0.43101642e-01,
0.40929142e-01,
0.38712744e-01,
0.36432110e-01,
0.34044892e-01,
0.31476185e-01,
0.28615938e-01,
0.25350343e-01,
0.21667363e-01,
0.17823832e-01,
0.14378536e-01,
0.11824818e-01,
0.10038571e-01,
0.84049767e-02,
0.65482627e-02,
0.45205113e-02,
0.24313403e-02,
0.26542676e-03,
-0.20983638e-02,
-0.49343021e-02,
-0.80712391e-02,
-0.89136148e-02,
0.32395583e-01,
0.36441319e-01,
0.41482292e-01,
0.46648715e-01,
0.51316999e-01,
0.55103421e-01,
0.57894230e-01,
0.59799597e-01,
0.61028276e-01,
0.61769772e-01,
0.62149592e-01,
0.62240742e-01,
0.62086754e-01,
0.61716419e-01,
0.61150856e-01,
0.60406826e-01,
0.59498589e-01,
0.58439020e-01,
0.57240289e-01,
0.55914193e-01,
0.54472402e-01,
0.52926507e-01,
0.51288098e-01,
0.49568728e-01,
0.47779780e-01,
0.45932278e-01,
0.44036478e-01,
0.42101391e-01,
0.40134147e-01,
0.38139164e-01,
0.36116980e-01,
0.34061395e-01,
0.31954251e-01,
0.29756844e-01,
0.27398031e-01,
0.24767753e-01,
0.21741455e-01,
0.18279733e-01,
0.14604551e-01,
0.11262298e-01,
0.87792939e-02,
0.70768204e-02,
0.55486909e-02,
0.38075072e-02,
0.19052560e-02,
-0.45109729e-04,
-0.20623163e-02,
-0.42746197e-02,
-0.69636726e-02,
-0.99668913e-02,
-0.10686405e-01,
0.29919144e-01,
0.33657782e-01,
0.38317338e-01,
0.43094639e-01,
0.47413833e-01,
0.50920542e-01,
0.53509615e-01,
0.55282630e-01,
0.56432150e-01,
0.57132918e-01,
0.57500545e-01,
0.57602141e-01,
0.57477590e-01,
0.57153169e-01,
0.56648072e-01,
0.55977501e-01,
0.55154376e-01,
0.54190353e-01,
0.53096451e-01,
0.51883381e-01,
0.50561778e-01,
0.49142279e-01,
0.47635533e-01,
0.46052203e-01,
0.44402875e-01,
0.42697888e-01,
0.40947042e-01,
0.39159160e-01,
0.37341885e-01,
0.35501439e-01,
0.33642113e-01,
0.31765264e-01,
0.29866640e-01,
0.27930658e-01,
0.25920473e-01,
0.23763975e-01,
0.21344284e-01,
0.18521296e-01,
0.15230944e-01,
0.11671958e-01,
0.83932094e-02,
0.59620556e-02,
0.43443809e-02,
0.29380166e-02,
0.13360849e-02,
-0.42020026e-03,
-0.22225457e-02,
-0.40951176e-02,
-0.61711343e-02,
-0.87360274e-02,
-0.11627702e-01,
-0.12245870e-01,
0.27632145e-01,
0.31087343e-01,
0.35394758e-01,
0.39812610e-01,
0.43809064e-01,
0.47056779e-01,
0.49458548e-01,
0.51108032e-01,
0.52182894e-01,
0.52844279e-01,
0.53198714e-01,
0.53307895e-01,
0.53208377e-01,
0.52924160e-01,
0.52472707e-01,
0.51867817e-01,
0.51121201e-01,
0.50243411e-01,
0.49244415e-01,
0.48133992e-01,
0.46921846e-01,
0.45617707e-01,
0.44231363e-01,
0.42772591e-01,
0.41251190e-01,
0.39676767e-01,
0.38058497e-01,
0.36404900e-01,
0.34723751e-01,
0.33022195e-01,
0.31306833e-01,
0.29583117e-01,
0.27853649e-01,
0.26114259e-01,
0.24346959e-01,
0.22509510e-01,
0.20522581e-01,
0.18262342e-01,
0.15582572e-01,
0.12412144e-01,
0.89437887e-02,
0.57348427e-02,
0.33833538e-02,
0.18811167e-02,
0.61761297e-03,
-0.83635934e-03,
-0.24498748e-02,
-0.41174246e-02,
-0.58653555e-02,
-0.78286622e-02,
-0.10294430e-01,
-0.13095230e-01,
-0.13630256e-01,
0.25520688e-01,
0.28714316e-01,
0.32696646e-01,
0.36782503e-01,
0.40480625e-01,
0.43488596e-01,
0.45716532e-01,
0.47250807e-01,
0.48255347e-01,
0.48878800e-01,
0.49219336e-01,
0.49333718e-01,
0.49255468e-01,
0.49006518e-01,
0.48602745e-01,
0.48056658e-01,
0.47378853e-01,
0.46578914e-01,
0.45665938e-01,
0.44648807e-01,
0.43536380e-01,
0.42337555e-01,
0.41061286e-01,
0.39716586e-01,
0.38312454e-01,
0.36857765e-01,
0.35361193e-01,
0.33831026e-01,
0.32275263e-01,
0.30701900e-01,
0.29118838e-01,
0.27533358e-01,
0.25950057e-01,
0.24367830e-01,
0.22775861e-01,
0.21148378e-01,
0.19438008e-01,
0.17566392e-01,
0.15416590e-01,
0.12849388e-01,
0.97933495e-02,
0.64339326e-02,
0.33275706e-02,
0.10872351e-02,
-0.28362754e-03,
-0.14064757e-02,
-0.27260752e-02,
-0.42158812e-02,
-0.57702521e-02,
-0.74158944e-02,
-0.92886547e-02,
-0.11677106e-01,
-0.14403825e-01,
-0.14870039e-01,
0.23571694e-01,
0.26523935e-01,
0.30206172e-01,
0.33985410e-01,
0.37407782e-01,
0.40193867e-01,
0.42260528e-01,
0.43687437e-01,
0.44625875e-01,
0.45212984e-01,
0.45539256e-01,
0.45656916e-01,
0.45596711e-01,
0.45378670e-01,
0.45017261e-01,
0.44523854e-01,
0.43908067e-01,
0.43178570e-01,
0.42343616e-01,
0.41411307e-01,
0.40389728e-01,
0.39287016e-01,
0.38111381e-01,
0.36871076e-01,
0.35574421e-01,
0.34229808e-01,
0.32845549e-01,
0.31430013e-01,
0.29991681e-01,
0.28539073e-01,
0.27080379e-01,
0.25622051e-01,
0.24167063e-01,
0.22712942e-01,
0.21250511e-01,
0.19763654e-01,
0.18227577e-01,
0.16602959e-01,
0.14823279e-01,
0.12781378e-01,
0.10339825e-01,
0.74180500e-02,
0.41875704e-02,
0.12003324e-02,
-0.92070986e-03,
-0.21659597e-02,
-0.31659470e-02,
-0.43731593e-02,
-0.57607451e-02,
-0.72222133e-02,
-0.87847579e-02,
-0.10585406e-01,
-0.12914552e-01,
-0.15580352e-01,
-0.15989216e-01,
0.21772955e-01,
0.24502428e-01,
0.27907638e-01,
0.31403694e-01,
0.34571216e-01,
0.37151974e-01,
0.39069071e-01,
0.40396009e-01,
0.41272413e-01,
0.41824844e-01,
0.42136710e-01,
0.42256072e-01,
0.42211138e-01,
0.42020235e-01,
0.41696560e-01,
0.41250426e-01,
0.40690560e-01,
0.40024836e-01,
0.39260726e-01,
0.38405582e-01,
0.37466776e-01,
0.36451757e-01,
0.35368055e-01,
0.34223344e-01,
0.33025496e-01,
0.31782612e-01,
0.30503126e-01,
0.29195679e-01,
0.27868953e-01,
0.26531136e-01,
0.25188750e-01,
0.23845278e-01,
0.22500057e-01,
0.21148371e-01,
0.19782964e-01,
0.18395819e-01,
0.16978383e-01,
0.15516935e-01,
0.13982293e-01,
0.12313342e-01,
0.10401287e-01,
0.81002032e-02,
0.53164582e-02,
0.22110264e-02,
-0.66281413e-03,
-0.26719193e-02,
-0.38054741e-02,
-0.47028475e-02,
-0.58184257e-02,
-0.71222009e-02,
-0.85073700e-02,
-0.10002246e-01,
-0.11745773e-01,
-0.14030821e-01,
-0.16646272e-01,
-0.17007222e-01,
0.20113155e-01,
0.22637051e-01,
0.25786523e-01,
0.29021049e-01,
0.31953044e-01,
0.34343820e-01,
0.36122233e-01,
0.37356097e-01,
0.38174324e-01,
0.38693711e-01,
0.38991190e-01,
0.39110996e-01,
0.39079025e-01,
0.38912032e-01,
0.38622018e-01,
0.38218360e-01,
0.37708975e-01,
0.37100986e-01,
0.36401182e-01,
0.35616249e-01,
0.34752883e-01,
0.33817910e-01,
0.32818370e-01,
0.31761579e-01,
0.30655265e-01,
0.29507659e-01,
0.28327364e-01,
0.27123058e-01,
0.25902657e-01,
0.24672238e-01,
0.23434937e-01,
0.22190256e-01,
0.20934626e-01,
0.19663457e-01,
0.18373784e-01,
0.17065847e-01,
0.15741942e-01,
0.14402619e-01,
0.13038933e-01,
0.11620902e-01,
0.10082259e-01,
0.83062556e-02,
0.61375611e-02,
0.34738728e-02,
0.47380762e-03,
-0.23008718e-02,
-0.42078327e-02,
-0.52424278e-02,
-0.60545313e-02,
-0.70956550e-02,
-0.83303098e-02,
-0.96523445e-02,
-0.11092181e-01,
-0.12791218e-01,
-0.15045415e-01,
-0.17619489e-01,
-0.17940814e-01,
0.18581720e-01,
0.20915883e-01,
0.23829265e-01,
0.26822245e-01,
0.29536562e-01,
0.31751547e-01,
0.33401374e-01,
0.34548633e-01,
0.35312355e-01,
0.35800364e-01,
0.36083620e-01,
0.36202855e-01,
0.36181841e-01,
0.36035892e-01,
0.35775941e-01,
0.35410490e-01,
0.34946699e-01,
0.34391019e-01,
0.33749603e-01,
0.33028524e-01,
0.32233991e-01,
0.31372394e-01,
0.30450473e-01,
0.29475490e-01,
0.28455326e-01,
0.27398327e-01,
0.26312927e-01,
0.25206672e-01,
0.24085123e-01,
0.22950860e-01,
0.21803081e-01,
0.20638589e-01,
0.19453924e-01,
0.18248010e-01,
0.17023819e-01,
0.15788162e-01,
0.14549590e-01,
0.13314429e-01,
0.12082065e-01,
0.10838719e-01,
0.95478874e-02,
0.81366943e-02,
0.64820210e-02,
0.44224774e-02,
0.18523625e-02,
-0.10648414e-02,
-0.37534775e-02,
-0.55650654e-02,
-0.65100682e-02,
-0.72507914e-02,
-0.82312562e-02,
-0.94086677e-02,
-0.10678349e-01,
-0.12073795e-01,
-0.13739666e-01,
-0.15975196e-01,
-0.18515551e-01,
-0.18804787e-01,
0.17168749e-01,
0.19327780e-01,
0.22023177e-01,
0.24793051e-01,
0.27306184e-01,
0.29358532e-01,
0.30889180e-01,
0.31955894e-01,
0.32668613e-01,
0.33126883e-01,
0.33396173e-01,
0.33513986e-01,
0.33502162e-01,
0.33374704e-01,
0.33141572e-01,
0.32810479e-01,
0.32387879e-01,
0.31879608e-01,
0.31291280e-01,
0.30628527e-01,
0.29897207e-01,
0.29103544e-01,
0.28254325e-01,
0.27356898e-01,
0.26419124e-01,
0.25448931e-01,
0.24453351e-01,
0.23437385e-01,
0.22403084e-01,
0.21349376e-01,
0.20273272e-01,
0.19171963e-01,
0.18045446e-01,
0.16897894e-01,
0.15737535e-01,
0.14574960e-01,
0.13420397e-01,
0.12280968e-01,
0.11158198e-01,
0.10045296e-01,
0.89227157e-02,
0.77492185e-02,
0.64479085e-02,
0.48916428e-02,
0.29151232e-02,
0.41272587e-03,
-0.24413855e-02,
-0.50533284e-02,
-0.67725605e-02,
-0.76342914e-02,
-0.83150435e-02,
-0.92462925e-02,
-0.10376302e-01,
-0.11603034e-01,
-0.12963645e-01,
-0.14606736e-01,
-0.16835131e-01,
-0.19348454e-01,
-0.19612487e-01,
0.15865134e-01,
0.17862506e-01,
0.20356622e-01,
0.22920400e-01,
0.25247563e-01,
0.27149413e-01,
0.28569562e-01,
0.29561354e-01,
0.30226331e-01,
0.30656436e-01,
0.30912070e-01,
0.31027781e-01,
0.31023609e-01,
0.30912362e-01,
0.30703111e-01,
0.30402854e-01,
0.30017478e-01,
0.29552329e-01,
0.29012615e-01,
0.28403712e-01,
0.27731372e-01,
0.27001891e-01,
0.26222091e-01,
0.25399221e-01,
0.24540421e-01,
0.23651907e-01,
0.22738006e-01,
0.21800319e-01,
0.20837659e-01,
0.19847298e-01,
0.18827075e-01,
0.17777784e-01,
0.16704248e-01,
0.15615269e-01,
0.14521973e-01,
0.13435473e-01,
0.12364727e-01,
0.11315233e-01,
0.10288181e-01,
0.92796814e-02,
0.82787825e-02,
0.72630350e-02,
0.61889994e-02,
0.49772034e-02,
0.34969368e-02,
0.15794969e-02,
-0.87806711e-03,
-0.36851568e-02,
-0.62257764e-02,
-0.78527220e-02,
-0.86356523e-02,
-0.92662042e-02,
-0.10158284e-01,
-0.11249557e-01,
-0.12441714e-01,
-0.13776368e-01,
-0.15406553e-01,
-0.17638773e-01,
-0.20130811e-01,
-0.20375945e-01,
0.14662431e-01,
0.16510550e-01,
0.18818781e-01,
0.21192145e-01,
0.23347380e-01,
0.25109937e-01,
0.26427617e-01,
0.27349673e-01,
0.27969934e-01,
0.28373329e-01,
0.28615609e-01,
0.28728599e-01,
0.28730700e-01,
0.28633634e-01,
0.28445678e-01,
0.28173234e-01,
0.27821736e-01,
0.27396221e-01,
0.26901733e-01,
0.26343603e-01,
0.25727598e-01,
0.25059966e-01,
0.24347208e-01,
0.23595579e-01,
0.22810331e-01,
0.21994932e-01,
0.21150416e-01,
0.20275565e-01,
0.19368108e-01,
0.18426621e-01,
0.17452605e-01,
0.16451407e-01,
0.15432071e-01,
0.14405738e-01,
0.13383573e-01,
0.12374919e-01,
0.11386278e-01,
0.10420920e-01,
0.94791828e-02,
0.85583618e-02,
0.76520443e-02,
0.67478423e-02,
0.58222217e-02,
0.48302943e-02,
0.36898023e-02,
0.22657616e-02,
0.38620809e-03,
-0.20464570e-02,
-0.48193890e-02,
-0.72907535e-02,
-0.88232690e-02,
-0.95311515e-02,
-0.10120570e-01,
-0.10982438e-01,
-0.12042844e-01,
-0.13208187e-01,
-0.14525170e-01,
-0.16151898e-01,
-0.18398402e-01,
-0.20873869e-01,
-0.21105917e-01,
0.13552637e-01,
0.15262923e-01,
0.17399436e-01,
0.19596824e-01,
0.21593064e-01,
0.23226669e-01,
0.24449309e-01,
0.25306473e-01,
0.25884863e-01,
0.26262935e-01,
0.26492154e-01,
0.26601870e-01,
0.26609030e-01,
0.26524415e-01,
0.26355645e-01,
0.26108669e-01,
0.25788641e-01,
0.25400463e-01,
0.24949163e-01,
0.24440058e-01,
0.23878768e-01,
0.23270955e-01,
0.22621866e-01,
0.21935685e-01,
0.21214921e-01,
0.20460036e-01,
0.19669790e-01,
0.18842377e-01,
0.17977156e-01,
0.17076379e-01,
0.16145866e-01,
0.15194753e-01,
0.14234038e-01,
0.13274660e-01,
0.12325864e-01,
0.11394359e-01,
0.10484057e-01,
0.95965564e-02,
0.87314118e-02,
0.78865895e-02,
0.70581776e-02,
0.62391688e-02,
0.54168063e-02,
0.45667454e-02,
0.36422682e-02,
0.25576062e-02,
0.11724485e-02,
-0.68855018e-03,
-0.31142109e-02,
-0.58629848e-02,
-0.82644373e-02,
-0.96990969e-02,
-0.10335494e-01,
-0.10892544e-01,
-0.11732457e-01,
-0.12769138e-01,
-0.13914877e-01,
-0.15221934e-01,
-0.16854212e-01,
-0.19125070e-01,
-0.21587910e-01,
-0.21812512e-01,
0.12528375e-01,
0.14111324e-01,
0.16089143e-01,
0.18123835e-01,
0.19972973e-01,
0.21487134e-01,
0.22621555e-01,
0.23418266e-01,
0.23957409e-01,
0.24311502e-01,
0.24528088e-01,
0.24634261e-01,
0.24645658e-01,
0.24572272e-01,
0.24421265e-01,
0.24198342e-01,
0.23908518e-01,
0.23556618e-01,
0.23147549e-01,
0.22686291e-01,
0.22177676e-01,
0.21625938e-01,
0.21034168e-01,
0.20403879e-01,
0.19734910e-01,
0.19025959e-01,
0.18275727e-01,
0.17484382e-01,
0.16654858e-01,
0.15793348e-01,
0.14909030e-01,
0.14012692e-01,
0.13114928e-01,
0.12224767e-01,
0.11348879e-01,
0.10491370e-01,
0.96542966e-02,
0.88380333e-02,
0.80419006e-02,
0.72643128e-02,
0.65027624e-02,
0.57532680e-02,
0.50087180e-02,
0.42559886e-02,
0.34697335e-02,
0.26006724e-02,
0.15582409e-02,
0.19586713e-03,
-0.16648462e-02,
-0.41000666e-02,
-0.68322704e-02,
-0.91604441e-02,
-0.10493010e-01,
-0.11061922e-01,
-0.11595163e-01,
-0.12420692e-01,
-0.13440105e-01,
-0.14572812e-01,
-0.15877264e-01,
-0.17524028e-01,
-0.19829245e-01,
-0.22282675e-01,
-0.22505168e-01,
0.11582712e-01,
0.13047962e-01,
0.14879054e-01,
0.16763248e-01,
0.18476224e-01,
0.19879704e-01,
0.20932244e-01,
0.21672688e-01,
0.22175148e-01,
0.22506684e-01,
0.22711273e-01,
0.22814000e-01,
0.22829419e-01,
0.22766909e-01,
0.22633271e-01,
0.22434004e-01,
0.22174012e-01,
0.21857906e-01,
0.21490037e-01,
0.21074377e-01,
0.20614149e-01,
0.20111440e-01,
0.19566908e-01,
0.18979838e-01,
0.18348834e-01,
0.17672971e-01,
0.16953107e-01,
0.16192837e-01,
0.15398719e-01,
0.14579884e-01,
0.13746735e-01,
0.12909485e-01,
0.12076905e-01,
0.11255554e-01,
0.10449625e-01,
0.96614184e-02,
0.88916803e-02,
0.81402799e-02,
0.74064764e-02,
0.66891308e-02,
0.59866421e-02,
0.52965824e-02,
0.46150312e-02,
0.39348379e-02,
0.32422331e-02,
0.25101355e-02,
0.16859862e-02,
0.67323016e-03,
-0.68223715e-03,
-0.25608686e-02,
-0.50209761e-02,
-0.77415816e-02,
-0.99905552e-02,
-0.11216282e-01,
-0.11722405e-01,
-0.12240239e-01,
-0.13058126e-01,
-0.14066087e-01,
-0.15192053e-01,
-0.16501144e-01,
-0.18171404e-01,
-0.20520791e-01,
-0.22966992e-01,
-0.23192288e-01,
0.10709423e-01,
0.12065823e-01,
0.13761187e-01,
0.15506121e-01,
0.17093049e-01,
0.18394005e-01,
0.19370625e-01,
0.20058842e-01,
0.20527244e-01,
0.20837912e-01,
0.21031609e-01,
0.21131653e-01,
0.21151649e-01,
0.21100447e-01,
0.20984545e-01,
0.20809161e-01,
0.20578744e-01,
0.20297162e-01,
0.19967660e-01,
0.19592566e-01,
0.19173032e-01,
0.18708948e-01,
0.18199200e-01,
0.17642427e-01,
0.17038029e-01,
0.16387362e-01,
0.15694451e-01,
0.14966104e-01,
0.14211375e-01,
0.13440318e-01,
0.12662663e-01,
0.11886760e-01,
0.11119010e-01,
0.10363686e-01,
0.96232807e-02,
0.88988189e-02,
0.81905108e-02,
0.74980101e-02,
0.68207132e-02,
0.61578541e-02,
0.55083996e-02,
0.48709009e-02,
0.42431629e-02,
0.36212225e-02,
0.29974312e-02,
0.23571376e-02,
0.16709621e-02,
0.88186091e-03,
-0.11385441e-03,
-0.14786492e-02,
-0.33936626e-02,
-0.58928211e-02,
-0.86036716e-02,
-0.10764806e-01,
-0.11878857e-01,
-0.12327690e-01,
-0.12838322e-01,
-0.13654754e-01,
-0.14656777e-01,
-0.15782116e-01,
-0.17102929e-01,
-0.18805560e-01,
-0.21208614e-01,
-0.23648826e-01,
-0.23881109e-01,
0.99031366e-02,
0.11158862e-01,
0.12728675e-01,
0.14344782e-01,
0.15815102e-01,
0.17021233e-01,
0.17927697e-01,
0.18567791e-01,
0.19005040e-01,
0.19296985e-01,
0.19481450e-01,
0.19580156e-01,
0.19605840e-01,
0.19566806e-01,
0.19469030e-01,
0.19317091e-01,
0.19114554e-01,
0.18864036e-01,
0.18567108e-01,
0.18224217e-01,
0.17834747e-01,
0.17397350e-01,
0.16910696e-01,
0.16374495e-01,
0.15790507e-01,
0.15163027e-01,
0.14498919e-01,
0.13806998e-01,
0.13096904e-01,
0.12377877e-01,
0.11657882e-01,
0.10943062e-01,
0.10237600e-01,
0.95441425e-02,
0.88640014e-02,
0.81976959e-02,
0.75451867e-02,
0.69061988e-02,
0.62803319e-02,
0.56670737e-02,
0.50658169e-02,
0.44758078e-02,
0.38957228e-02,
0.33232321e-02,
0.27542519e-02,
0.21804536e-02,
0.15856215e-02,
0.93752530e-03,
0.17370789e-03,
-0.81816822e-03,
-0.22095586e-02,
-0.41797617e-02,
-0.67304755e-02,
-0.94296988e-02,
-0.11491459e-01,
-0.12489353e-01,
-0.12887697e-01,
-0.13399359e-01,
-0.14220017e-01,
-0.15221213e-01,
-0.16351623e-01,
-0.17690992e-01,
-0.19434931e-01,
-0.21900531e-01,
-0.24334403e-01,
-0.24576908e-01,
0.91596041e-02,
0.10322293e-01,
0.11776099e-01,
0.13273214e-01,
0.14635916e-01,
0.15754675e-01,
0.16596718e-01,
0.17192930e-01,
0.17602194e-01,
0.17877869e-01,
0.18055072e-01,
0.18153975e-01,
0.18186348e-01,
0.18159697e-01,
0.18079169e-01,
0.17948311e-01,
0.17769367e-01,
0.17543392e-01,
0.17270328e-01,
0.16949220e-01,
0.16578663e-01,
0.16157566e-01,
0.15685990e-01,
0.15165905e-01,
0.14601668e-01,
0.14000042e-01,
0.13369541e-01,
0.12719298e-01,
0.12057992e-01,
0.11393145e-01,
0.10730589e-01,
0.10074451e-01,
0.94274255e-02,
0.87909503e-02,
0.81658233e-02,
0.75523388e-02,
0.69505223e-02,
0.63602584e-02,
0.57813367e-02,
0.52134916e-02,
0.46564899e-02,
0.41098930e-02,
0.35728957e-02,
0.30442516e-02,
0.25214758e-02,
0.19998695e-02,
0.14702382e-02,
0.91451913e-03,
0.29673893e-03,
-0.45211281e-03,
-0.14543475e-02,
-0.28908835e-02,
-0.49353936e-02,
-0.75476863e-02,
-0.10228912e-01,
-0.12177330e-01,
-0.13055839e-01,
-0.13411977e-01,
-0.13932705e-01,
-0.14762470e-01,
-0.15767431e-01,
-0.16908331e-01,
-0.18272618e-01,
-0.20066334e-01,
-0.22602497e-01,
-0.25027955e-01,
-0.25282444e-01,
0.84757134e-02,
0.95526502e-02,
0.10899562e-01,
0.12287115e-01,
0.13550865e-01,
0.14589507e-01,
0.15372781e-01,
0.15929360e-01,
0.16313823e-01,
0.16575595e-01,
0.16747160e-01,
0.16847055e-01,
0.16885890e-01,
0.16870106e-01,
0.16803687e-01,
0.16688865e-01,
0.16526472e-01,
0.16316228e-01,
0.16057082e-01,
0.15747746e-01,
0.15387391e-01,
0.14976392e-01,
0.14516956e-01,
0.14013476e-01,
0.13472388e-01,
0.12901680e-01,
0.12309901e-01,
0.11705250e-01,
0.11094769e-01,
0.10483908e-01,
0.98766321e-02,
0.92755975e-02,
0.86824028e-02,
0.80980305e-02,
0.75229523e-02,
0.69574793e-02,
0.64018168e-02,
0.58560292e-02,
0.53201127e-02,
0.47941157e-02,
0.42779413e-02,
0.37713288e-02,
0.32739611e-02,
0.27850254e-02,
0.23029358e-02,
0.18249402e-02,
0.13457378e-02,
0.85484359e-03,
0.33187922e-03,
-0.26360160e-03,
-0.10086164e-02,
-0.20368681e-02,
-0.35384803e-02,
-0.56763412e-02,
-0.83571654e-02,
-0.11009140e-01,
-0.12828228e-01,
-0.13585953e-01,
-0.13909469e-01,
-0.14446771e-01,
-0.15289789e-01,
-0.16302353e-01,
-0.17458308e-01,
-0.18853234e-01,
-0.20704713e-01,
-0.23318050e-01,
-0.25730822e-01,
-0.25997423e-01,
0.78488868e-02,
0.88471249e-02,
0.10095963e-01,
0.11383073e-01,
0.12556248e-01,
0.13521745e-01,
0.14251599e-01,
0.14772387e-01,
0.15134628e-01,
0.15383984e-01,
0.15550311e-01,
0.15650392e-01,
0.15693463e-01,
0.15684696e-01,
0.15626790e-01,
0.15520756e-01,
0.15366406e-01,
0.15162837e-01,
0.14909022e-01,
0.14604477e-01,
0.14249890e-01,
0.13847613e-01,
0.13401926e-01,
0.12918934e-01,
0.12406076e-01,
0.11871213e-01,
0.11321891e-01,
0.10764673e-01,
0.10204747e-01,
0.96458644e-02,
0.90905391e-02,
0.85403854e-02,
0.79964781e-02,
0.74595166e-02,
0.69300104e-02,
0.64083328e-02,
0.58948221e-02,
0.53897803e-02,
0.48935004e-02,
0.44061504e-02,
0.39277528e-02,
0.34583132e-02,
0.29976317e-02,
0.25451381e-02,
0.20998786e-02,
0.16600611e-02,
0.12224072e-02,
0.78081363e-03,
0.32335857e-03,
-0.17330737e-03,
-0.75507897e-03,
-0.15084407e-02,
-0.25801368e-02,
-0.41686138e-02,
-0.64185774e-02,
-0.91707250e-02,
-0.11776590e-01,
-0.13448806e-01,
-0.14086716e-01,
-0.14388485e-01,
-0.14948927e-01,
-0.15807996e-01,
-0.16831016e-01,
-0.18005654e-01,
-0.19435892e-01,
-0.21352261e-01,
-0.24047775e-01,
-0.26441328e-01,
-0.26718097e-01,
0.72761136e-02,
0.82024941e-02,
0.93617616e-02,
0.10557142e-01,
0.11647610e-01,
0.12546310e-01,
0.13227314e-01,
0.13715181e-01,
0.14056569e-01,
0.14293512e-01,
0.14453239e-01,
0.14550695e-01,
0.14593684e-01,
0.14586142e-01,
0.14529741e-01,
0.14424738e-01,
0.14270628e-01,
0.14066807e-01,
0.13813181e-01,
0.13510717e-01,
0.13161879e-01,
0.12770806e-01,
0.12343196e-01,
0.11885820e-01,
0.11405828e-01,
0.10910179e-01,
0.10404940e-01,
0.98949000e-02,
0.93836067e-02,
0.88735307e-02,
0.83662188e-02,
0.78627151e-02,
0.73637851e-02,
0.68700551e-02,
0.63821264e-02,
0.59005306e-02,
0.54257177e-02,
0.49582440e-02,
0.44985944e-02,
0.40470236e-02,
0.36037776e-02,
0.31689804e-02,
0.27423929e-02,
0.23237155e-02,
0.19122795e-02,
0.15068311e-02,
0.11052121e-02,
0.70368673e-03,
0.29509133e-03,
-0.13425731e-03,
-0.61128259e-03,
-0.11886779e-02,
-0.19643204e-02,
-0.30994283e-02,
-0.47985963e-02,
-0.71782242e-02,
-0.99987285e-02,
-0.12535306e-01,
-0.14042607e-01,
-0.14564627e-01,
-0.14856030e-01,
-0.15444703e-01,
-0.16321121e-01,
-0.17356047e-01,
-0.18551625e-01,
-0.20020738e-01,
-0.22008330e-01,
-0.24788912e-01,
-0.27153788e-01,
-0.27435988e-01,
0.67528659e-02,
0.76138428e-02,
0.86914934e-02,
0.98031582e-02,
0.10817948e-01,
0.11655207e-01,
0.12290766e-01,
0.12747228e-01,
0.13067597e-01,
0.13290452e-01,
0.13440499e-01,
0.13530949e-01,
0.13568352e-01,
0.13555742e-01,
0.13494246e-01,
0.13384038e-01,
0.13225075e-01,
0.13017694e-01,
0.12763139e-01,
0.12463950e-01,
0.12124085e-01,
0.11748808e-01,
0.11344243e-01,
0.10916842e-01,
0.10472860e-01,
0.10017826e-01,
0.95562525e-02,
0.90915291e-02,
0.86259460e-02,
0.81609664e-02,
0.76976661e-02,
0.72367843e-02,
0.67789229e-02,
0.63247578e-02,
0.58749435e-02,
0.54301512e-02,
0.49911011e-02,
0.45584417e-02,
0.41327197e-02,
0.37144551e-02,
0.33040245e-02,
0.29014633e-02,
0.25067537e-02,
0.21196145e-02,
0.17395143e-02,
0.13655368e-02,
0.99622435e-03,
0.62900473e-03,
0.25951708e-03,
-0.12054403e-03,
-0.52714220e-03,
-0.99177333e-03,
-0.15755766e-02,
-0.23898906e-02,
-0.36114564e-02,
-0.54468848e-02,
-0.79709487e-02,
-0.10849346e-01,
-0.13287026e-01,
-0.14611776e-01,
-0.15024997e-01,
-0.15317407e-01,
-0.15937271e-01,
-0.16830366e-01,
-0.17877102e-01,
-0.19094490e-01,
-0.20604689e-01,
-0.22668475e-01,
-0.25534086e-01,
-0.27857255e-01,
-0.28136132e-01,
0.62725060e-02,
0.70738452e-02,
0.80769034e-02,
0.91117928e-02,
0.10056756e-01,
0.10836687e-01,
0.11428916e-01,
0.11854174e-01,
0.12152059e-01,
0.12357980e-01,
0.12494393e-01,
0.12573079e-01,
0.12599719e-01,
0.12576928e-01,
0.12505878e-01,
0.12387235e-01,
0.12221836e-01,
0.12011216e-01,
0.11757963e-01,
0.11465836e-01,
0.11139663e-01,
0.10784952e-01,
0.10407481e-01,
0.10012860e-01,
0.96060764e-02,
0.91912504e-02,
0.87715685e-02,
0.83493032e-02,
0.79259127e-02,
0.75023416e-02,
0.70792502e-02,
0.66572730e-02,
0.62370505e-02,
0.58192150e-02,
0.54045119e-02,
0.49937805e-02,
0.45878929e-02,
0.41875835e-02,
0.37935111e-02,
0.34063701e-02,
0.30265125e-02,
0.26541308e-02,
0.22892184e-02,
0.19315761e-02,
0.15806772e-02,
0.12358783e-02,
0.89602108e-03,
0.55933301e-03,
0.22290465e-03,
-0.11837736e-03,
-0.47439354e-03,
-0.86409569e-03,
-0.13247277e-02,
-0.19277434e-02,
-0.28001412e-02,
-0.41344408e-02,
-0.61328700e-02,
-0.88115418e-02,
-0.11727598e-01,
-0.14030309e-01,
-0.15156906e-01,
-0.15471662e-01,
-0.15775396e-01,
-0.16426805e-01,
-0.17333917e-01,
-0.18390546e-01,
-0.19628605e-01,
-0.21180209e-01,
-0.23323257e-01,
-0.26269557e-01,
-0.28532716e-01,
-0.28793098e-01,
0.58267605e-02,
0.65732352e-02,
0.75074541e-02,
0.84711155e-02,
0.93507199e-02,
0.10076155e-01,
0.10626074e-01,
0.11019482e-01,
0.11292907e-01,
0.11478950e-01,
0.11598186e-01,
0.11661368e-01,
0.11673780e-01,
0.11638117e-01,
0.11556003e-01,
0.11428879e-01,
0.11258594e-01,
0.11047787e-01,
0.10800015e-01,
0.10519676e-01,
0.10211711e-01,
0.98812887e-02,
0.95334202e-02,
0.91725811e-02,
0.88025127e-02,
0.84261857e-02,
0.80457665e-02,
0.76626753e-02,
0.72779190e-02,
0.68921465e-02,
0.65058237e-02,
0.61194906e-02,
0.57338160e-02,
0.53495029e-02,
0.49673491e-02,
0.45883162e-02,
0.42133094e-02,
0.38431743e-02,
0.34787988e-02,
0.31207474e-02,
0.27695056e-02,
0.24253791e-02,
0.20884008e-02,
0.17583206e-02,
0.14347950e-02,
0.11171702e-02,
0.80453861e-03,
0.49563375e-03,
0.18838409e-03,
-0.12067244e-03,
-0.43773217e-03,
-0.77462569e-03,
-0.11541138e-02,
-0.16205981e-02,
-0.22580458e-02,
-0.32116263e-02,
-0.46886820e-02,
-0.68765748e-02,
-0.97123422e-02,
-0.12634131e-01,
-0.14760367e-01,
-0.15676778e-01,
-0.15906356e-01,
-0.16229935e-01,
-0.16910229e-01,
-0.17826060e-01,
-0.18888203e-01,
-0.20143216e-01,
-0.21733690e-01,
-0.23955522e-01,
-0.26971251e-01,
-0.29147699e-01,
-0.29363733e-01,
0.54070717e-02,
0.61022816e-02,
0.69719772e-02,
0.78685069e-02,
0.86859381e-02,
0.93587395e-02,
0.98667927e-02,
0.10227575e-01,
0.10474904e-01,
0.10638993e-01,
0.10738960e-01,
0.10784928e-01,
0.10782113e-01,
0.10733562e-01,
0.10641565e-01,
0.10508422e-01,
0.10336863e-01,
0.10130250e-01,
0.98925652e-02,
0.96282410e-02,
0.93418919e-02,
0.90379883e-02,
0.87205544e-02,
0.83929868e-02,
0.80580246e-02,
0.77177160e-02,
0.73734652e-02,
0.70262798e-02,
0.66767824e-02,
0.63254600e-02,
0.59727905e-02,
0.56192488e-02,
0.52654315e-02,
0.49121627e-02,
0.45603677e-02,
0.42109452e-02,
0.38648082e-02,
0.35230482e-02,
0.31864857e-02,
0.28557458e-02,
0.25313764e-02,
0.22137545e-02,
0.19029069e-02,
0.15987142e-02,
0.13008172e-02,
0.10086106e-02,
0.72139426e-03,
0.43811998e-03,
0.15723592e-03,
-0.12376621e-03,
-0.40906059e-03,
-0.70619176e-03,
-0.10294477e-02,
-0.14063339e-02,
-0.18904223e-02,
-0.25809507e-02,
-0.36431164e-02,
-0.52961027e-02,
-0.76976335e-02,
-0.10682030e-01,
-0.13564016e-01,
-0.15468677e-01,
-0.16168477e-01,
-0.16328586e-01,
-0.16677296e-01,
-0.17379910e-01,
-0.18295975e-01,
-0.19355783e-01,
-0.20619802e-01,
-0.22241637e-01,
-0.24535159e-01,
-0.27597541e-01,
-0.29646901e-01,
-0.29775085e-01,
0.50061666e-02,
0.56525916e-02,
0.64607500e-02,
0.72929878e-02,
0.80504669e-02,
0.86719748e-02,
0.91386139e-02,
0.94665419e-02,
0.96872011e-02,
0.98287789e-02,
0.99092564e-02,
0.99384002e-02,
0.99215917e-02,
0.98623503e-02,
0.97636050e-02,
0.96283276e-02,
0.94598122e-02,
0.92617264e-02,
0.90380237e-02,
0.87927980e-02,
0.85300365e-02,
0.82533574e-02,
0.79658525e-02,
0.76700603e-02,
0.73679010e-02,
0.70607387e-02,
0.67495992e-02,
0.64351326e-02,
0.61177979e-02,
0.57979897e-02,
0.54761712e-02,
0.51528304e-02,
0.48285746e-02,
0.45042443e-02,
0.41807955e-02,
0.38590934e-02,
0.35402139e-02,
0.32251289e-02,
0.29146906e-02,
0.26096692e-02,
0.23106080e-02,
0.20178384e-02,
0.17315357e-02,
0.14516271e-02,
0.11777402e-02,
0.90943626e-03,
0.64600032e-03,
0.38659063e-03,
0.12990853e-03,
-0.12597593e-03,
-0.38408191e-03,
-0.64947037e-03,
-0.93128323e-03,
-0.12469848e-02,
-0.16304143e-02,
-0.21464191e-02,
-0.29125693e-02,
-0.41156993e-02,
-0.59804209e-02,
-0.86140092e-02,
-0.11723425e-01,
-0.14505944e-01,
-0.16143575e-01,
-0.16627248e-01,
-0.16734317e-01,
-0.17108738e-01,
-0.17822174e-01,
-0.18725207e-01,
-0.19769130e-01,
-0.21026963e-01,
-0.22663718e-01,
-0.25009871e-01,
-0.28077208e-01,
-0.29936161e-01,
-0.29903086e-01,
0.46191481e-02,
0.52183974e-02,
0.59670168e-02,
0.67369491e-02,
0.74361749e-02,
0.80076661e-02,
0.84337806e-02,
0.87295286e-02,
0.89242216e-02,
0.90442616e-02,
0.91067078e-02,
0.91211265e-02,
0.90931328e-02,
0.90266792e-02,
0.89251632e-02,
0.87919217e-02,
0.86304275e-02,
0.84443204e-02,
0.82372911e-02,
0.80128787e-02,
0.77742967e-02,
0.75243628e-02,
0.72654327e-02,
0.69993068e-02,
0.67273136e-02,
0.64504785e-02,
0.61694812e-02,
0.58848094e-02,
0.55968449e-02,
0.53059584e-02,
0.50125136e-02,
0.47170832e-02,
0.44203578e-02,
0.41231164e-02,
0.38261772e-02,
0.35306206e-02,
0.32374398e-02,
0.29475170e-02,
0.26617502e-02,
0.23809616e-02,
0.21057320e-02,
0.18363672e-02,
0.15731406e-02,
0.13160043e-02,
0.10647054e-02,
0.81881043e-03,
0.57776505e-03,
0.34065990e-03,
0.10639309e-03,
-0.12660332e-03,
-0.36068761e-03,
-0.59937662e-03,
-0.84880623e-03,
-0.11204274e-02,
-0.14357190e-02,
-0.18365043e-02,
-0.24020236e-02,
-0.32714859e-02,
-0.46532732e-02,
-0.67663151e-02,
-0.96402615e-02,
-0.12831893e-01,
-0.15441692e-01,
-0.16770078e-01,
-0.17045652e-01,
-0.17114457e-01,
-0.17508077e-01,
-0.18213695e-01,
-0.19082699e-01,
-0.20087397e-01,
-0.21311399e-01,
-0.22931056e-01,
-0.25289452e-01,
-0.28288210e-01,
-0.29855460e-01,
-0.29540431e-01,
0.42438372e-02,
0.47970237e-02,
0.54875640e-02,
0.61967750e-02,
0.68393080e-02,
0.73622656e-02,
0.77493028e-02,
0.80144051e-02,
0.81849089e-02,
0.82855793e-02,
0.83326427e-02,
0.83354777e-02,
0.82998471e-02,
0.82299737e-02,
0.81294924e-02,
0.80018509e-02,
0.78504579e-02,
0.76786745e-02,
0.74897227e-02,
0.72865360e-02,
0.70716739e-02,
0.68472759e-02,
0.66150147e-02,
0.63761803e-02,
0.61317808e-02,
0.58825328e-02,
0.56289569e-02,
0.53714588e-02,
0.51103933e-02,
0.48460537e-02,
0.45788516e-02,
0.43093446e-02,
0.40383078e-02,
0.37663926e-02,
0.34944667e-02,
0.32235801e-02,
0.29546390e-02,
0.26885115e-02,
0.24261076e-02,
0.21682207e-02,
0.19154118e-02,
0.16681626e-02,
0.14266440e-02,
0.11909056e-02,
0.96084451e-03,
0.73608221e-03,
0.51601185e-03,
0.29983022e-03,
0.86455977e-04,
-0.12548176e-03,
-0.33780126e-03,
-0.55322744e-03,
-0.77611348e-03,
-0.10140111e-02,
-0.12810014e-02,
-0.16041107e-02,
-0.20356334e-02,
-0.26726879e-02,
-0.36793316e-02,
-0.52829208e-02,
-0.76788506e-02,
-0.10785309e-01,
-0.13992673e-01,
-0.16345447e-01,
-0.17329855e-01,
-0.17411666e-01,
-0.17451219e-01,
-0.17846853e-01,
-0.18514944e-01,
-0.19316018e-01,
-0.20241588e-01,
-0.21382602e-01,
-0.22925586e-01,
-0.25219092e-01,
-0.28025590e-01,
-0.29143283e-01,
-0.28364539e-01,
0.38802244e-02,
0.43883347e-02,
0.50221425e-02,
0.56721950e-02,
0.62597329e-02,
0.67359642e-02,
0.70858784e-02,
0.73225168e-02,
0.74713244e-02,
0.75554666e-02,
0.75903148e-02,
0.75849718e-02,
0.75452076e-02,
0.74753161e-02,
0.73789642e-02,
0.72595305e-02,
0.71201995e-02,
0.69639226e-02,
0.67933705e-02,
0.66109342e-02,
0.64186375e-02,
0.62180408e-02,
0.60103335e-02,
0.57964721e-02,
0.55771875e-02,
0.53530186e-02,
0.51244237e-02,
0.48917811e-02,
0.46553644e-02,
0.44155009e-02,
0.41726306e-02,
0.39273482e-02,
0.36802413e-02,
0.34321158e-02,
0.31838417e-02,
0.29362249e-02,
0.26901832e-02,
0.24466240e-02,
0.22063667e-02,
0.19700988e-02,
0.17385191e-02,
0.15121014e-02,
0.12910479e-02,
0.10755060e-02,
0.86542196e-03,
0.66050846e-03,
0.46016547e-03,
0.26359106e-03,
0.69742789e-04,
-0.12264148e-03,
-0.31505234e-03,
-0.50968054e-03,
-0.70983008e-03,
-0.92059938e-03,
-0.11514680e-02,
-0.14202188e-02,
-0.17611791e-02,
-0.22402222e-02,
-0.29767312e-02,
-0.41622054e-02,
-0.60354555e-02,
-0.87412195e-02,
-0.12048509e-01,
-0.15178418e-01,
-0.17183300e-01,
-0.17799482e-01,
-0.17704254e-01,
-0.17711623e-01,
-0.18075820e-01,
-0.18659104e-01,
-0.19336520e-01,
-0.20114373e-01,
-0.21086661e-01,
-0.22448380e-01,
-0.24543686e-01,
-0.26968870e-01,
-0.27425798e-01,
-0.25982859e-01,
0.35297391e-02,
0.39938814e-02,
0.45724809e-02,
0.51651788e-02,
0.56997212e-02,
0.61313836e-02,
0.64464915e-02,
0.66571729e-02,
0.67870081e-02,
0.68575540e-02,
0.68832617e-02,
0.68728351e-02,
0.68318904e-02,
0.67646406e-02,
0.66746362e-02,
0.65650316e-02,
0.64386618e-02,
0.62980657e-02,
0.61454801e-02,
0.59827766e-02,
0.58114552e-02,
0.56327097e-02,
0.54474538e-02,
0.52563460e-02,
0.50598914e-02,
0.48585772e-02,
0.46528103e-02,
0.44429065e-02,
0.42291693e-02,
0.40119542e-02,
0.37917392e-02,
0.35690265e-02,
0.33443815e-02,
0.31186463e-02,
0.28926407e-02,
0.26670455e-02,
0.24426919e-02,
0.22205103e-02,
0.20012076e-02,
0.17854705e-02,
0.15739684e-02,
0.13671899e-02,
0.11654871e-02,
0.96899009e-03,
0.77774836e-03,
0.59148681e-03,
0.40965760e-03,
0.23145005e-03,
0.55848002e-04,
-0.11826525e-03,
-0.29229349e-03,
-0.46810537e-03,
-0.64812414e-03,
-0.83613564e-03,
-0.10385482e-02,
-0.12673559e-02,
-0.15453240e-02,
-0.19167364e-02,
-0.24653263e-02,
-0.33373679e-02,
-0.47518569e-02,
-0.69443211e-02,
-0.99710366e-02,
-0.13414696e-01,
-0.16345950e-01,
-0.17911386e-01,
-0.18145934e-01,
-0.17885052e-01,
-0.17836532e-01,
-0.18110715e-01,
-0.18532671e-01,
-0.18993681e-01,
-0.19508589e-01,
-0.20170461e-01,
-0.21185948e-01,
-0.22894341e-01,
-0.24724348e-01,
-0.24370475e-01,
-0.22268195e-01,
0.31942856e-02,
0.36158317e-02,
0.41410751e-02,
0.46785669e-02,
0.51624421e-02,
0.55519687e-02,
0.58347746e-02,
0.60220673e-02,
0.61355513e-02,
0.61951405e-02,
0.62143006e-02,
0.62012500e-02,
0.61613480e-02,
0.60985861e-02,
0.60162526e-02,
0.59171882e-02,
0.58038891e-02,
0.56785326e-02,
0.55429381e-02,
0.53985524e-02,
0.52465028e-02,
0.50876997e-02,
0.49228426e-02,
0.47523943e-02,
0.45767589e-02,
0.43963231e-02,
0.42114421e-02,
0.40224311e-02,
0.38296313e-02,
0.36334537e-02,
0.34343314e-02,
0.32326737e-02,
0.30291770e-02,
0.28245456e-02,
0.26194807e-02,
0.24146992e-02,
0.22109461e-02,
0.20089839e-02,
0.18095284e-02,
0.16132607e-02,
0.14207716e-02,
0.12325735e-02,
0.10491284e-02,
0.87066717e-03,
0.69715641e-03,
0.52842626e-03,
0.36394782e-03,
0.20294097e-03,
0.44450713e-04,
-0.11262445e-03,
-0.26963896e-03,
-0.42814302e-03,
-0.59004046e-03,
-0.75823971e-03,
-0.93725865e-03,
-0.11351963e-02,
-0.13674999e-02,
-0.16641312e-02,
-0.20829688e-02,
-0.27305069e-02,
-0.37841904e-02,
-0.54863039e-02,
-0.80432529e-02,
-0.11373957e-01,
-0.14846842e-01,
-0.17431933e-01,
-0.18471425e-01,
-0.18317387e-01,
-0.17884307e-01,
-0.17721877e-01,
-0.17807389e-01,
-0.17944368e-01,
-0.18038927e-01,
-0.18111341e-01,
-0.18263325e-01,
-0.18744303e-01,
-0.19931488e-01,
-0.21152221e-01,
-0.20258194e-01,
-0.18154927e-01,
0.28755944e-02,
0.32562227e-02,
0.37303183e-02,
0.42150859e-02,
0.46508745e-02,
0.50008302e-02,
0.52538323e-02,
0.54201568e-02,
0.55196281e-02,
0.55704820e-02,
0.55851364e-02,
0.55712480e-02,
0.55338335e-02,
0.54765893e-02,
0.54024858e-02,
0.53140409e-02,
0.52134213e-02,
0.51024402e-02,
0.49825548e-02,
0.48549226e-02,
0.47204504e-02,
0.45797881e-02,
0.44334051e-02,
0.42817341e-02,
0.41251029e-02,
0.39637811e-02,
0.37980722e-02,
0.36283240e-02,
0.34549329e-02,
0.32783179e-02,
0.30987991e-02,
0.29169142e-02,
0.27332988e-02,
0.25485274e-02,
0.23631887e-02,
0.21780862e-02,
0.19938366e-02,
0.18110086e-02,
0.16303621e-02,
0.14525431e-02,
0.12780537e-02,
0.11074701e-02,
0.94128802e-03,
0.77982916e-03,
0.62306761e-03,
0.47078525e-03,
0.32257999e-03,
0.17765176e-03,
0.35159428e-04,
-0.10600228e-03,
-0.24719909e-03,
-0.38967439e-03,
-0.53508411e-03,
-0.68561384e-03,
-0.84459153e-03,
-0.10175847e-02,
-0.12152690e-02,
-0.14582346e-02,
-0.17864328e-02,
-0.22758693e-02,
-0.30617819e-02,
-0.43552169e-02,
-0.64090681e-02,
-0.93599092e-02,
-0.12933343e-01,
-0.16276378e-01,
-0.18346461e-01,
-0.18781584e-01,
-0.18226096e-01,
-0.17576260e-01,
-0.17187940e-01,
-0.16925601e-01,
-0.16586989e-01,
-0.16100395e-01,
-0.15514784e-01,
-0.15000328e-01,
-0.14952605e-01,
-0.15897794e-01,
-0.17156484e-01,
-0.16811144e-01,
-0.16059641e-01,
0.25749404e-02,
0.29165992e-02,
0.33420587e-02,
0.37768418e-02,
0.41672788e-02,
0.44802395e-02,
0.47057904e-02,
0.48532858e-02,
0.49406979e-02,
0.49845842e-02,
0.49962578e-02,
0.49827271e-02,
0.49485951e-02,
0.48972480e-02,
0.48313839e-02,
0.47532097e-02,
0.46645310e-02,
0.45668236e-02,
0.44612922e-02,
0.43488974e-02,
0.42303428e-02,
0.41060960e-02,
0.39765178e-02,
0.38419361e-02,
0.37026398e-02,
0.35588457e-02,
0.34108032e-02,
0.32588942e-02,
0.31035459e-02,
0.29450748e-02,
0.27838917e-02,
0.26205610e-02,
0.24555970e-02,
0.22894754e-02,
0.21228124e-02,
0.19562438e-02,
0.17903824e-02,
0.16257121e-02,
0.14628675e-02,
0.13024764e-02,
0.11450466e-02,
0.99116936e-03,
0.84134901e-03,
0.69589523e-03,
0.55489980e-03,
0.41809730e-03,
0.28507152e-03,
0.15523843e-03,
0.27696389e-04,
-0.98706871e-04,
-0.22511023e-03,
-0.35272306e-03,
-0.48294108e-03,
-0.61748014e-03,
-0.75878931e-03,
-0.91083115e-03,
-0.10810896e-02,
-0.12839842e-02,
-0.15473457e-02,
-0.19251802e-02,
-0.25176301e-02,
-0.34943304e-02,
-0.50974879e-02,
-0.75646825e-02,
-0.10904021e-01,
-0.14594574e-01,
-0.17590806e-01,
-0.18962247e-01,
-0.18717308e-01,
-0.17718159e-01,
-0.16742069e-01,
-0.15938997e-01,
-0.15096456e-01,
-0.14041902e-01,
-0.12778138e-01,
-0.11481387e-01,
-0.10537255e-01,
-0.10632942e-01,
-0.12457354e-01,
-0.15134930e-01,
-0.16634356e-01,
-0.17927606e-01,
0.22929932e-02,
0.25978240e-02,
0.29773721e-02,
0.33650806e-02,
0.37129789e-02,
0.39914814e-02,
0.41917670e-02,
0.43222816e-02,
0.43991888e-02,
0.44373926e-02,
0.44470965e-02,
0.44346289e-02,
0.44041476e-02,
0.43587163e-02,
0.43007480e-02,
0.42321626e-02,
0.41544731e-02,
0.40688911e-02,
0.39764121e-02,
0.38778088e-02,
0.37736101e-02,
0.36642191e-02,
0.35499295e-02,
0.34309430e-02,
0.33074785e-02,
0.31797730e-02,
0.30480758e-02,
0.29127430e-02,
0.27741243e-02,
0.26325521e-02,
0.24885421e-02,
0.23425936e-02,
0.21950973e-02,
0.20465283e-02,
0.18974531e-02,
0.17483715e-02,
0.15998175e-02,
0.14523078e-02,
0.13063080e-02,
0.11623653e-02,
0.10211229e-02,
0.88305573e-03,
0.74868003e-03,
0.61835541e-03,
0.49214705e-03,
0.36987942e-03,
0.25109114e-03,
0.13533220e-03,
0.21703247e-04,
-0.90926857e-04,
-0.20352546e-03,
-0.31733362e-03,
-0.43350609e-03,
-0.55338885e-03,
-0.67877601e-03,
-0.81268302e-03,
-0.96038845e-03,
-0.11320858e-02,
-0.13475646e-02,
-0.16453972e-02,
-0.20989443e-02,
-0.28392603e-02,
-0.40741763e-02,
-0.60647582e-02,
-0.89876316e-02,
-0.12647802e-01,
-0.16243244e-01,
-0.18616004e-01,
-0.19093577e-01,
-0.18077876e-01,
-0.16530357e-01,
-0.15029374e-01,
-0.13550565e-01,
-0.11885053e-01,
-0.99943755e-02,
-0.81011895e-02,
-0.66745249e-02,
-0.64049885e-02,
-0.81325630e-02,
-0.12245839e-01,
-0.17089149e-01,
-0.20167973e-01,
-0.22245478e-01,
0.20299123e-02,
0.23001628e-02,
0.26366366e-02,
0.29802567e-02,
0.32884392e-02,
0.35349382e-02,
0.37119628e-02,
0.38270687e-02,
0.38946737e-02,
0.39280793e-02,
0.39364123e-02,
0.39253496e-02,
0.38986101e-02,
0.38588869e-02,
0.38082711e-02,
0.37484372e-02,
0.36807158e-02,
0.36061385e-02,
0.35254795e-02,
0.34393084e-02,
0.33480865e-02,
0.32521591e-02,
0.31517555e-02,
0.30470104e-02,
0.29380592e-02,
0.28251486e-02,
0.27085962e-02,
0.25886497e-02,
0.24655664e-02,
0.23397931e-02,
0.22118504e-02,
0.20821332e-02,
0.19509925e-02,
0.18189276e-02,
0.16863818e-02,
0.15537632e-02,
0.14214842e-02,
0.12901434e-02,
0.11600459e-02,
0.10316600e-02,
0.90566400e-03,
0.78258343e-03,
0.66282053e-03,
0.54670696e-03,
0.43438902e-03,
0.32569954e-03,
0.22029619e-03,
0.11765160e-03,
0.16918209e-04,
-0.82841863e-04,
-0.18264857e-03,
-0.28356965e-03,
-0.38668749e-03,
-0.49303711e-03,
-0.60395780e-03,
-0.72176283e-03,
-0.85029344e-03,
-0.99679129e-03,
-0.11756377e-02,
-0.14147292e-02,
-0.17678216e-02,
-0.23343621e-02,
-0.32830362e-02,
-0.48576673e-02,
-0.73089227e-02,
-0.10680980e-01,
-0.14495158e-01,
-0.17674342e-01,
-0.19089572e-01,
-0.18462013e-01,
-0.16541397e-01,
-0.14258571e-01,
-0.11979854e-01,
-0.96273897e-02,
-0.71729873e-02,
-0.49013551e-02,
-0.33841925e-02,
-0.33057944e-02,
-0.52674399e-02,
-0.96266540e-02,
-0.15894135e-01,
-0.21601008e-01,
-0.24401912e-01,
-0.25605565e-01,
0.17854190e-02,
0.20233756e-02,
0.23196433e-02,
0.26221615e-02,
0.28933994e-02,
0.31102363e-02,
0.32658295e-02,
0.33668731e-02,
0.34261208e-02,
0.34553525e-02,
0.34626699e-02,
0.34531134e-02,
0.34299616e-02,
0.33955548e-02,
0.33516849e-02,
0.32997937e-02,
0.32410624e-02,
0.31763858e-02,
0.31063606e-02,
0.30314266e-02,
0.29519610e-02,
0.28682463e-02,
0.27804545e-02,
0.26887048e-02,
0.25930938e-02,
0.24938518e-02,
0.23912911e-02,
0.22855958e-02,
0.21769952e-02,
0.20659990e-02,
0.19530528e-02,
0.18384572e-02,
0.17226556e-02,
0.16060474e-02,
0.14889956e-02,
0.13717994e-02,
0.12548816e-02,
0.11386774e-02,
0.10235143e-02,
0.90985751e-03,
0.79824886e-03,
0.68927044e-03,
0.58327196e-03,
0.48052520e-03,
0.38123387e-03,
0.28524606e-03,
0.19232801e-03,
0.10188403e-03,
0.13148935e-04,
-0.74649892e-04,
-0.16256304e-03,
-0.25152223e-03,
-0.34248683e-03,
-0.43625894e-03,
-0.53394295e-03,
-0.63723064e-03,
-0.74901024e-03,
-0.87452604e-03,
-0.10243639e-02,
-0.12190320e-02,
-0.14982653e-02,
-0.19371029e-02,
-0.26686641e-02,
-0.39032472e-02,
-0.59056627e-02,
-0.88597359e-02,
-0.12578117e-01,
-0.16234819e-01,
-0.18552493e-01,
-0.18625131e-01,
-0.16659645e-01,
-0.13658211e-01,
-0.10453119e-01,
-0.73239915e-02,
-0.43831225e-02,
-0.19895632e-02,
-0.75665663e-03,
-0.12641177e-02,
-0.37606840e-02,
-0.81160162e-02,
-0.14009449e-01,
-0.20516826e-01,
-0.25252979e-01,
-0.26749684e-01,
-0.26949666e-01,
0.15589494e-02,
0.17668876e-02,
0.20257886e-02,
0.22901346e-02,
0.25271075e-02,
0.27164973e-02,
0.28523379e-02,
0.29405085e-02,
0.29921937e-02,
0.30177287e-02,
0.30242268e-02,
0.30161096e-02,
0.29962433e-02,
0.29666633e-02,
0.29289203e-02,
0.28842422e-02,
0.28336111e-02,
0.27777818e-02,
0.27172987e-02,
0.26525310e-02,
0.25837307e-02,
0.25110987e-02,
0.24347827e-02,
0.23548861e-02,
0.22715516e-02,
0.21849549e-02,
0.20953005e-02,
0.20027831e-02,
0.19077143e-02,
0.18105077e-02,
0.17114856e-02,
0.16110209e-02,
0.15095366e-02,
0.14073697e-02,
0.13047857e-02,
0.12020444e-02,
0.10995269e-02,
0.99746429e-03,
0.89634967e-03,
0.79651998e-03,
0.69846597e-03,
0.60272694e-03,
0.50964806e-03,
0.41947985e-03,
0.33230876e-03,
0.24821598e-03,
0.16693448e-03,
0.87793873e-04,
0.10210203e-04,
-0.66529035e-04,
-0.14337612e-03,
-0.22124445e-03,
-0.30088221e-03,
-0.38298120e-03,
-0.46846602e-03,
-0.55856747e-03,
-0.65543054e-03,
-0.76301250e-03,
-0.88915287e-03,
-0.10492229e-02,
-0.12729177e-02,
-0.16169698e-02,
-0.21851282e-02,
-0.31512871e-02,
-0.47627124e-02,
-0.72655436e-02,
-0.10682470e-01,
-0.14481976e-01,
-0.17479382e-01,
-0.18369909e-01,
-0.16695704e-01,
-0.13192192e-01,
-0.90477718e-02,
-0.50837407e-02,
-0.17456600e-02,
0.50775765e-03,
0.11405456e-02,
-0.15771549e-03,
-0.32012013e-02,
-0.74220751e-02,
-0.12348277e-01,
-0.17880509e-01,
-0.23407269e-01,
-0.26902754e-01,
-0.27527036e-01,
-0.27351677e-01,
0.13498098e-02,
0.15299571e-02,
0.17542662e-02,
0.19832880e-02,
0.21885794e-02,
0.23526303e-02,
0.24702842e-02,
0.25466559e-02,
0.25914581e-02,
0.26136602e-02,
0.26194309e-02,
0.26126066e-02,
0.25956824e-02,
0.25704356e-02,
0.25382130e-02,
0.25000437e-02,
0.24567009e-02,
0.24088090e-02,
0.23568932e-02,
0.23012925e-02,
0.22421621e-02,
0.21796003e-02,
0.21137316e-02,
0.20446836e-02,
0.19726283e-02,
0.18976876e-02,
0.18199275e-02,
0.17396152e-02,
0.16571450e-02,
0.15727388e-02,
0.14866672e-02,
0.13993700e-02,
0.13112020e-02,
0.12224860e-02,
0.11333575e-02,
0.10441266e-02,
0.95500512e-03,
0.86619117e-03,
0.77818817e-03,
0.69130980e-03,
0.60596765e-03,
0.52263023e-03,
0.44163744e-03,
0.36320378e-03,
0.28736555e-03,
0.21433813e-03,
0.14382407e-03,
0.75184740e-04,
0.79467936e-05,
-0.58581456e-04,
-0.12522114e-03,
-0.19278513e-03,
-0.26187330e-03,
-0.33317646e-03,
-0.40739289e-03,
-0.48542794e-03,
-0.56885718e-03,
-0.66080934e-03,
-0.76709641e-03,
-0.89942804e-03,
-0.10802879e-02,
-0.13528165e-02,
-0.17975677e-02,
-0.25551619e-02,
-0.38419224e-02,
-0.59169503e-02,
-0.89288345e-02,
-0.12605214e-01,
-0.15980633e-01,
-0.17610485e-01,
-0.16458714e-01,
-0.12757922e-01,
-0.78148004e-02,
-0.30447829e-02,
0.58803929e-03,
0.24830827e-02,
0.23169408e-02,
0.21695795e-03,
-0.31757678e-02,
-0.70471163e-02,
-0.10940184e-01,
-0.15055859e-01,
-0.19874115e-01,
-0.24761699e-01,
-0.27603908e-01,
-0.27851652e-01,
-0.27539870e-01,
0.11573090e-02,
0.13118297e-02,
0.15042420e-02,
0.17007013e-02,
0.18768052e-02,
0.20175355e-02,
0.21184757e-02,
0.21840241e-02,
0.22225198e-02,
0.22416625e-02,
0.22467452e-02,
0.22410604e-02,
0.22267597e-02,
0.22053893e-02,
0.21781034e-02,
0.21457481e-02,
0.21089534e-02,
0.20682334e-02,
0.20240291e-02,
0.19766658e-02,
0.19262728e-02,
0.18728701e-02,
0.18165240e-02,
0.17574138e-02,
0.16956952e-02,
0.16314362e-02,
0.15646521e-02,
0.14956590e-02,
0.14248277e-02,
0.13522609e-02,
0.12782239e-02,
0.12031234e-02,
0.11273304e-02,
0.10510463e-02,
0.97442413e-03,
0.89773658e-03,
0.82104641e-03,
0.74460293e-03,
0.66874258e-03,
0.59397513e-03,
0.52050536e-03,
0.44870362e-03,
0.37896109e-03,
0.31140892e-03,
0.24619559e-03,
0.18341106e-03,
0.12278830e-03,
0.63891501e-04,
0.61988922e-05,
-0.50909595e-04,
-0.10814507e-03,
-0.16616660e-03,
-0.22550514e-03,
-0.28680690e-03,
-0.35062019e-03,
-0.41758217e-03,
-0.48887444e-03,
-0.56698389e-03,
-0.65631996e-03,
-0.76583039e-03,
-0.91279822e-03,
-0.11302640e-02,
-0.14807846e-02,
-0.20767434e-02,
-0.31004245e-02,
-0.47965096e-02,
-0.73740366e-02,
-0.10751202e-01,
-0.14214644e-01,
-0.16375747e-01,
-0.15823588e-01,
-0.12226088e-01,
-0.67599509e-02,
-0.13356704e-02,
0.24713113e-02,
0.38747264e-02,
0.28679580e-02,
0.11903104e-03,
-0.33430753e-02,
-0.66695027e-02,
-0.96058799e-02,
-0.12532249e-01,
-0.16198792e-01,
-0.21001449e-01,
-0.25733067e-01,
-0.28118331e-01,
-0.27977712e-01,
-0.27381005e-01,
0.98083925e-03,
0.11118341e-02,
0.12749631e-02,
0.14415317e-02,
0.15908498e-02,
0.17101859e-02,
0.17957988e-02,
0.18514196e-02,
0.18841210e-02,
0.19004331e-02,
0.19048484e-02,
0.19001653e-02,
0.18882133e-02,
0.18703013e-02,
0.18473966e-02,
0.18202132e-02,
0.17892951e-02,
0.17550559e-02,
0.17178158e-02,
0.16778519e-02,
0.16353233e-02,
0.15902224e-02,
0.15425772e-02,
0.14925570e-02,
0.14402777e-02,
0.13857534e-02,
0.13290886e-02,
0.12705566e-02,
0.12104037e-02,
0.11487597e-02,
0.10858473e-02,
0.10220249e-02,
0.95764559e-03,
0.89280307e-03,
0.82777342e-03,
0.76263881e-03,
0.69743692e-03,
0.63244364e-03,
0.56787435e-03,
0.50428713e-03,
0.44183279e-03,
0.38074178e-03,
0.32141685e-03,
0.26393912e-03,
0.20854473e-03,
0.15521001e-03,
0.10369664e-03,
0.53771990e-04,
0.48405859e-05,
-0.43625452e-04,
-0.92199283e-04,
-0.14142317e-03,
-0.19180843e-03,
-0.24387297e-03,
-0.29810442e-03,
-0.35491036e-03,
-0.41522519e-03,
-0.48093690e-03,
-0.55558363e-03,
-0.64595538e-03,
-0.76544558e-03,
-0.93956996e-03,
-0.12170107e-02,
-0.16869402e-02,
-0.24994221e-02,
-0.38717622e-02,
-0.60297726e-02,
-0.90123136e-02,
-0.12333807e-01,
-0.14771741e-01,
-0.14760138e-01,
-0.11492035e-01,
-0.58502830e-02,
-0.39222432e-04,
0.38003121e-02,
0.46805777e-02,
0.29477894e-02,
-0.19698367e-03,
-0.34707133e-02,
-0.61565377e-02,
-0.82763545e-02,
-0.10341127e-01,
-0.13046334e-01,
-0.17025929e-01,
-0.22158746e-01,
-0.26648698e-01,
-0.28339002e-01,
-0.27549669e-01,
-0.26395839e-01,
0.81985246e-03,
0.92936639e-03,
0.10657535e-02,
0.12050235e-02,
0.13298774e-02,
0.14296711e-02,
0.15012774e-02,
0.15478174e-02,
0.15752059e-02,
0.15889057e-02,
0.15926794e-02,
0.15888754e-02,
0.15790189e-02,
0.15641790e-02,
0.15451561e-02,
0.15225782e-02,
0.14969195e-02,
0.14685052e-02,
0.14375511e-02,
0.14042476e-02,
0.13687719e-02,
0.13311717e-02,
0.12914606e-02,
0.12497153e-02,
0.12060066e-02,
0.11603512e-02,
0.11129703e-02,
0.10640363e-02,
0.10136392e-02,
0.96200767e-03,
0.90932270e-03,
0.85588358e-03,
0.80193224e-03,
0.74761838e-03,
0.69321913e-03,
0.63866674e-03,
0.58403739e-03,
0.52954548e-03,
0.47544795e-03,
0.42208337e-03,
0.36977371e-03,
0.31858659e-03,
0.26881197e-03,
0.22063967e-03,
0.17424425e-03,
0.12957763e-03,
0.86436587e-04,
0.44699889e-04,
0.37974135e-05,
-0.36786892e-04,
-0.77447279e-04,
-0.11859972e-03,
-0.16077398e-03,
-0.20438076e-03,
-0.24981576e-03,
-0.29733716e-03,
-0.34771886e-03,
-0.40232454e-03,
-0.46408881e-03,
-0.53816853e-03,
-0.63491607e-03,
-0.77419216e-03,
-0.99393132e-03,
-0.13644308e-02,
-0.20072565e-02,
-0.31077981e-02,
-0.48824749e-02,
-0.74348073e-02,
-0.10457132e-01,
-0.12935034e-01,
-0.13322859e-01,
-0.10510216e-01,
-0.50372370e-02,
0.82519592e-03,
0.45412472e-02,
0.49564531e-02,
0.27259453e-02,
-0.53395459e-03,
-0.34414190e-02,
-0.54976437e-02,
-0.69722342e-02,
-0.84393835e-02,
-0.10464143e-01,
-0.13568074e-01,
-0.18088147e-01,
-0.23360549e-01,
-0.27191006e-01,
-0.27798105e-01,
-0.26014257e-01,
-0.23941122e-01,
0.67388569e-03,
0.76391455e-03,
0.87603967e-03,
0.99053816e-03,
0.10931880e-02,
0.11752385e-02,
0.12341201e-02,
0.12724005e-02,
0.12949465e-02,
0.13062552e-02,
0.13094257e-02,
0.13063867e-02,
0.12983767e-02,
0.12862559e-02,
0.12706857e-02,
0.12522101e-02,
0.12312382e-02,
0.12080271e-02,
0.11827156e-02,
0.11554065e-02,
0.11262480e-02,
0.10953922e-02,
0.10628636e-02,
0.10286153e-02,
0.99264085e-03,
0.95504802e-03,
0.91611175e-03,
0.87590015e-03,
0.83438883e-03,
0.79185999e-03,
0.74851344e-03,
0.70453959e-03,
0.66006870e-03,
0.61538786e-03,
0.57062833e-03,
0.52572385e-03,
0.48075779e-03,
0.43583699e-03,
0.39130700e-03,
0.34728937e-03,
0.30423937e-03,
0.26211512e-03,
0.22102211e-03,
0.18136523e-03,
0.14320243e-03,
0.10641267e-03,
0.70891205e-04,
0.36568774e-04,
0.29850753e-05,
-0.30428575e-04,
-0.63911211e-04,
-0.97726115e-04,
-0.13241505e-03,
-0.16834371e-03,
-0.20574214e-03,
-0.24483376e-03,
-0.28623073e-03,
-0.33093616e-03,
-0.38131754e-03,
-0.44136596e-03,
-0.51900127e-03,
-0.62973931e-03,
-0.80306787e-03,
-0.10939565e-02,
-0.15995824e-02,
-0.24733264e-02,
-0.39081872e-02,
-0.60330946e-02,
-0.86653326e-02,
-0.10997782e-01,
-0.11619342e-01,
-0.93000261e-02,
-0.42796796e-02,
0.12966705e-02,
0.47367909e-02,
0.48012077e-02,
0.23494719e-02,
-0.77558518e-03,
-0.32301398e-02,
-0.47397721e-02,
-0.57379329e-02,
-0.67931442e-02,
-0.83390176e-02,
-0.10730988e-01,
-0.14351542e-01,
-0.19255424e-01,
-0.24215246e-01,
-0.26824960e-01,
-0.25858946e-01,
-0.22609137e-01,
-0.19117815e-01,
0.54258847e-03,
0.61508559e-03,
0.70537691e-03,
0.79757918e-03,
0.88023918e-03,
0.94631047e-03,
0.99372561e-03,
0.10245563e-02,
0.10427287e-02,
0.10518713e-02,
0.10544811e-02,
0.10520979e-02,
0.10457006e-02,
0.10359808e-02,
0.10234818e-02,
0.10086508e-02,
0.99183375e-03,
0.97323948e-03,
0.95295400e-03,
0.93100907e-03,
0.90750493e-03,
0.88267523e-03,
0.85658359e-03,
0.82906929e-03,
0.80003683e-03,
0.76972524e-03,
0.73839410e-03,
0.70602290e-03,
0.67255105e-03,
0.63823280e-03,
0.60333178e-03,
0.56789321e-03,
0.53199811e-03,
0.49601536e-03,
0.45993822e-03,
0.42375299e-03,
0.38750842e-03,
0.35126708e-03,
0.31534981e-03,
0.27984436e-03,
0.24513889e-03,
0.21121174e-03,
0.17798673e-03,
0.14601613e-03,
0.11531570e-03,
0.85622109e-04,
0.56977922e-04,
0.29328881e-04,
0.23219493e-05,
-0.24609051e-04,
-0.51613693e-04,
-0.78829580e-04,
-0.10677023e-03,
-0.13576847e-03,
-0.16589028e-03,
-0.19740456e-03,
-0.23071076e-03,
-0.26662330e-03,
-0.30694614e-03,
-0.35483547e-03,
-0.41624482e-03,
-0.50326047e-03,
-0.63863368e-03,
-0.86484943e-03,
-0.12584554e-02,
-0.19429043e-02,
-0.30815345e-02,
-0.48037078e-02,
-0.70073316e-02,
-0.90698786e-02,
-0.97764749e-02,
-0.79293735e-02,
-0.35561759e-02,
0.14528841e-02,
0.44905520e-02,
0.43353909e-02,
0.19235054e-02,
-0.88058808e-03,
-0.28684181e-02,
-0.39436910e-02,
-0.46082577e-02,
-0.53751953e-02,
-0.65655066e-02,
-0.84049981e-02,
-0.11191580e-01,
-0.15233751e-01,
-0.20167785e-01,
-0.24146404e-01,
-0.24793619e-01,
-0.21611178e-01,
-0.16357245e-01,
-0.11137053e-01,
0.42571200e-03,
0.48259774e-03,
0.55344484e-03,
0.62578957e-03,
0.69064385e-03,
0.74247847e-03,
0.77967369e-03,
0.80386118e-03,
0.81812969e-03,
0.82533201e-03,
0.82742231e-03,
0.82559290e-03,
0.82059979e-03,
0.81299205e-03,
0.80320571e-03,
0.79159206e-03,
0.77843288e-03,
0.76390296e-03,
0.74805156e-03,
0.73085778e-03,
0.71239163e-03,
0.69290656e-03,
0.67251193e-03,
0.65096776e-03,
0.62812673e-03,
0.60432340e-03,
0.57976606e-03,
0.55436889e-03,
0.52808766e-03,
0.50110236e-03,
0.47373594e-03,
0.44590727e-03,
0.41769189e-03,
0.38944930e-03,
0.36113514e-03,
0.33273242e-03,
0.30424978e-03,
0.27580233e-03,
0.24755922e-03,
0.21969499e-03,
0.19240828e-03,
0.16580452e-03,
0.13966703e-03,
0.11454133e-03,
0.90488385e-04,
0.67134846e-04,
0.44648674e-04,
0.22957218e-04,
0.17676108e-05,
-0.19375148e-04,
-0.40583618e-04,
-0.61928760e-04,
-0.83859930e-04,
-0.10665116e-03,
-0.13027368e-03,
-0.15504849e-03,
-0.18114736e-03,
-0.20928915e-03,
-0.24077985e-03,
-0.27812496e-03,
-0.32568991e-03,
-0.39278640e-03,
-0.49672712e-03,
-0.66978729e-03,
-0.97107701e-03,
-0.14971532e-02,
-0.23801890e-02,
-0.37356641e-02,
-0.55097779e-02,
-0.72345133e-02,
-0.79152975e-02,
-0.64894208e-02,
-0.28653429e-02,
0.13867198e-02,
0.39382749e-02,
0.36805528e-02,
0.15104047e-02,
-0.85980649e-03,
-0.24118538e-02,
-0.31621221e-02,
-0.36013129e-02,
-0.41603735e-02,
-0.50707515e-02,
-0.64725522e-02,
-0.85721835e-02,
-0.11695998e-01,
-0.15934667e-01,
-0.20252047e-01,
-0.22257976e-01,
-0.20039089e-01,
-0.14158701e-01,
-0.70773158e-02,
-0.11583769e-02,
0.32308648e-03,
0.36626059e-03,
0.42002957e-03,
0.47493313e-03,
0.52414869e-03,
0.56347938e-03,
0.59169793e-03,
0.61004912e-03,
0.62088534e-03,
0.62637340e-03,
0.62798650e-03,
0.62661694e-03,
0.62283722e-03,
0.61707193e-03,
0.60965854e-03,
0.60086075e-03,
0.59088779e-03,
0.57988992e-03,
0.56789967e-03,
0.55486214e-03,
0.54083182e-03,
0.52603544e-03,
0.51060918e-03,
0.49428100e-03,
0.47690922e-03,
0.45883877e-03,
0.44021819e-03,
0.42093766e-03,
0.40098530e-03,
0.38046407e-03,
0.35971330e-03,
0.33858226e-03,
0.31714686e-03,
0.29569783e-03,
0.27421847e-03,
0.25265093e-03,
0.23099354e-03,
0.20943003e-03,
0.18794239e-03,
0.16680970e-03,
0.14604363e-03,
0.12587215e-03,
0.10602640e-03,
0.86910019e-04,
0.68677633e-04,
0.50912015e-04,
0.33862842e-04,
0.17410162e-04,
0.13113118e-05,
-0.14745443e-04,
-0.30839601e-04,
-0.47037924e-04,
-0.63687643e-04,
-0.81004437e-04,
-0.98914243e-04,
-0.11776508e-03,
-0.13753906e-03,
-0.15888251e-03,
-0.18270283e-03,
-0.21094215e-03,
-0.24672056e-03,
-0.29703090e-03,
-0.37476781e-03,
-0.50383923e-03,
-0.72860206e-03,
-0.11220841e-02,
-0.17865424e-02,
-0.28166992e-02,
-0.41856705e-02,
-0.55507738e-02,
-0.61370027e-02,
-0.50718379e-02,
-0.22190376e-02,
0.11870584e-02,
0.32180531e-02,
0.29439172e-02,
0.11399792e-02,
-0.75004215e-03,
-0.19179990e-02,
-0.24332437e-02,
-0.27225760e-02,
-0.31262944e-02,
-0.38075345e-02,
-0.48529524e-02,
-0.64000175e-02,
-0.87149860e-02,
-0.12041125e-01,
-0.15928924e-01,
-0.18521670e-01,
-0.17406538e-01,
-0.11980806e-01,
-0.43471926e-02,
0.24430454e-02,
0.64657470e-02,
0.23461202e-03,
0.26596323e-03,
0.30500707e-03,
0.34487346e-03,
0.38060735e-03,
0.40916057e-03,
0.42964329e-03,
0.44296391e-03,
0.45083603e-03,
0.45483361e-03,
0.45601939e-03,
0.45503493e-03,
0.45229631e-03,
0.44811305e-03,
0.44273314e-03,
0.43635038e-03,
0.42911444e-03,
0.42114657e-03,
0.41246001e-03,
0.40299704e-03,
0.39280354e-03,
0.38204770e-03,
0.37087622e-03,
0.35903638e-03,
0.34639766e-03,
0.33326566e-03,
0.31975409e-03,
0.30575541e-03,
0.29127134e-03,
0.27634719e-03,
0.26128866e-03,
0.24593511e-03,
0.23035664e-03,
0.21476793e-03,
0.19919084e-03,
0.18352638e-03,
0.16776577e-03,
0.15213400e-03,
0.13650597e-03,
0.12117127e-03,
0.10604783e-03,
0.91420814e-04,
0.77014876e-04,
0.63102707e-04,
0.49865157e-04,
0.36943147e-04,
0.24579298e-04,
0.12646942e-04,
0.93583714e-06,
-0.10726711e-04,
-0.22409520e-04,
-0.34169330e-04,
-0.46268411e-04,
-0.58847447e-04,
-0.71843184e-04,
-0.85561762e-04,
-0.99899378e-04,
-0.11538975e-03,
-0.13264996e-03,
-0.15311912e-03,
-0.17894371e-03,
-0.21519083e-03,
-0.27111307e-03,
-0.36378118e-03,
-0.52517361e-03,
-0.80816675e-03,
-0.12878858e-02,
-0.20363682e-02,
-0.30407030e-02,
-0.40585143e-02,
-0.45180605e-02,
-0.37541774e-02,
-0.16340034e-02,
0.92716783e-03,
0.24486976e-02,
0.22101640e-02,
0.82245702e-03,
-0.59355749e-03,
-0.14349412e-02,
-0.17818650e-02,
-0.19710022e-02,
-0.22550686e-02,
-0.27460111e-02,
-0.34975049e-02,
-0.45994679e-02,
-0.62465151e-02,
-0.86843111e-02,
-0.11772950e-01,
-0.14247025e-01,
-0.13897673e-01,
-0.95402114e-02,
-0.26456779e-02,
0.37637560e-02,
0.75113312e-02,
0.82785152e-02,
0.16024915e-03,
0.18166313e-03,
0.20833101e-03,
0.23556003e-03,
0.25996531e-03,
0.27946456e-03,
0.29345075e-03,
0.30254686e-03,
0.30792650e-03,
0.31066421e-03,
0.31148014e-03,
0.31080900e-03,
0.30893844e-03,
0.30608277e-03,
0.30241298e-03,
0.29805984e-03,
0.29311786e-03,
0.28767882e-03,
0.28175494e-03,
0.27529473e-03,
0.26832905e-03,
0.26097632e-03,
0.25335979e-03,
0.24527783e-03,
0.23663967e-03,
0.22766842e-03,
0.21843993e-03,
0.20887410e-03,
0.19898124e-03,
0.18877641e-03,
0.17849728e-03,
0.16801062e-03,
0.15736978e-03,
0.14671541e-03,
0.13608270e-03,
0.12537625e-03,
0.11459773e-03,
0.10394175e-03,
0.93250273e-04,
0.82784871e-04,
0.72433824e-04,
0.62449340e-04,
0.52622323e-04,
0.43097367e-04,
0.34058194e-04,
0.25217514e-04,
0.16786646e-04,
0.86424679e-05,
0.63486573e-06,
-0.73354054e-05,
-0.15307762e-04,
-0.23343211e-04,
-0.31608852e-04,
-0.40205730e-04,
-0.49073769e-04,
-0.58462469e-04,
-0.68241599e-04,
-0.78819096e-04,
-0.90594622e-04,
-0.10456181e-03,
-0.12214016e-03,
-0.14677636e-03,
-0.18476187e-03,
-0.24763195e-03,
-0.35712289e-03,
-0.54928858e-03,
-0.87579578e-03,
-0.13871513e-02,
-0.20772433e-02,
-0.27833104e-02,
-0.31113259e-02,
-0.25939168e-02,
-0.11258019e-02,
0.66039932e-03,
0.17193498e-02,
0.15396239e-02,
0.55894168e-03,
-0.42620959e-03,
-0.99759083e-03,
-0.12229718e-02,
-0.13435301e-02,
-0.15339972e-02,
-0.18679998e-02,
-0.23784370e-02,
-0.31225604e-02,
-0.42320755e-02,
-0.58987723e-02,
-0.81079528e-02,
-0.10059725e-01,
-0.10070107e-01,
-0.69461395e-02,
-0.15970924e-02,
0.35173711e-02,
0.64823525e-02,
0.69939806e-02,
0.58447313e-02,
0.99998928e-04,
0.11336149e-03,
0.13000234e-03,
0.14699313e-03,
0.16222145e-03,
0.17438777e-03,
0.18311353e-03,
0.18878833e-03,
0.19214548e-03,
0.19385564e-03,
0.19436714e-03,
0.19395056e-03,
0.19278521e-03,
0.19100346e-03,
0.18871219e-03,
0.18599514e-03,
0.18291226e-03,
0.17952279e-03,
0.17582905e-03,
0.17179834e-03,
0.16745202e-03,
0.16285951e-03,
0.15811202e-03,
0.15307403e-03,
0.14767774e-03,
0.14207418e-03,
0.13631718e-03,
0.13035099e-03,
0.12418116e-03,
0.11780974e-03,
0.11139606e-03,
0.10484913e-03,
0.98205564e-04,
0.91553651e-04,
0.84926301e-04,
0.78246710e-04,
0.71511990e-04,
0.64868305e-04,
0.58195605e-04,
0.51666491e-04,
0.45197361e-04,
0.38973347e-04,
0.32842243e-04,
0.26893933e-04,
0.21249898e-04,
0.15730866e-04,
0.10472757e-04,
0.53960257e-05,
0.39317513e-06,
-0.45791730e-05,
-0.95536843e-05,
-0.14566379e-04,
-0.19728041e-04,
-0.25091455e-04,
-0.30624815e-04,
-0.36488018e-04,
-0.42586758e-04,
-0.49185081e-04,
-0.56528501e-04,
-0.65242508e-04,
-0.76187855e-04,
-0.91523340e-04,
-0.11515653e-03,
-0.15424404e-03,
-0.22232164e-03,
-0.34186023e-03,
-0.54521387e-03,
-0.86433312e-03,
-0.12963127e-02,
-0.17405468e-02,
-0.19500151e-02,
-0.16286803e-02,
-0.70585759e-03,
0.42125979e-03,
0.10887617e-02,
0.97085984e-03,
0.34771912e-03,
-0.27306599e-03,
-0.62840234e-03,
-0.76508470e-03,
-0.83742116e-03,
-0.95511600e-03,
-0.11631359e-02,
-0.14807530e-02,
-0.19422875e-02,
-0.26292126e-02,
-0.36683388e-02,
-0.50780545e-02,
-0.63869823e-02,
-0.64897770e-02,
-0.44976156e-02,
-0.92731958e-03,
0.25404119e-02,
0.45405924e-02,
0.48503033e-02,
0.40294230e-02,
0.27683314e-02,
0.53884014e-04,
0.61084211e-04,
0.70050810e-04,
0.79205915e-04,
0.87411223e-04,
0.93966446e-04,
0.98667610e-04,
0.10172475e-03,
0.10353317e-03,
0.10445446e-03,
0.10473084e-03,
0.10450836e-03,
0.10388237e-03,
0.10292211e-03,
0.10168495e-03,
0.10021854e-03,
0.98558390e-04,
0.96735770e-04,
0.94745832e-04,
0.92573500e-04,
0.90232592e-04,
0.87755776e-04,
0.85199157e-04,
0.82488186e-04,
0.79575933e-04,
0.76551951e-04,
0.73451745e-04,
0.70241033e-04,
0.66919587e-04,
0.63485968e-04,
0.60029004e-04,
0.56497996e-04,
0.52914555e-04,
0.49328813e-04,
0.45763074e-04,
0.42167223e-04,
0.38533406e-04,
0.34952976e-04,
0.31360858e-04,
0.27841659e-04,
0.24351986e-04,
0.21002805e-04,
0.17696189e-04,
0.14492869e-04,
0.11447947e-04,
0.84757721e-05,
0.56412364e-05,
0.29090747e-05,
0.20986288e-06,
-0.24671158e-05,
-0.51494958e-05,
-0.78480261e-05,
-0.10632524e-04,
-0.13520086e-04,
-0.16503614e-04,
-0.19662311e-04,
-0.22948981e-04,
-0.26503118e-04,
-0.30458663e-04,
-0.35155379e-04,
-0.41044819e-04,
-0.49301310e-04,
-0.62018436e-04,
-0.83043000e-04,
-0.11966786e-03,
-0.18399033e-03,
-0.29346853e-03,
-0.46542627e-03,
-0.69852220e-03,
-0.93878357e-03,
-0.10528294e-02,
-0.88006578e-03,
-0.38117066e-03,
0.22924476e-03,
0.59057289e-03,
0.52561943e-03,
0.18708916e-03,
-0.14889365e-03,
-0.34005527e-03,
-0.41272707e-03,
-0.45099761e-03,
-0.51413663e-03,
-0.62613422e-03,
-0.79706579e-03,
-0.10450790e-02,
-0.14138672e-02,
-0.19733894e-02,
-0.27403242e-02,
-0.34682453e-02,
-0.35488785e-02,
-0.24659703e-02,
-0.48296375e-03,
0.14571503e-02,
0.25734871e-02,
0.27371510e-02,
0.22676513e-02,
0.15553402e-02,
0.87312493e-03,
0.21932714e-04,
0.24863417e-04,
0.28513090e-04,
0.32239484e-04,
0.35579265e-04,
0.38247381e-04,
0.40160809e-04,
0.41405037e-04,
0.42140975e-04,
0.42515869e-04,
0.42628493e-04,
0.42538381e-04,
0.42284017e-04,
0.41893109e-04,
0.41388936e-04,
0.40791474e-04,
0.40115974e-04,
0.39374940e-04,
0.38564984e-04,
0.37680649e-04,
0.36728081e-04,
0.35719502e-04,
0.34679109e-04,
0.33576438e-04,
0.32390075e-04,
0.31158215e-04,
0.29896764e-04,
0.28590852e-04,
0.27239597e-04,
0.25841982e-04,
0.24434608e-04,
0.22996630e-04,
0.21537227e-04,
0.20077452e-04,
0.18627210e-04,
0.17164399e-04,
0.15684327e-04,
0.14226660e-04,
0.12765439e-04,
0.11332698e-04,
0.99115950e-05,
0.85493384e-05,
0.72026537e-05,
0.58994046e-05,
0.46591872e-05,
0.34499117e-05,
0.22957561e-05,
0.11843766e-05,
0.84998014e-07,
-0.10040685e-05,
-0.20963864e-05,
-0.31941836e-05,
-0.43282666e-05,
-0.55030100e-05,
-0.67179099e-05,
-0.80033133e-05,
-0.93413128e-05,
-0.10787697e-04,
-0.12397514e-04,
-0.14309594e-04,
-0.16705344e-04,
-0.20065328e-04,
-0.25239178e-04,
-0.33791446e-04,
-0.48691152e-04,
-0.74860320e-04,
-0.11940804e-03,
-0.18940083e-03,
-0.28432420e-03,
-0.38224252e-03,
-0.42882701e-03,
-0.35856038e-03,
-0.15526464e-03,
0.93624702e-04,
0.24092782e-03,
0.21429047e-03,
0.76112388e-04,
-0.60849816e-04,
-0.13861567e-03,
-0.16805886e-03,
-0.18353794e-03,
-0.20919893e-03,
-0.25477342e-03,
-0.32431912e-03,
-0.42517454e-03,
-0.57509379e-03,
-0.80277218e-03,
-0.11159416e-02,
-0.14153966e-02,
-0.14518298e-02,
-0.10097986e-02,
-0.19427406e-03,
0.60561736e-03,
0.10654813e-02,
0.11315914e-02,
0.93659549e-03,
0.64201583e-03,
0.36030522e-03,
0.14866883e-03,
0.41634707e-05,
0.47198023e-05,
0.54126149e-05,
0.61199912e-05,
0.67539763e-05,
0.72604594e-05,
0.76236788e-05,
0.78598650e-05,
0.79995607e-05,
0.80707214e-05,
0.80921054e-05,
0.80750178e-05,
0.80267509e-05,
0.79525425e-05,
0.78568110e-05,
0.77433706e-05,
0.76151509e-05,
0.74745158e-05,
0.73207625e-05,
0.71528843e-05,
0.69720704e-05,
0.67805959e-05,
0.65831077e-05,
0.63738212e-05,
0.61485744e-05,
0.59146905e-05,
0.56752488e-05,
0.54273883e-05,
0.51709094e-05,
0.49056021e-05,
0.46384298e-05,
0.43654286e-05,
0.40883574e-05,
0.38112394e-05,
0.35359881e-05,
0.32583384e-05,
0.29773378e-05,
0.27006156e-05,
0.24232718e-05,
0.21512824e-05,
0.18814886e-05,
0.16229329e-05,
0.13672592e-05,
0.11198919e-05,
0.88442783e-06,
0.65489411e-06,
0.43578387e-06,
0.22484058e-06,
0.16117639e-07,
-0.19059540e-06,
-0.39797052e-06,
-0.60633920e-06,
-0.82165008e-06,
-0.10446255e-05,
-0.12752704e-05,
-0.15192638e-05,
-0.17732648e-05,
-0.20478196e-05,
-0.23534012e-05,
-0.27163851e-05,
-0.31711115e-05,
-0.38089131e-05,
-0.47909707e-05,
-0.64142382e-05,
-0.92423534e-05,
-0.14209581e-04,
-0.22665536e-04,
-0.35952206e-04,
-0.53973064e-04,
-0.72565315e-04,
-0.81414371e-04,
-0.68077694e-04,
-0.29477938e-04,
0.17784128e-04,
0.45754921e-04,
0.40691128e-04,
0.14446910e-04,
-0.11559983e-04,
-0.26320620e-04,
-0.31904819e-04,
-0.34839621e-04,
-0.39709423e-04,
-0.48360336e-04,
-0.61561077e-04,
-0.80702943e-04,
-0.10915505e-03,
-0.15237252e-03,
-0.21185735e-03,
-0.26881794e-03,
-0.27586648e-03,
-0.19191137e-03,
-0.36795147e-04,
0.11542100e-03,
0.20291690e-03,
0.21544744e-03,
0.17828873e-03,
0.12219916e-03,
0.68575428e-04,
0.28294970e-04,
0.53851386e-05,
]
)
return spherical_albedo, albedo, expected_r1
| 27.33128 | 77 | 0.43567 |
import numpy as np
from mishchenko_brf.lib.refl import brf
def test_brf():
    """Regression test for ``brf``: compare the computed albedo and the
    lower-triangular rows of the ``r[1]`` reflection slice against the
    precomputed reference values returned by ``results()``."""
    single_scattering_albedo, _, legendre_coefs = setup()
    _, sph_albedo, albedo, _, refl = brf(
        single_scattering_albedo, len(legendre_coefs), legendre_coefs
    )
    # Reference data; the spherical albedo is returned but not asserted here.
    _expected_sph_albedo, ref_albedo, ref_r1 = results()
    np.testing.assert_allclose(albedo, ref_albedo, rtol=0, atol=1e-6)
    # Collect row i's first i+1 entries of the r[1] slice (lower triangle),
    # then compare the flattened result against the reference vector.
    rows = []
    for i in range(refl.shape[1]):
        rows.append(refl[1, i, : i + 1])
    np.testing.assert_allclose(np.concatenate(rows), ref_r1, rtol=0, atol=1e-5)
def setup():
    """Build the inputs for brf(): single-scattering albedo and the Legendre
    expansion coefficients of the scattering phase function.

    Returns:
        tuple: ``(ssalb, n_coef, coef)`` where
            ``ssalb`` (float) is the single-scattering albedo,
            ``n_coef`` (int) is the number of non-padded coefficients, and
            ``coef`` (np.ndarray) is the coefficient array zero-padded to
            length 700 (the fixed buffer size the solver expects —
            TODO confirm against brf()'s signature).
    """
    ssalb = 0.85404045e00
    # Legendre expansion coefficients of the phase function (renamed from
    # "Legendre_coef" to follow PEP 8 snake_case for locals).
    legendre_coef = [
        0.1000000e01,
        0.2512562e01,
        0.3759305e01,
        0.4408389e01,
        0.5536463e01,
        0.6260982e01,
        0.7525636e01,
        0.8312788e01,
        0.9542491e01,
        0.1040885e02,
        0.1151645e02,
        0.1244280e02,
        0.1343854e02,
        0.1442002e02,
        0.1533074e02,
        0.1628946e02,
        0.1717182e02,
        0.1807816e02,
        0.1898665e02,
        0.1978204e02,
        0.2073036e02,
        0.2142049e02,
        0.2241713e02,
        0.2301598e02,
        0.2401247e02,
        0.2456069e02,
        0.2552589e02,
        0.2607232e02,
        0.2695832e02,
        0.2752722e02,
        0.2831653e02,
        0.2892665e02,
        0.2962000e02,
        0.3025463e02,
        0.3086891e02,
        0.3150598e02,
        0.3207453e02,
        0.3268248e02,
        0.3323146e02,
        0.3378450e02,
        0.3433640e02,
        0.3482245e02,
        0.3538333e02,
        0.3580127e02,
        0.3636525e02,
        0.3672771e02,
        0.3728034e02,
        0.3760455e02,
        0.3812729e02,
        0.3843143e02,
        0.3890899e02,
        0.3920687e02,
        0.3962926e02,
        0.3992750e02,
        0.4029233e02,
        0.4059085e02,
        0.4090206e02,
        0.4119521e02,
        0.4146075e02,
        0.4174035e02,
        0.4196960e02,
        0.4222747e02,
        0.4242858e02,
        0.4265858e02,
        0.4283710e02,
        0.4303618e02,
        0.4319451e02,
        0.4336267e02,
        0.4350045e02,
        0.4364008e02,
        0.4375514e02,
        0.4386992e02,
        0.4395939e02,
        0.4405318e02,
        0.4411450e02,
        0.4419058e02,
        0.4422209e02,
        0.4428264e02,
        0.4428387e02,
        0.4432995e02,
        0.4430155e02,
        0.4433324e02,
        0.4427669e02,
        0.4429349e02,
        0.4421068e02,
        0.4421187e02,
        0.4410481e02,
        0.4408970e02,
        0.4396023e02,
        0.4392847e02,
        0.4377812e02,
        0.4372965e02,
        0.4355963e02,
        0.4349478e02,
        0.4330600e02,
        0.4322534e02,
        0.4301853e02,
        0.4292279e02,
        0.4269857e02,
        0.4258853e02,
        0.4234756e02,
        0.4222396e02,
        0.4196694e02,
        0.4183048e02,
        0.4155822e02,
        0.4140946e02,
        0.4112286e02,
        0.4096230e02,
        0.4066235e02,
        0.4049042e02,
        0.4017813e02,
        0.3999523e02,
        0.3967166e02,
        0.3947818e02,
        0.3914435e02,
        0.3894071e02,
        0.3859761e02,
        0.3838426e02,
        0.3803282e02,
        0.3781024e02,
        0.3745135e02,
        0.3722008e02,
        0.3685457e02,
        0.3661513e02,
        0.3624382e02,
        0.3599677e02,
        0.3562045e02,
        0.3536629e02,
        0.3498576e02,
        0.3472502e02,
        0.3434105e02,
        0.3407421e02,
        0.3368756e02,
        0.3341511e02,
        0.3302651e02,
        0.3274894e02,
        0.3235911e02,
        0.3207689e02,
        0.3168649e02,
        0.3140011e02,
        0.3100977e02,
        0.3071973e02,
        0.3033004e02,
        0.3003681e02,
        0.2964833e02,
        0.2935240e02,
        0.2896567e02,
        0.2866749e02,
        0.2828303e02,
        0.2798304e02,
        0.2760134e02,
        0.2729996e02,
        0.2692148e02,
        0.2661913e02,
        0.2624432e02,
        0.2594138e02,
        0.2557065e02,
        0.2526752e02,
        0.2490123e02,
        0.2459831e02,
        0.2423680e02,
        0.2393445e02,
        0.2357803e02,
        0.2327663e02,
        0.2292556e02,
        0.2262546e02,
        0.2228000e02,
        0.2198153e02,
        0.2164193e02,
        0.2134540e02,
        0.2101185e02,
        0.2071756e02,
        0.2039027e02,
        0.2009849e02,
        0.1977763e02,
        0.1948862e02,
        0.1917433e02,
        0.1888834e02,
        0.1858075e02,
        0.1829802e02,
        0.1799722e02,
        0.1771798e02,
        0.1742405e02,
        0.1714851e02,
        0.1686151e02,
        0.1658986e02,
        0.1630983e02,
        0.1604226e02,
        0.1576923e02,
        0.1550590e02,
        0.1523988e02,
        0.1498093e02,
        0.1472193e02,
        0.1446749e02,
        0.1421550e02,
        0.1396569e02,
        0.1372068e02,
        0.1347561e02,
        0.1323754e02,
        0.1299730e02,
        0.1276613e02,
        0.1253080e02,
        0.1230645e02,
        0.1207611e02,
        0.1185852e02,
        0.1163322e02,
        0.1142231e02,
        0.1120210e02,
        0.1099778e02,
        0.1078270e02,
        0.1058487e02,
        0.1037494e02,
        0.1018351e02,
        0.9978738e01,
        0.9793600e01,
        0.9593997e01,
        0.9415044e01,
        0.9220600e01,
        0.9047715e01,
        0.8858418e01,
        0.8691482e01,
        0.8507312e01,
        0.8346198e01,
        0.8167129e01,
        0.8011710e01,
        0.7837708e01,
        0.7687854e01,
        0.7518876e01,
        0.7374456e01,
        0.7210453e01,
        0.7071336e01,
        0.6912254e01,
        0.6778307e01,
        0.6624084e01,
        0.6495174e01,
        0.6345745e01,
        0.6221738e01,
        0.6077033e01,
        0.5957794e01,
        0.5817740e01,
        0.5703134e01,
        0.5567654e01,
        0.5457548e01,
        0.5326563e01,
        0.5220821e01,
        0.5094248e01,
        0.4992739e01,
        0.4870492e01,
        0.4773085e01,
        0.4655075e01,
        0.4561642e01,
        0.4447778e01,
        0.4358190e01,
        0.4248381e01,
        0.4162514e01,
        0.4056666e01,
        0.3974395e01,
        0.3872413e01,
        0.3793618e01,
        0.3695406e01,
        0.3619966e01,
        0.3525429e01,
        0.3453228e01,
        0.3362271e01,
        0.3293193e01,
        0.3205718e01,
        0.3139651e01,
        0.3055564e01,
        0.2992397e01,
        0.2911601e01,
        0.2851228e01,
        0.2773628e01,
        0.2715944e01,
        0.2641446e01,
        0.2586348e01,
        0.2514857e01,
        0.2462248e01,
        0.2393671e01,
        0.2343453e01,
        0.2277698e01,
        0.2229778e01,
        0.2166754e01,
        0.2121041e01,
        0.2060659e01,
        0.2017065e01,
        0.1959237e01,
        0.1917674e01,
        0.1862314e01,
        0.1822700e01,
        0.1769722e01,
        0.1731977e01,
        0.1681298e01,
        0.1645344e01,
        0.1596882e01,
        0.1562643e01,
        0.1516319e01,
        0.1483723e01,
        0.1439458e01,
        0.1408435e01,
        0.1366152e01,
        0.1336633e01,
        0.1296260e01,
        0.1268180e01,
        0.1229642e01,
        0.1202937e01,
        0.1166165e01,
        0.1140775e01,
        0.1105699e01,
        0.1081566e01,
        0.1048119e01,
        0.1025186e01,
        0.9933033e00,
        0.9715168e00,
        0.9411347e00,
        0.9204422e00,
        0.8914999e00,
        0.8718511e00,
        0.8442892e00,
        0.8256361e00,
        0.7993970e00,
        0.7816934e00,
        0.7567216e00,
        0.7399231e00,
        0.7161648e00,
        0.7002287e00,
        0.6776319e00,
        0.6625175e00,
        0.6410319e00,
        0.6267001e00,
        0.6062772e00,
        0.5926905e00,
        0.5732835e00,
        0.5604061e00,
        0.5419698e00,
        0.5297674e00,
        0.5122584e00,
        0.5006981e00,
        0.4840745e00,
        0.4731249e00,
        0.4573463e00,
        0.4469774e00,
        0.4320051e00,
        0.4221882e00,
        0.4079849e00,
        0.3986924e00,
        0.3852225e00,
        0.3764283e00,
        0.3636572e00,
        0.3553362e00,
        0.3432310e00,
        0.3353594e00,
        0.3238883e00,
        0.3164434e00,
        0.3055761e00,
        0.2985361e00,
        0.2882435e00,
        0.2815877e00,
        0.2718419e00,
        0.2655505e00,
        0.2563248e00,
        0.2503791e00,
        0.2416479e00,
        0.2360299e00,
        0.2277687e00,
        0.2224615e00,
        0.2146470e00,
        0.2096341e00,
        0.2022440e00,
        0.1975101e00,
        0.1905229e00,
        0.1860533e00,
        0.1794487e00,
        0.1752294e00,
        0.1689879e00,
        0.1650056e00,
        0.1591086e00,
        0.1553506e00,
        0.1497804e00,
        0.1462348e00,
        0.1409744e00,
        0.1376297e00,
        0.1326632e00,
        0.1295086e00,
        0.1248204e00,
        0.1218456e00,
        0.1174212e00,
        0.1146165e00,
        0.1104419e00,
        0.1077980e00,
        0.1038600e00,
        0.1013680e00,
        0.9765404e-01,
        0.9530568e-01,
        0.9180371e-01,
        0.8959107e-01,
        0.8628968e-01,
        0.8420526e-01,
        0.8109362e-01,
        0.7913032e-01,
        0.7619812e-01,
        0.7434919e-01,
        0.7158666e-01,
        0.6984571e-01,
        0.6724356e-01,
        0.6560456e-01,
        0.6315396e-01,
        0.6161117e-01,
        0.5930377e-01,
        0.5785177e-01,
        0.5567961e-01,
        0.5431328e-01,
        0.5226885e-01,
        0.5098332e-01,
        0.4905947e-01,
        0.4785016e-01,
        0.4604013e-01,
        0.4490269e-01,
        0.4320006e-01,
        0.4213037e-01,
        0.4052908e-01,
        0.3952327e-01,
        0.3801755e-01,
        0.3707194e-01,
        0.3565637e-01,
        0.3476748e-01,
        0.3343689e-01,
        0.3260145e-01,
        0.3135097e-01,
        0.3056588e-01,
        0.2939089e-01,
        0.2865322e-01,
        0.2754936e-01,
        0.2685635e-01,
        0.2581950e-01,
        0.2516853e-01,
        0.2419479e-01,
        0.2358340e-01,
        0.2266910e-01,
        0.2209496e-01,
        0.2123660e-01,
        0.2069752e-01,
        0.1989183e-01,
        0.1938574e-01,
        0.1862960e-01,
        0.1815454e-01,
        0.1744504e-01,
        0.1699918e-01,
        0.1633353e-01,
        0.1591513e-01,
        0.1529073e-01,
        0.1489815e-01,
        0.1431255e-01,
        0.1394424e-01,
        0.1339511e-01,
        0.1304962e-01,
        0.1253477e-01,
        0.1221073e-01,
        0.1172810e-01,
        0.1142421e-01,
        0.1097185e-01,
        0.1068691e-01,
        0.1026299e-01,
        0.9995839e-02,
        0.9598632e-02,
        0.9348197e-02,
        0.8976075e-02,
        0.8741341e-02,
        0.8392774e-02,
        0.8172784e-02,
        0.7846332e-02,
        0.7640186e-02,
        0.7334491e-02,
        0.7141344e-02,
        0.6855129e-02,
        0.6674184e-02,
        0.6406246e-02,
        0.6236754e-02,
        0.5985964e-02,
        0.5827218e-02,
        0.5592512e-02,
        0.5443850e-02,
        0.5224227e-02,
        0.5085025e-02,
        0.4879544e-02,
        0.4749216e-02,
        0.4556995e-02,
        0.4434989e-02,
        0.4255197e-02,
        0.4140997e-02,
        0.3972854e-02,
        0.3865974e-02,
        0.3708747e-02,
        0.3608730e-02,
        0.3461733e-02,
        0.3368151e-02,
        0.3230736e-02,
        0.3143185e-02,
        0.3014746e-02,
        0.2932847e-02,
        0.2812813e-02,
        0.2736210e-02,
        0.2624045e-02,
        0.2552404e-02,
        0.2447605e-02,
        0.2380611e-02,
        0.2282708e-02,
        0.2220067e-02,
        0.2128618e-02,
        0.2070056e-02,
        0.1984648e-02,
        0.1929906e-02,
        0.1850152e-02,
        0.1798987e-02,
        0.1724522e-02,
        0.1676708e-02,
        0.1607190e-02,
        0.1562511e-02,
        0.1497620e-02,
        0.1455875e-02,
        0.1395308e-02,
        0.1356308e-02,
        0.1299785e-02,
        0.1263353e-02,
        0.1210611e-02,
        0.1176583e-02,
        0.1127375e-02,
        0.1095598e-02,
        0.1049696e-02,
        0.1020025e-02,
        0.9772121e-03,
        0.9495118e-03,
        0.9095846e-03,
        0.8837259e-03,
        0.8464937e-03,
        0.8223557e-03,
        0.7876397e-03,
        0.7651098e-03,
        0.7327438e-03,
        0.7117181e-03,
        0.6815480e-03,
        0.6619300e-03,
        0.6338120e-03,
        0.6155113e-03,
        0.5893102e-03,
        0.5722408e-03,
        0.5478283e-03,
        0.5319081e-03,
        0.5091634e-03,
        0.4943152e-03,
        0.4731258e-03,
        0.4592787e-03,
        0.4395410e-03,
        0.4266303e-03,
        0.4082493e-03,
        0.3962155e-03,
        0.3791019e-03,
        0.3678880e-03,
        0.3519565e-03,
        0.3415071e-03,
        0.3266762e-03,
        0.3169383e-03,
        0.3031319e-03,
        0.2940574e-03,
        0.2812067e-03,
        0.2727531e-03,
        0.2607960e-03,
        0.2529247e-03,
        0.2418025e-03,
        0.2344753e-03,
        0.2241307e-03,
        0.2173091e-03,
        0.2076865e-03,
        0.2013343e-03,
        0.1923831e-03,
        0.1864691e-03,
        0.1781456e-03,
        0.1726433e-03,
        0.1649075e-03,
        0.1597912e-03,
        0.1526032e-03,
        0.1478454e-03,
        0.1411648e-03,
        0.1367381e-03,
        0.1305278e-03,
        0.1264096e-03,
        0.1206392e-03,
        0.1168121e-03,
        0.1114548e-03,
        0.1079011e-03,
        0.1029286e-03,
        0.9962750e-04,
        0.9500928e-04,
        0.9194008e-04,
        0.8765030e-04,
        0.8479853e-04,
        0.8081775e-04,
        0.7817267e-04,
        0.7448255e-04,
        0.7203030e-04,
        0.6860758e-04,
        0.6633051e-04,
        0.6315326e-04,
        0.6103854e-04,
        0.5809158e-04,
        0.5613237e-04,
        0.5340456e-04,
        0.5159216e-04,
        0.4906537e-04,
        0.4738409e-04,
        0.4504024e-04,
        0.4348066e-04,
        0.4130949e-04,
        0.3986734e-04,
        0.3786116e-04,
        0.3653038e-04,
        0.3467413e-04,
        0.3343939e-04,
        0.3171734e-04,
        0.3057505e-04,
        0.2898660e-04,
        0.2793625e-04,
        0.2646920e-04,
        0.2549635e-04,
        0.2413735e-04,
        0.2323900e-04,
        0.2198645e-04,
        0.2116049e-04,
        0.2000504e-04,
        0.1924253e-04,
        0.1817554e-04,
        0.1747326e-04,
        0.1649066e-04,
        0.1584529e-04,
        0.1494019e-04,
        0.1434660e-04,
        0.1351308e-04,
        0.1296814e-04,
        0.1220153e-04,
        0.1170191e-04,
        0.1099720e-04,
        0.1053953e-04,
        0.9892289e-05,
    ]
    # Zero-pad the coefficients to the fixed length 700 expected downstream;
    # the true (unpadded) count is still reported so brf() could use it.
    return (
        ssalb,
        len(legendre_coef),
        np.pad(legendre_coef, (0, 700 - len(legendre_coef))),
    )
def results():
spherical_albedo = 0.1400516239529828
albedo = [
0.57934552e00,
0.55945677e00,
0.53431237e00,
0.50788230e00,
0.48296762e00,
0.46127653e00,
0.44329438e00,
0.42849159e00,
0.41589457e00,
0.40462923e00,
0.39412692e00,
0.38407087e00,
0.37428829e00,
0.36468229e00,
0.35519615e00,
0.34579977e00,
0.33647874e00,
0.32722980e00,
0.31805637e00,
0.30896705e00,
0.29997292e00,
0.29108667e00,
0.28232241e00,
0.27369434e00,
0.26521713e00,
0.25690463e00,
0.24876949e00,
0.24082196e00,
0.23306957e00,
0.22551830e00,
0.21817389e00,
0.21104220e00,
0.20412904e00,
0.19744009e00,
0.19098036e00,
0.18475346e00,
0.17876221e00,
0.17300782e00,
0.16749054e00,
0.16220950e00,
0.15716265e00,
0.15234718e00,
0.14775957e00,
0.14339539e00,
0.13924994e00,
0.13531761e00,
0.13159263e00,
0.12806895e00,
0.12473993e00,
0.12159910e00,
0.11863959e00,
0.11585440e00,
0.11323670e00,
0.11077949e00,
0.10847593e00,
0.10631904e00,
0.10430222e00,
0.10241879e00,
0.10066233e00,
0.99026598e-01,
0.97505502e-01,
0.96093059e-01,
0.94783649e-01,
0.93571737e-01,
0.92452131e-01,
0.91419615e-01,
0.90469383e-01,
0.89596771e-01,
0.88797286e-01,
0.88066630e-01,
0.87400697e-01,
0.86795583e-01,
0.86247541e-01,
0.85752994e-01,
0.85308485e-01,
0.84910698e-01,
0.84556349e-01,
0.84242381e-01,
0.83965667e-01,
0.83723314e-01,
0.83512425e-01,
0.83330259e-01,
0.83174184e-01,
0.83041623e-01,
0.82930155e-01,
0.82837544e-01,
0.82761563e-01,
0.82700156e-01,
0.82651392e-01,
0.82613394e-01,
0.82584500e-01,
0.82563184e-01,
0.82548007e-01,
0.82537644e-01,
0.82530975e-01,
0.82526997e-01,
0.82524881e-01,
0.82523920e-01,
0.82523584e-01,
0.82523517e-01,
]
expected_r1 = np.array(
[
0.38368369e03,
0.25770578e03,
0.23945151e03,
0.16855780e03,
0.18244296e03,
0.16236093e03,
0.10911653e03,
0.12755070e03,
0.12647316e03,
0.10830920e03,
0.67582947e02,
0.83225288e02,
0.89081558e02,
0.82212189e02,
0.66570000e02,
0.39153576e02,
0.50326321e02,
0.57166462e02,
0.56031170e02,
0.47971886e02,
0.36434990e02,
0.21253523e02,
0.28237167e02,
0.33642113e02,
0.34688950e02,
0.31254200e02,
0.25037840e02,
0.18253815e02,
0.11248275e02,
0.15133494e02,
0.18625698e02,
0.20033745e02,
0.18957489e02,
0.16075739e02,
0.12519300e02,
0.92340946e01,
0.62269855e01,
0.82350597e01,
0.10240828e02,
0.11357998e02,
0.11247568e02,
0.10105079e02,
0.84183950e01,
0.66705170e01,
0.51578894e01,
0.37987945e01,
0.48435707e01,
0.59637489e01,
0.67243404e01,
0.69045143e01,
0.65221829e01,
0.57613211e01,
0.48524532e01,
0.39752564e01,
0.32225568e01,
0.25505664e01,
0.31431477e01,
0.38052323e01,
0.43142323e01,
0.45352106e01,
0.44408669e01,
0.40962334e01,
0.36127684e01,
0.30965683e01,
0.26170671e01,
0.22043598e01,
0.18349921e01,
0.22099471e01,
0.26385496e01,
0.29933913e01,
0.31896119e01,
0.31966636e01,
0.30375271e01,
0.27696035e01,
0.24563296e01,
0.21455364e01,
0.18630074e01,
0.16174023e01,
0.13863298e01,
0.16447055e01,
0.19445310e01,
0.22046304e01,
0.23685496e01,
0.24110959e01,
0.23400669e01,
0.21870027e01,
0.19907905e01,
0.17838671e01,
0.15866874e01,
0.14085795e01,
0.12514458e01,
0.10881330e01,
0.12770401e01,
0.14988452e01,
0.16977799e01,
0.18342333e01,
0.18884524e01,
0.18623250e01,
0.17742110e01,
0.16486713e01,
0.15075257e01,
0.13662242e01,
0.12339157e01,
0.11142954e01,
0.10071722e01,
0.88128895e00,
0.10257436e01,
0.11969687e01,
0.13544649e01,
0.14691297e01,
0.15254281e01,
0.15228883e01,
0.14728941e01,
0.13916924e01,
0.12941229e01,
0.11911522e01,
0.10903227e01,
0.99648142e00,
0.91146982e00,
0.83437926e00,
0.73236907e00,
0.84691751e00,
0.98372436e00,
0.11120189e01,
0.12095475e01,
0.12638915e01,
0.12736813e01,
0.12464422e01,
0.11935405e01,
0.11255139e01,
0.10501394e01,
0.97279239e00,
0.89753741e00,
0.82755452e00,
0.76412642e00,
0.70607662e00,
0.62143141e00,
0.71498531e00,
0.82739562e00,
0.93439400e00,
0.10183749e01,
0.10692183e01,
0.10852710e01,
0.10717980e01,
0.10371528e01,
0.98918498e00,
0.93369889e00,
0.87458736e00,
0.81457925e00,
0.75610143e00,
0.70160747e00,
0.65229672e00,
0.60690910e00,
0.53620493e00,
0.61444676e00,
0.70891893e00,
0.79989344e00,
0.87304217e00,
0.91996312e00,
0.93897974e00,
0.93395931e00,
0.91129071e00,
0.87703383e00,
0.83570266e00,
0.79033947e00,
0.74295175e00,
0.69498348e00,
0.64796978e00,
0.60397410e00,
0.56429613e00,
0.52768981e00,
0.46893141e00,
0.53562319e00,
0.61647099e00,
0.69504291e00,
0.75940472e00,
0.80246264e00,
0.82262319e00,
0.82284969e00,
0.80819505e00,
0.78347129e00,
0.75227189e00,
0.71705294e00,
0.67947024e00,
0.64061898e00,
0.60127056e00,
0.56240505e00,
0.52581406e00,
0.49295956e00,
0.46267310e00,
0.41456842e00,
0.47228998e00,
0.54249328e00,
0.61122215e00,
0.66834646e00,
0.70778871e00,
0.72807664e00,
0.73156416e00,
0.72236371e00,
0.70439237e00,
0.68056595e00,
0.65288788e00,
0.62274611e00,
0.59110469e00,
0.55858999e00,
0.52560019e00,
0.49273926e00,
0.46156633e00,
0.43370983e00,
0.40812615e00,
0.36973703e00,
0.42031151e00,
0.48198968e00,
0.54273206e00,
0.59380746e00,
0.62994283e00,
0.64979416e00,
0.65528655e00,
0.64984703e00,
0.63673460e00,
0.61836016e00,
0.59637630e00,
0.57194269e00,
0.54589856e00,
0.51883745e00,
0.49112943e00,
0.46296096e00,
0.43467191e00,
0.40760121e00,
0.38351870e00,
0.36154264e00,
0.33207551e00,
0.37681752e00,
0.43150941e00,
0.48563948e00,
0.53159100e00,
0.56474036e00,
0.58386314e00,
0.59056675e00,
0.58775848e00,
0.57819819e00,
0.56391406e00,
0.54628950e00,
0.52629930e00,
0.50466305e00,
0.48191690e00,
0.45844156e00,
0.43446112e00,
0.41004205e00,
0.38533735e00,
0.36147287e00,
0.34032100e00,
0.32119045e00,
0.29983068e00,
0.33969635e00,
0.38853076e00,
0.43707687e00,
0.47863159e00,
0.50910699e00,
0.52738410e00,
0.53481984e00,
0.53390729e00,
0.52700996e00,
0.51585591e00,
0.50161958e00,
0.48513207e00,
0.46701470e00,
0.44774175e00,
0.42767024e00,
0.40705225e00,
0.38602614e00,
0.36459178e00,
0.34277132e00,
0.32148623e00,
0.30266759e00,
0.28583673e00,
0.27165741e00,
0.30735224e00,
0.35116890e00,
0.39490715e00,
0.43263298e00,
0.46071306e00,
0.47812337e00,
0.48601636e00,
0.48654056e00,
0.48172772e00,
0.47305444e00,
0.46152285e00,
0.44784895e00,
0.43257853e00,
0.41613755e00,
0.39885530e00,
0.38097894e00,
0.36268044e00,
0.34404564e00,
0.32503796e00,
0.30558982e00,
0.28643203e00,
0.26951250e00,
0.25458133e00,
0.24664548e00,
0.27872956e00,
0.31819224e00,
0.35773003e00,
0.39206341e00,
0.41795400e00,
0.43447414e00,
0.44261932e00,
0.44425684e00,
0.44113833e00,
0.43451858e00,
0.42523941e00,
0.41390744e00,
0.40100044e00,
0.38690761e00,
0.37194157e00,
0.35634446e00,
0.34029481e00,
0.32391262e00,
0.30725011e00,
0.29025167e00,
0.27279079e00,
0.25542563e00,
0.24008393e00,
0.22675417e00,
0.22445151e00,
0.25343820e00,
0.28914347e00,
0.32500827e00,
0.35629919e00,
0.38011837e00,
0.39564186e00,
0.40376574e00,
0.40615430e00,
0.40434766e00,
0.39941984e00,
0.39206272e00,
0.38276103e00,
0.37190381e00,
0.35982931e00,
0.34683278e00,
0.33315977e00,
0.31900054e00,
0.30449098e00,
0.28971705e00,
0.27470860e00,
0.25940111e00,
0.24363182e00,
0.22780687e00,
0.21380231e00,
0.20184751e00,
0.20519748e00,
0.23159876e00,
0.26413625e00,
0.29684672e00,
0.32543322e00,
0.34727564e00,
0.36164755e00,
0.36939174e00,
0.37204832e00,
0.37103242e00,
0.36731219e00,
0.36147776e00,
0.35390341e00,
0.34486532e00,
0.33460709e00,
0.32336712e00,
0.31137651e00,
0.29884037e00,
0.28591970e00,
0.27272525e00,
0.25932097e00,
0.24572186e00,
0.23185994e00,
0.21755250e00,
0.20307408e00,
0.19022347e00,
0.17946769e00,
0.18898228e00,
0.21327148e00,
0.24319792e00,
0.27327064e00,
0.29953399e00,
0.31957966e00,
0.33274972e00,
0.33984205e00,
0.34230980e00,
0.34150216e00,
0.33835727e00,
0.33344826e00,
0.32711285e00,
0.31955174e00,
0.31089956e00,
0.30128181e00,
0.29084933e00,
0.27977982e00,
0.26825124e00,
0.25640994e00,
0.24435396e00,
0.23213391e00,
0.21975470e00,
0.20714773e00,
0.19412118e00,
0.18083785e00,
0.16899987e00,
0.15930425e00,
0.17541476e00,
0.19795303e00,
0.22571321e00,
0.25359881e00,
0.27793473e00,
0.29647639e00,
0.30860057e00,
0.31503823e00,
0.31714112e00,
0.31618607e00,
0.31309542e00,
0.30847403e00,
0.30271024e00,
0.29603517e00,
0.28855911e00,
0.28031746e00,
0.27133235e00,
0.26166755e00,
0.25144571e00,
0.24082130e00,
0.22993420e00,
0.21887848e00,
0.20769787e00,
0.19639082e00,
0.18488961e00,
0.17300032e00,
0.16079190e00,
0.14985578e00,
0.14110740e00,
0.16366631e00,
0.18467116e00,
0.21054901e00,
0.23656210e00,
0.25929046e00,
0.27663431e00,
0.28799024e00,
0.29400581e00,
0.29590416e00,
0.29484844e00,
0.29169577e00,
0.28704572e00,
0.28133944e00,
0.27490255e00,
0.26794240e00,
0.26054066e00,
0.25267535e00,
0.24428359e00,
0.23534042e00,
0.22590491e00,
0.21610361e00,
0.20607288e00,
0.19590905e00,
0.18565454e00,
0.17530420e00,
0.16479163e00,
0.15392394e00,
0.14269111e00,
0.13256522e00,
0.12466694e00,
0.15291582e00,
0.17249423e00,
0.19663572e00,
0.22094680e00,
0.24225558e00,
0.25860691e00,
0.26942277e00,
0.27527362e00,
0.27725279e00,
0.27639005e00,
0.27343193e00,
0.26890206e00,
0.26321408e00,
0.25673062e00,
0.24976483e00,
0.24254556e00,
0.23517576e00,
0.22762197e00,
0.21976374e00,
0.21149129e00,
0.20278960e00,
0.19374785e00,
0.18449736e00,
0.17514092e00,
0.16572388e00,
0.15623912e00,
0.14661992e00,
0.13667518e00,
0.12632957e00,
0.11693286e00,
0.10979707e00,
0.14267196e00,
0.16087982e00,
0.18335804e00,
0.20604582e00,
0.22601375e00,
0.24145372e00,
0.25182438e00,
0.25763780e00,
0.25987828e00,
0.25945812e00,
0.25701439e00,
0.25296855e00,
0.24764267e00,
0.24133593e00,
0.23435625e00,
0.22701317e00,
0.21957387e00,
0.21219650e00,
0.20488037e00,
0.19748402e00,
0.18982655e00,
0.18181197e00,
0.17347980e00,
0.16495080e00,
0.15633532e00,
0.14768384e00,
0.13898759e00,
0.13017787e00,
0.12106603e00,
0.11152479e00,
0.10278418e00,
0.96333064e-01,
0.13278867e00,
0.14967601e00,
0.17054874e00,
0.19166234e00,
0.21031891e00,
0.22485405e00,
0.23476954e00,
0.24053511e00,
0.24305005e00,
0.24313784e00,
0.24135487e00,
0.23804264e00,
0.23343392e00,
0.22772875e00,
0.22114034e00,
0.21392128e00,
0.20636588e00,
0.19877388e00,
0.19137226e00,
0.18422794e00,
0.17721902e00,
0.17011781e00,
0.16274700e00,
0.15508358e00,
0.14722840e00,
0.13929746e00,
0.13134745e00,
0.12336826e00,
0.11528943e00,
0.10692697e00,
0.98114364e-01,
0.89966424e-01,
0.84133029e-01,
0.12330588e00,
0.13893479e00,
0.15827183e00,
0.17786814e00,
0.19524175e00,
0.20886324e00,
0.21827731e00,
0.22391967e00,
0.22662000e00,
0.22713451e00,
0.22596700e00,
0.22341314e00,
0.21965274e00,
0.21481460e00,
0.20901735e00,
0.20240258e00,
0.19516377e00,
0.18756257e00,
0.17991112e00,
0.17249928e00,
0.16548070e00,
0.15879016e00,
0.15218471e00,
0.14541364e00,
0.13838096e00,
0.13115591e00,
0.12385615e00,
0.11654575e00,
0.10921578e00,
0.10179584e00,
0.94108447e-01,
0.85958004e-01,
0.78349575e-01,
0.73076993e-01,
0.11430455e00,
0.12874915e00,
0.14663576e00,
0.16478880e00,
0.18092515e00,
0.19363941e00,
0.20251557e00,
0.20795847e00,
0.21073578e00,
0.21154700e00,
0.21085797e00,
0.20893978e00,
0.20595059e00,
0.20198931e00,
0.19712524e00,
0.19142093e00,
0.18495877e00,
0.17787308e00,
0.17037868e00,
0.16277534e00,
0.15539503e00,
0.14847951e00,
0.14204761e00,
0.13587566e00,
0.12965593e00,
0.12321350e00,
0.11657458e00,
0.10985404e00,
0.10312499e00,
0.96382633e-01,
0.89558579e-01,
0.82482606e-01,
0.74937083e-01,
0.67823343e-01,
0.63062489e-01,
0.10584079e00,
0.11918116e00,
0.13571160e00,
0.15250790e00,
0.16746905e00,
0.17930275e00,
0.18762796e00,
0.19281991e00,
0.19558841e00,
0.19658093e00,
0.19623034e00,
0.19479063e00,
0.19241145e00,
0.18918501e00,
0.18516850e00,
0.18039672e00,
0.17489515e00,
0.16870056e00,
0.16189243e00,
0.15463088e00,
0.14718156e00,
0.13989125e00,
0.13307634e00,
0.12685405e00,
0.12105249e00,
0.11533057e00,
0.10943508e00,
0.10333905e00,
0.97149357e-01,
0.90949543e-01,
0.84741533e-01,
0.78459755e-01,
0.71940817e-01,
0.64950287e-01,
0.58292422e-01,
0.53999700e-01,
0.97934157e-01,
0.11025076e00,
0.12552127e00,
0.14105226e00,
0.15490949e00,
0.16590366e00,
0.17368492e00,
0.17860012e00,
0.18130451e00,
0.18239634e00,
0.18227696e00,
0.18118428e00,
0.17926148e00,
0.17660023e00,
0.17325978e00,
0.16927499e00,
0.16466121e00,
0.15942113e00,
0.15355882e00,
0.14710733e00,
0.14017075e00,
0.13296603e00,
0.12582819e00,
0.11912578e00,
0.11307607e00,
0.10758535e00,
0.10230618e00,
0.96913703e-01,
0.91320075e-01,
0.85618503e-01,
0.79903029e-01,
0.74183889e-01,
0.68398476e-01,
0.62389236e-01,
0.55908926e-01,
0.49671900e-01,
0.45807466e-01,
0.90577021e-01,
0.10194721e00,
0.11605130e00,
0.13040775e00,
0.14323507e00,
0.15343815e00,
0.16069512e00,
0.16532598e00,
0.16793491e00,
0.16907367e00,
0.16911317e00,
0.16827461e00,
0.16669342e00,
0.16445951e00,
0.16163501e00,
0.15826119e00,
0.15436088e00,
0.14993919e00,
0.14498582e00,
0.13948333e00,
0.13342866e00,
0.12687427e00,
0.11998184e00,
0.11305442e00,
0.10648688e00,
0.10058656e00,
0.95354967e-01,
0.90465494e-01,
0.85533582e-01,
0.80405675e-01,
0.75155161e-01,
0.69885492e-01,
0.64615801e-01,
0.59286319e-01,
0.53744264e-01,
0.47732841e-01,
0.41884389e-01,
0.38411867e-01,
0.83746620e-01,
0.94243065e-01,
0.10726915e00,
0.12053798e00,
0.13240825e00,
0.14187106e00,
0.14862999e00,
0.15297991e00,
0.15547749e00,
0.15663114e00,
0.15678266e00,
0.15613645e00,
0.15481880e00,
0.15291539e00,
0.15048827e00,
0.14758278e00,
0.14422987e00,
0.14044581e00,
0.13623075e00,
0.13156785e00,
0.12642694e00,
0.12077967e00,
0.11463551e00,
0.10810074e00,
0.10143317e00,
0.95029272e-01,
0.89268476e-01,
0.84254339e-01,
0.79705626e-01,
0.75194120e-01,
0.70498869e-01,
0.65667070e-01,
0.60809318e-01,
0.55953730e-01,
0.51043399e-01,
0.45929730e-01,
0.40349029e-01,
0.34859274e-01,
0.31745121e-01,
0.77413671e-01,
0.87103941e-01,
0.99134557e-01,
0.11139757e00,
0.12238043e00,
0.13115339e00,
0.13744320e00,
0.14152151e00,
0.14390105e00,
0.14504991e00,
0.14528263e00,
0.14478727e00,
0.14368038e00,
0.14204189e00,
0.13993120e00,
0.13739403e00,
0.13446525e00,
0.13116941e00,
0.12751934e00,
0.12351336e00,
0.11913250e00,
0.11434042e00,
0.10909266e00,
0.10336579e00,
0.97215243e-01,
0.90846524e-01,
0.84636919e-01,
0.79015903e-01,
0.74187510e-01,
0.69935963e-01,
0.65807395e-01,
0.61514482e-01,
0.57072140e-01,
0.52595474e-01,
0.48121743e-01,
0.43596964e-01,
0.38876079e-01,
0.33690531e-01,
0.28531600e-01,
0.25744777e-01,
0.71547434e-01,
0.80494061e-01,
0.91605820e-01,
0.10293934e00,
0.11310040e00,
0.12123200e00,
0.12708212e00,
0.13090093e00,
0.13316067e00,
0.13429219e00,
0.13458471e00,
0.13421088e00,
0.13327757e00,
0.13185826e00,
0.13000821e00,
0.12777114e00,
0.12518245e00,
0.12227035e00,
0.11905541e00,
0.11554869e00,
0.11174847e00,
0.10763626e00,
0.10317403e00,
0.98308414e-01,
0.92992358e-01,
0.87237559e-01,
0.81194960e-01,
0.75207628e-01,
0.69733076e-01,
0.65067738e-01,
0.61075501e-01,
0.57294834e-01,
0.53375702e-01,
0.49295910e-01,
0.45172136e-01,
0.41050550e-01,
0.36880266e-01,
0.32519296e-01,
0.27695602e-01,
0.22840958e-01,
0.20352198e-01,
0.66117376e-01,
0.74378133e-01,
0.84641933e-01,
0.95116824e-01,
0.10451740e00,
0.11205351e00,
0.11749266e00,
0.12106522e00,
0.12320609e00,
0.12431186e00,
0.12464833e00,
0.12437376e00,
0.12358582e00,
0.12235164e00,
0.12072182e00,
0.11873700e00,
0.11643104e00,
0.11383259e00,
0.11096542e00,
0.10784754e00,
0.10448926e00,
0.10088971e00,
0.97032204e-01,
0.92879705e-01,
0.88374905e-01,
0.83454721e-01,
0.78095064e-01,
0.72394073e-01,
0.66651307e-01,
0.61332978e-01,
0.56815393e-01,
0.53050254e-01,
0.49585145e-01,
0.46012942e-01,
0.42270541e-01,
0.38473442e-01,
0.34676433e-01,
0.30831696e-01,
0.26799770e-01,
0.22306219e-01,
0.17730433e-01,
0.15513073e-01,
0.61093956e-01,
0.68722166e-01,
0.78203514e-01,
0.87885372e-01,
0.96582450e-01,
0.10356604e00,
0.10862161e00,
0.11196126e00,
0.11398555e00,
0.11505950e00,
0.11542739e00,
0.11523422e00,
0.11456916e00,
0.11349328e00,
0.11205266e00,
0.11028446e00,
0.10822011e00,
0.10588704e00,
0.10330941e00,
0.10050797e00,
0.97499035e-01,
0.94292536e-01,
0.90888672e-01,
0.87273069e-01,
0.83411098e-01,
0.79244599e-01,
0.74700132e-01,
0.69725469e-01,
0.64370766e-01,
0.58887802e-01,
0.53736102e-01,
0.49357101e-01,
0.45792568e-01,
0.42613342e-01,
0.39362472e-01,
0.35933696e-01,
0.32438610e-01,
0.28940061e-01,
0.25393331e-01,
0.21661280e-01,
0.17468622e-01,
0.13150477e-01,
0.11185951e-01,
0.56448560e-01,
0.63493401e-01,
0.72252735e-01,
0.81202179e-01,
0.89248493e-01,
0.95719531e-01,
0.10041723e00,
0.10353691e00,
0.10544755e00,
0.10648517e00,
0.10687432e00,
0.10674787e00,
0.10618718e00,
0.10524774e00,
0.10397130e00,
0.10239167e00,
0.10053762e00,
0.98434702e-01,
0.96106045e-01,
0.93572617e-01,
0.90852953e-01,
0.87962106e-01,
0.84909752e-01,
0.81697099e-01,
0.78312054e-01,
0.74722745e-01,
0.70871852e-01,
0.66679642e-01,
0.62072858e-01,
0.57060491e-01,
0.51846057e-01,
0.46870694e-01,
0.42625420e-01,
0.39239943e-01,
0.36319576e-01,
0.33365458e-01,
0.30227283e-01,
0.27010450e-01,
0.23785481e-01,
0.20511542e-01,
0.17054949e-01,
0.13141878e-01,
0.90698684e-02,
0.73469649e-02,
0.52153420e-01,
0.58660157e-01,
0.66753164e-01,
0.75026073e-01,
0.82470380e-01,
0.88465959e-01,
0.92829920e-01,
0.95742144e-01,
0.97542584e-01,
0.98540656e-01,
0.98942772e-01,
0.98870747e-01,
0.98398849e-01,
0.97577512e-01,
0.96444599e-01,
0.95030688e-01,
0.93361929e-01,
0.91461726e-01,
0.89351647e-01,
0.87051816e-01,
0.84580876e-01,
0.81955560e-01,
0.79189852e-01,
0.76293178e-01,
0.73267482e-01,
0.70102490e-01,
0.66768855e-01,
0.63210987e-01,
0.59346184e-01,
0.55086352e-01,
0.50406374e-01,
0.45463238e-01,
0.40671837e-01,
0.36558144e-01,
0.33334181e-01,
0.30647837e-01,
0.27966481e-01,
0.25096513e-01,
0.22136096e-01,
0.19164244e-01,
0.16145656e-01,
0.12950187e-01,
0.93025165e-02,
0.54607159e-02,
0.39474810e-02,
0.48182234e-01,
0.54192506e-01,
0.61670437e-01,
0.69318332e-01,
0.76205671e-01,
0.81760220e-01,
0.85813068e-01,
0.88529900e-01,
0.90223983e-01,
0.91180287e-01,
0.91588661e-01,
0.91560833e-01,
0.91164641e-01,
0.90445958e-01,
0.89439072e-01,
0.88171646e-01,
0.86667374e-01,
0.84947526e-01,
0.83031908e-01,
0.80939271e-01,
0.78687482e-01,
0.76293327e-01,
0.73772058e-01,
0.71136616e-01,
0.68396017e-01,
0.65552600e-01,
0.62597387e-01,
0.59503239e-01,
0.56216817e-01,
0.52654829e-01,
0.48719283e-01,
0.44357602e-01,
0.39683431e-01,
0.35080492e-01,
0.31097105e-01,
0.28020034e-01,
0.25545072e-01,
0.23114407e-01,
0.20494236e-01,
0.17775815e-01,
0.15046233e-01,
0.12272255e-01,
0.93193343e-02,
0.59020361e-02,
0.22407323e-02,
0.87492354e-03,
0.44510506e-01,
0.50062627e-01,
0.56972671e-01,
0.64043038e-01,
0.70415020e-01,
0.75560495e-01,
0.79323418e-01,
0.81856459e-01,
0.83448350e-01,
0.84361628e-01,
0.84770963e-01,
0.84778860e-01,
0.84447332e-01,
0.83818108e-01,
0.82922280e-01,
0.81784874e-01,
0.80427296e-01,
0.78868859e-01,
0.77127583e-01,
0.75220726e-01,
0.73164918e-01,
0.70976183e-01,
0.68669744e-01,
0.66259526e-01,
0.63757502e-01,
0.61172180e-01,
0.58505908e-01,
0.55750374e-01,
0.52880324e-01,
0.49845133e-01,
0.46562638e-01,
0.42928446e-01,
0.38867969e-01,
0.34455679e-01,
0.30041935e-01,
0.26187586e-01,
0.23246434e-01,
0.20965882e-01,
0.18771386e-01,
0.16391607e-01,
0.13906728e-01,
0.11403601e-01,
0.88423118e-02,
0.60800756e-02,
0.28280553e-02,
-0.70986536e-03,
-0.19648359e-02,
0.41115671e-01,
0.46244897e-01,
0.52630525e-01,
0.59167176e-01,
0.65062307e-01,
0.69828428e-01,
0.73321380e-01,
0.75681835e-01,
0.77175975e-01,
0.78045711e-01,
0.78451805e-01,
0.78488372e-01,
0.78212120e-01,
0.77661060e-01,
0.76863378e-01,
0.75841703e-01,
0.74615397e-01,
0.73201917e-01,
0.71617633e-01,
0.69878295e-01,
0.67999192e-01,
0.65995254e-01,
0.63880973e-01,
0.61670251e-01,
0.59375945e-01,
0.57009004e-01,
0.54577064e-01,
0.52082047e-01,
0.49516134e-01,
0.46855822e-01,
0.44053324e-01,
0.41028392e-01,
0.37672661e-01,
0.33894073e-01,
0.29732887e-01,
0.25506891e-01,
0.21783372e-01,
0.18975813e-01,
0.16882956e-01,
0.14915733e-01,
0.12760971e-01,
0.10479322e-01,
0.81534600e-02,
0.57432470e-02,
0.31131236e-02,
-0.13105665e-04,
-0.34361165e-02,
-0.45660972e-02,
0.37977196e-01,
0.42716030e-01,
0.48617344e-01,
0.54660756e-01,
0.60114693e-01,
0.64529106e-01,
0.67770794e-01,
0.69969401e-01,
0.71370378e-01,
0.72196685e-01,
0.72596297e-01,
0.72655670e-01,
0.72426699e-01,
0.71944013e-01,
0.71233213e-01,
0.70314772e-01,
0.69206156e-01,
0.67923151e-01,
0.66480570e-01,
0.64892717e-01,
0.63173585e-01,
0.61336990e-01,
0.59396494e-01,
0.57365343e-01,
0.55256244e-01,
0.53080887e-01,
0.50849102e-01,
0.48567444e-01,
0.46237227e-01,
0.43851011e-01,
0.41386928e-01,
0.38799874e-01,
0.36011849e-01,
0.32912444e-01,
0.29396715e-01,
0.25476838e-01,
0.21440787e-01,
0.17856820e-01,
0.15185956e-01,
0.13268581e-01,
0.11497635e-01,
0.95190108e-02,
0.73810630e-02,
0.51771011e-02,
0.28821086e-02,
0.37416635e-03,
-0.26154167e-02,
-0.58998531e-02,
-0.68843709e-02,
0.35076261e-01,
0.39454699e-01,
0.44908728e-01,
0.50496329e-01,
0.55542119e-01,
0.59630550e-01,
0.62638551e-01,
0.64685628e-01,
0.65998107e-01,
0.66781543e-01,
0.67172192e-01,
0.67249492e-01,
0.67060962e-01,
0.66638172e-01,
0.66004358e-01,
0.65178059e-01,
0.64175054e-01,
0.63009582e-01,
0.61695036e-01,
0.60244419e-01,
0.58670532e-01,
0.56986067e-01,
0.55203587e-01,
0.53335473e-01,
0.51393870e-01,
0.49390342e-01,
0.47335327e-01,
0.45237295e-01,
0.43101642e-01,
0.40929142e-01,
0.38712744e-01,
0.36432110e-01,
0.34044892e-01,
0.31476185e-01,
0.28615938e-01,
0.25350343e-01,
0.21667363e-01,
0.17823832e-01,
0.14378536e-01,
0.11824818e-01,
0.10038571e-01,
0.84049767e-02,
0.65482627e-02,
0.45205113e-02,
0.24313403e-02,
0.26542676e-03,
-0.20983638e-02,
-0.49343021e-02,
-0.80712391e-02,
-0.89136148e-02,
0.32395583e-01,
0.36441319e-01,
0.41482292e-01,
0.46648715e-01,
0.51316999e-01,
0.55103421e-01,
0.57894230e-01,
0.59799597e-01,
0.61028276e-01,
0.61769772e-01,
0.62149592e-01,
0.62240742e-01,
0.62086754e-01,
0.61716419e-01,
0.61150856e-01,
0.60406826e-01,
0.59498589e-01,
0.58439020e-01,
0.57240289e-01,
0.55914193e-01,
0.54472402e-01,
0.52926507e-01,
0.51288098e-01,
0.49568728e-01,
0.47779780e-01,
0.45932278e-01,
0.44036478e-01,
0.42101391e-01,
0.40134147e-01,
0.38139164e-01,
0.36116980e-01,
0.34061395e-01,
0.31954251e-01,
0.29756844e-01,
0.27398031e-01,
0.24767753e-01,
0.21741455e-01,
0.18279733e-01,
0.14604551e-01,
0.11262298e-01,
0.87792939e-02,
0.70768204e-02,
0.55486909e-02,
0.38075072e-02,
0.19052560e-02,
-0.45109729e-04,
-0.20623163e-02,
-0.42746197e-02,
-0.69636726e-02,
-0.99668913e-02,
-0.10686405e-01,
0.29919144e-01,
0.33657782e-01,
0.38317338e-01,
0.43094639e-01,
0.47413833e-01,
0.50920542e-01,
0.53509615e-01,
0.55282630e-01,
0.56432150e-01,
0.57132918e-01,
0.57500545e-01,
0.57602141e-01,
0.57477590e-01,
0.57153169e-01,
0.56648072e-01,
0.55977501e-01,
0.55154376e-01,
0.54190353e-01,
0.53096451e-01,
0.51883381e-01,
0.50561778e-01,
0.49142279e-01,
0.47635533e-01,
0.46052203e-01,
0.44402875e-01,
0.42697888e-01,
0.40947042e-01,
0.39159160e-01,
0.37341885e-01,
0.35501439e-01,
0.33642113e-01,
0.31765264e-01,
0.29866640e-01,
0.27930658e-01,
0.25920473e-01,
0.23763975e-01,
0.21344284e-01,
0.18521296e-01,
0.15230944e-01,
0.11671958e-01,
0.83932094e-02,
0.59620556e-02,
0.43443809e-02,
0.29380166e-02,
0.13360849e-02,
-0.42020026e-03,
-0.22225457e-02,
-0.40951176e-02,
-0.61711343e-02,
-0.87360274e-02,
-0.11627702e-01,
-0.12245870e-01,
0.27632145e-01,
0.31087343e-01,
0.35394758e-01,
0.39812610e-01,
0.43809064e-01,
0.47056779e-01,
0.49458548e-01,
0.51108032e-01,
0.52182894e-01,
0.52844279e-01,
0.53198714e-01,
0.53307895e-01,
0.53208377e-01,
0.52924160e-01,
0.52472707e-01,
0.51867817e-01,
0.51121201e-01,
0.50243411e-01,
0.49244415e-01,
0.48133992e-01,
0.46921846e-01,
0.45617707e-01,
0.44231363e-01,
0.42772591e-01,
0.41251190e-01,
0.39676767e-01,
0.38058497e-01,
0.36404900e-01,
0.34723751e-01,
0.33022195e-01,
0.31306833e-01,
0.29583117e-01,
0.27853649e-01,
0.26114259e-01,
0.24346959e-01,
0.22509510e-01,
0.20522581e-01,
0.18262342e-01,
0.15582572e-01,
0.12412144e-01,
0.89437887e-02,
0.57348427e-02,
0.33833538e-02,
0.18811167e-02,
0.61761297e-03,
-0.83635934e-03,
-0.24498748e-02,
-0.41174246e-02,
-0.58653555e-02,
-0.78286622e-02,
-0.10294430e-01,
-0.13095230e-01,
-0.13630256e-01,
0.25520688e-01,
0.28714316e-01,
0.32696646e-01,
0.36782503e-01,
0.40480625e-01,
0.43488596e-01,
0.45716532e-01,
0.47250807e-01,
0.48255347e-01,
0.48878800e-01,
0.49219336e-01,
0.49333718e-01,
0.49255468e-01,
0.49006518e-01,
0.48602745e-01,
0.48056658e-01,
0.47378853e-01,
0.46578914e-01,
0.45665938e-01,
0.44648807e-01,
0.43536380e-01,
0.42337555e-01,
0.41061286e-01,
0.39716586e-01,
0.38312454e-01,
0.36857765e-01,
0.35361193e-01,
0.33831026e-01,
0.32275263e-01,
0.30701900e-01,
0.29118838e-01,
0.27533358e-01,
0.25950057e-01,
0.24367830e-01,
0.22775861e-01,
0.21148378e-01,
0.19438008e-01,
0.17566392e-01,
0.15416590e-01,
0.12849388e-01,
0.97933495e-02,
0.64339326e-02,
0.33275706e-02,
0.10872351e-02,
-0.28362754e-03,
-0.14064757e-02,
-0.27260752e-02,
-0.42158812e-02,
-0.57702521e-02,
-0.74158944e-02,
-0.92886547e-02,
-0.11677106e-01,
-0.14403825e-01,
-0.14870039e-01,
0.23571694e-01,
0.26523935e-01,
0.30206172e-01,
0.33985410e-01,
0.37407782e-01,
0.40193867e-01,
0.42260528e-01,
0.43687437e-01,
0.44625875e-01,
0.45212984e-01,
0.45539256e-01,
0.45656916e-01,
0.45596711e-01,
0.45378670e-01,
0.45017261e-01,
0.44523854e-01,
0.43908067e-01,
0.43178570e-01,
0.42343616e-01,
0.41411307e-01,
0.40389728e-01,
0.39287016e-01,
0.38111381e-01,
0.36871076e-01,
0.35574421e-01,
0.34229808e-01,
0.32845549e-01,
0.31430013e-01,
0.29991681e-01,
0.28539073e-01,
0.27080379e-01,
0.25622051e-01,
0.24167063e-01,
0.22712942e-01,
0.21250511e-01,
0.19763654e-01,
0.18227577e-01,
0.16602959e-01,
0.14823279e-01,
0.12781378e-01,
0.10339825e-01,
0.74180500e-02,
0.41875704e-02,
0.12003324e-02,
-0.92070986e-03,
-0.21659597e-02,
-0.31659470e-02,
-0.43731593e-02,
-0.57607451e-02,
-0.72222133e-02,
-0.87847579e-02,
-0.10585406e-01,
-0.12914552e-01,
-0.15580352e-01,
-0.15989216e-01,
0.21772955e-01,
0.24502428e-01,
0.27907638e-01,
0.31403694e-01,
0.34571216e-01,
0.37151974e-01,
0.39069071e-01,
0.40396009e-01,
0.41272413e-01,
0.41824844e-01,
0.42136710e-01,
0.42256072e-01,
0.42211138e-01,
0.42020235e-01,
0.41696560e-01,
0.41250426e-01,
0.40690560e-01,
0.40024836e-01,
0.39260726e-01,
0.38405582e-01,
0.37466776e-01,
0.36451757e-01,
0.35368055e-01,
0.34223344e-01,
0.33025496e-01,
0.31782612e-01,
0.30503126e-01,
0.29195679e-01,
0.27868953e-01,
0.26531136e-01,
0.25188750e-01,
0.23845278e-01,
0.22500057e-01,
0.21148371e-01,
0.19782964e-01,
0.18395819e-01,
0.16978383e-01,
0.15516935e-01,
0.13982293e-01,
0.12313342e-01,
0.10401287e-01,
0.81002032e-02,
0.53164582e-02,
0.22110264e-02,
-0.66281413e-03,
-0.26719193e-02,
-0.38054741e-02,
-0.47028475e-02,
-0.58184257e-02,
-0.71222009e-02,
-0.85073700e-02,
-0.10002246e-01,
-0.11745773e-01,
-0.14030821e-01,
-0.16646272e-01,
-0.17007222e-01,
0.20113155e-01,
0.22637051e-01,
0.25786523e-01,
0.29021049e-01,
0.31953044e-01,
0.34343820e-01,
0.36122233e-01,
0.37356097e-01,
0.38174324e-01,
0.38693711e-01,
0.38991190e-01,
0.39110996e-01,
0.39079025e-01,
0.38912032e-01,
0.38622018e-01,
0.38218360e-01,
0.37708975e-01,
0.37100986e-01,
0.36401182e-01,
0.35616249e-01,
0.34752883e-01,
0.33817910e-01,
0.32818370e-01,
0.31761579e-01,
0.30655265e-01,
0.29507659e-01,
0.28327364e-01,
0.27123058e-01,
0.25902657e-01,
0.24672238e-01,
0.23434937e-01,
0.22190256e-01,
0.20934626e-01,
0.19663457e-01,
0.18373784e-01,
0.17065847e-01,
0.15741942e-01,
0.14402619e-01,
0.13038933e-01,
0.11620902e-01,
0.10082259e-01,
0.83062556e-02,
0.61375611e-02,
0.34738728e-02,
0.47380762e-03,
-0.23008718e-02,
-0.42078327e-02,
-0.52424278e-02,
-0.60545313e-02,
-0.70956550e-02,
-0.83303098e-02,
-0.96523445e-02,
-0.11092181e-01,
-0.12791218e-01,
-0.15045415e-01,
-0.17619489e-01,
-0.17940814e-01,
0.18581720e-01,
0.20915883e-01,
0.23829265e-01,
0.26822245e-01,
0.29536562e-01,
0.31751547e-01,
0.33401374e-01,
0.34548633e-01,
0.35312355e-01,
0.35800364e-01,
0.36083620e-01,
0.36202855e-01,
0.36181841e-01,
0.36035892e-01,
0.35775941e-01,
0.35410490e-01,
0.34946699e-01,
0.34391019e-01,
0.33749603e-01,
0.33028524e-01,
0.32233991e-01,
0.31372394e-01,
0.30450473e-01,
0.29475490e-01,
0.28455326e-01,
0.27398327e-01,
0.26312927e-01,
0.25206672e-01,
0.24085123e-01,
0.22950860e-01,
0.21803081e-01,
0.20638589e-01,
0.19453924e-01,
0.18248010e-01,
0.17023819e-01,
0.15788162e-01,
0.14549590e-01,
0.13314429e-01,
0.12082065e-01,
0.10838719e-01,
0.95478874e-02,
0.81366943e-02,
0.64820210e-02,
0.44224774e-02,
0.18523625e-02,
-0.10648414e-02,
-0.37534775e-02,
-0.55650654e-02,
-0.65100682e-02,
-0.72507914e-02,
-0.82312562e-02,
-0.94086677e-02,
-0.10678349e-01,
-0.12073795e-01,
-0.13739666e-01,
-0.15975196e-01,
-0.18515551e-01,
-0.18804787e-01,
0.17168749e-01,
0.19327780e-01,
0.22023177e-01,
0.24793051e-01,
0.27306184e-01,
0.29358532e-01,
0.30889180e-01,
0.31955894e-01,
0.32668613e-01,
0.33126883e-01,
0.33396173e-01,
0.33513986e-01,
0.33502162e-01,
0.33374704e-01,
0.33141572e-01,
0.32810479e-01,
0.32387879e-01,
0.31879608e-01,
0.31291280e-01,
0.30628527e-01,
0.29897207e-01,
0.29103544e-01,
0.28254325e-01,
0.27356898e-01,
0.26419124e-01,
0.25448931e-01,
0.24453351e-01,
0.23437385e-01,
0.22403084e-01,
0.21349376e-01,
0.20273272e-01,
0.19171963e-01,
0.18045446e-01,
0.16897894e-01,
0.15737535e-01,
0.14574960e-01,
0.13420397e-01,
0.12280968e-01,
0.11158198e-01,
0.10045296e-01,
0.89227157e-02,
0.77492185e-02,
0.64479085e-02,
0.48916428e-02,
0.29151232e-02,
0.41272587e-03,
-0.24413855e-02,
-0.50533284e-02,
-0.67725605e-02,
-0.76342914e-02,
-0.83150435e-02,
-0.92462925e-02,
-0.10376302e-01,
-0.11603034e-01,
-0.12963645e-01,
-0.14606736e-01,
-0.16835131e-01,
-0.19348454e-01,
-0.19612487e-01,
0.15865134e-01,
0.17862506e-01,
0.20356622e-01,
0.22920400e-01,
0.25247563e-01,
0.27149413e-01,
0.28569562e-01,
0.29561354e-01,
0.30226331e-01,
0.30656436e-01,
0.30912070e-01,
0.31027781e-01,
0.31023609e-01,
0.30912362e-01,
0.30703111e-01,
0.30402854e-01,
0.30017478e-01,
0.29552329e-01,
0.29012615e-01,
0.28403712e-01,
0.27731372e-01,
0.27001891e-01,
0.26222091e-01,
0.25399221e-01,
0.24540421e-01,
0.23651907e-01,
0.22738006e-01,
0.21800319e-01,
0.20837659e-01,
0.19847298e-01,
0.18827075e-01,
0.17777784e-01,
0.16704248e-01,
0.15615269e-01,
0.14521973e-01,
0.13435473e-01,
0.12364727e-01,
0.11315233e-01,
0.10288181e-01,
0.92796814e-02,
0.82787825e-02,
0.72630350e-02,
0.61889994e-02,
0.49772034e-02,
0.34969368e-02,
0.15794969e-02,
-0.87806711e-03,
-0.36851568e-02,
-0.62257764e-02,
-0.78527220e-02,
-0.86356523e-02,
-0.92662042e-02,
-0.10158284e-01,
-0.11249557e-01,
-0.12441714e-01,
-0.13776368e-01,
-0.15406553e-01,
-0.17638773e-01,
-0.20130811e-01,
-0.20375945e-01,
0.14662431e-01,
0.16510550e-01,
0.18818781e-01,
0.21192145e-01,
0.23347380e-01,
0.25109937e-01,
0.26427617e-01,
0.27349673e-01,
0.27969934e-01,
0.28373329e-01,
0.28615609e-01,
0.28728599e-01,
0.28730700e-01,
0.28633634e-01,
0.28445678e-01,
0.28173234e-01,
0.27821736e-01,
0.27396221e-01,
0.26901733e-01,
0.26343603e-01,
0.25727598e-01,
0.25059966e-01,
0.24347208e-01,
0.23595579e-01,
0.22810331e-01,
0.21994932e-01,
0.21150416e-01,
0.20275565e-01,
0.19368108e-01,
0.18426621e-01,
0.17452605e-01,
0.16451407e-01,
0.15432071e-01,
0.14405738e-01,
0.13383573e-01,
0.12374919e-01,
0.11386278e-01,
0.10420920e-01,
0.94791828e-02,
0.85583618e-02,
0.76520443e-02,
0.67478423e-02,
0.58222217e-02,
0.48302943e-02,
0.36898023e-02,
0.22657616e-02,
0.38620809e-03,
-0.20464570e-02,
-0.48193890e-02,
-0.72907535e-02,
-0.88232690e-02,
-0.95311515e-02,
-0.10120570e-01,
-0.10982438e-01,
-0.12042844e-01,
-0.13208187e-01,
-0.14525170e-01,
-0.16151898e-01,
-0.18398402e-01,
-0.20873869e-01,
-0.21105917e-01,
0.13552637e-01,
0.15262923e-01,
0.17399436e-01,
0.19596824e-01,
0.21593064e-01,
0.23226669e-01,
0.24449309e-01,
0.25306473e-01,
0.25884863e-01,
0.26262935e-01,
0.26492154e-01,
0.26601870e-01,
0.26609030e-01,
0.26524415e-01,
0.26355645e-01,
0.26108669e-01,
0.25788641e-01,
0.25400463e-01,
0.24949163e-01,
0.24440058e-01,
0.23878768e-01,
0.23270955e-01,
0.22621866e-01,
0.21935685e-01,
0.21214921e-01,
0.20460036e-01,
0.19669790e-01,
0.18842377e-01,
0.17977156e-01,
0.17076379e-01,
0.16145866e-01,
0.15194753e-01,
0.14234038e-01,
0.13274660e-01,
0.12325864e-01,
0.11394359e-01,
0.10484057e-01,
0.95965564e-02,
0.87314118e-02,
0.78865895e-02,
0.70581776e-02,
0.62391688e-02,
0.54168063e-02,
0.45667454e-02,
0.36422682e-02,
0.25576062e-02,
0.11724485e-02,
-0.68855018e-03,
-0.31142109e-02,
-0.58629848e-02,
-0.82644373e-02,
-0.96990969e-02,
-0.10335494e-01,
-0.10892544e-01,
-0.11732457e-01,
-0.12769138e-01,
-0.13914877e-01,
-0.15221934e-01,
-0.16854212e-01,
-0.19125070e-01,
-0.21587910e-01,
-0.21812512e-01,
0.12528375e-01,
0.14111324e-01,
0.16089143e-01,
0.18123835e-01,
0.19972973e-01,
0.21487134e-01,
0.22621555e-01,
0.23418266e-01,
0.23957409e-01,
0.24311502e-01,
0.24528088e-01,
0.24634261e-01,
0.24645658e-01,
0.24572272e-01,
0.24421265e-01,
0.24198342e-01,
0.23908518e-01,
0.23556618e-01,
0.23147549e-01,
0.22686291e-01,
0.22177676e-01,
0.21625938e-01,
0.21034168e-01,
0.20403879e-01,
0.19734910e-01,
0.19025959e-01,
0.18275727e-01,
0.17484382e-01,
0.16654858e-01,
0.15793348e-01,
0.14909030e-01,
0.14012692e-01,
0.13114928e-01,
0.12224767e-01,
0.11348879e-01,
0.10491370e-01,
0.96542966e-02,
0.88380333e-02,
0.80419006e-02,
0.72643128e-02,
0.65027624e-02,
0.57532680e-02,
0.50087180e-02,
0.42559886e-02,
0.34697335e-02,
0.26006724e-02,
0.15582409e-02,
0.19586713e-03,
-0.16648462e-02,
-0.41000666e-02,
-0.68322704e-02,
-0.91604441e-02,
-0.10493010e-01,
-0.11061922e-01,
-0.11595163e-01,
-0.12420692e-01,
-0.13440105e-01,
-0.14572812e-01,
-0.15877264e-01,
-0.17524028e-01,
-0.19829245e-01,
-0.22282675e-01,
-0.22505168e-01,
0.11582712e-01,
0.13047962e-01,
0.14879054e-01,
0.16763248e-01,
0.18476224e-01,
0.19879704e-01,
0.20932244e-01,
0.21672688e-01,
0.22175148e-01,
0.22506684e-01,
0.22711273e-01,
0.22814000e-01,
0.22829419e-01,
0.22766909e-01,
0.22633271e-01,
0.22434004e-01,
0.22174012e-01,
0.21857906e-01,
0.21490037e-01,
0.21074377e-01,
0.20614149e-01,
0.20111440e-01,
0.19566908e-01,
0.18979838e-01,
0.18348834e-01,
0.17672971e-01,
0.16953107e-01,
0.16192837e-01,
0.15398719e-01,
0.14579884e-01,
0.13746735e-01,
0.12909485e-01,
0.12076905e-01,
0.11255554e-01,
0.10449625e-01,
0.96614184e-02,
0.88916803e-02,
0.81402799e-02,
0.74064764e-02,
0.66891308e-02,
0.59866421e-02,
0.52965824e-02,
0.46150312e-02,
0.39348379e-02,
0.32422331e-02,
0.25101355e-02,
0.16859862e-02,
0.67323016e-03,
-0.68223715e-03,
-0.25608686e-02,
-0.50209761e-02,
-0.77415816e-02,
-0.99905552e-02,
-0.11216282e-01,
-0.11722405e-01,
-0.12240239e-01,
-0.13058126e-01,
-0.14066087e-01,
-0.15192053e-01,
-0.16501144e-01,
-0.18171404e-01,
-0.20520791e-01,
-0.22966992e-01,
-0.23192288e-01,
0.10709423e-01,
0.12065823e-01,
0.13761187e-01,
0.15506121e-01,
0.17093049e-01,
0.18394005e-01,
0.19370625e-01,
0.20058842e-01,
0.20527244e-01,
0.20837912e-01,
0.21031609e-01,
0.21131653e-01,
0.21151649e-01,
0.21100447e-01,
0.20984545e-01,
0.20809161e-01,
0.20578744e-01,
0.20297162e-01,
0.19967660e-01,
0.19592566e-01,
0.19173032e-01,
0.18708948e-01,
0.18199200e-01,
0.17642427e-01,
0.17038029e-01,
0.16387362e-01,
0.15694451e-01,
0.14966104e-01,
0.14211375e-01,
0.13440318e-01,
0.12662663e-01,
0.11886760e-01,
0.11119010e-01,
0.10363686e-01,
0.96232807e-02,
0.88988189e-02,
0.81905108e-02,
0.74980101e-02,
0.68207132e-02,
0.61578541e-02,
0.55083996e-02,
0.48709009e-02,
0.42431629e-02,
0.36212225e-02,
0.29974312e-02,
0.23571376e-02,
0.16709621e-02,
0.88186091e-03,
-0.11385441e-03,
-0.14786492e-02,
-0.33936626e-02,
-0.58928211e-02,
-0.86036716e-02,
-0.10764806e-01,
-0.11878857e-01,
-0.12327690e-01,
-0.12838322e-01,
-0.13654754e-01,
-0.14656777e-01,
-0.15782116e-01,
-0.17102929e-01,
-0.18805560e-01,
-0.21208614e-01,
-0.23648826e-01,
-0.23881109e-01,
0.99031366e-02,
0.11158862e-01,
0.12728675e-01,
0.14344782e-01,
0.15815102e-01,
0.17021233e-01,
0.17927697e-01,
0.18567791e-01,
0.19005040e-01,
0.19296985e-01,
0.19481450e-01,
0.19580156e-01,
0.19605840e-01,
0.19566806e-01,
0.19469030e-01,
0.19317091e-01,
0.19114554e-01,
0.18864036e-01,
0.18567108e-01,
0.18224217e-01,
0.17834747e-01,
0.17397350e-01,
0.16910696e-01,
0.16374495e-01,
0.15790507e-01,
0.15163027e-01,
0.14498919e-01,
0.13806998e-01,
0.13096904e-01,
0.12377877e-01,
0.11657882e-01,
0.10943062e-01,
0.10237600e-01,
0.95441425e-02,
0.88640014e-02,
0.81976959e-02,
0.75451867e-02,
0.69061988e-02,
0.62803319e-02,
0.56670737e-02,
0.50658169e-02,
0.44758078e-02,
0.38957228e-02,
0.33232321e-02,
0.27542519e-02,
0.21804536e-02,
0.15856215e-02,
0.93752530e-03,
0.17370789e-03,
-0.81816822e-03,
-0.22095586e-02,
-0.41797617e-02,
-0.67304755e-02,
-0.94296988e-02,
-0.11491459e-01,
-0.12489353e-01,
-0.12887697e-01,
-0.13399359e-01,
-0.14220017e-01,
-0.15221213e-01,
-0.16351623e-01,
-0.17690992e-01,
-0.19434931e-01,
-0.21900531e-01,
-0.24334403e-01,
-0.24576908e-01,
0.91596041e-02,
0.10322293e-01,
0.11776099e-01,
0.13273214e-01,
0.14635916e-01,
0.15754675e-01,
0.16596718e-01,
0.17192930e-01,
0.17602194e-01,
0.17877869e-01,
0.18055072e-01,
0.18153975e-01,
0.18186348e-01,
0.18159697e-01,
0.18079169e-01,
0.17948311e-01,
0.17769367e-01,
0.17543392e-01,
0.17270328e-01,
0.16949220e-01,
0.16578663e-01,
0.16157566e-01,
0.15685990e-01,
0.15165905e-01,
0.14601668e-01,
0.14000042e-01,
0.13369541e-01,
0.12719298e-01,
0.12057992e-01,
0.11393145e-01,
0.10730589e-01,
0.10074451e-01,
0.94274255e-02,
0.87909503e-02,
0.81658233e-02,
0.75523388e-02,
0.69505223e-02,
0.63602584e-02,
0.57813367e-02,
0.52134916e-02,
0.46564899e-02,
0.41098930e-02,
0.35728957e-02,
0.30442516e-02,
0.25214758e-02,
0.19998695e-02,
0.14702382e-02,
0.91451913e-03,
0.29673893e-03,
-0.45211281e-03,
-0.14543475e-02,
-0.28908835e-02,
-0.49353936e-02,
-0.75476863e-02,
-0.10228912e-01,
-0.12177330e-01,
-0.13055839e-01,
-0.13411977e-01,
-0.13932705e-01,
-0.14762470e-01,
-0.15767431e-01,
-0.16908331e-01,
-0.18272618e-01,
-0.20066334e-01,
-0.22602497e-01,
-0.25027955e-01,
-0.25282444e-01,
0.84757134e-02,
0.95526502e-02,
0.10899562e-01,
0.12287115e-01,
0.13550865e-01,
0.14589507e-01,
0.15372781e-01,
0.15929360e-01,
0.16313823e-01,
0.16575595e-01,
0.16747160e-01,
0.16847055e-01,
0.16885890e-01,
0.16870106e-01,
0.16803687e-01,
0.16688865e-01,
0.16526472e-01,
0.16316228e-01,
0.16057082e-01,
0.15747746e-01,
0.15387391e-01,
0.14976392e-01,
0.14516956e-01,
0.14013476e-01,
0.13472388e-01,
0.12901680e-01,
0.12309901e-01,
0.11705250e-01,
0.11094769e-01,
0.10483908e-01,
0.98766321e-02,
0.92755975e-02,
0.86824028e-02,
0.80980305e-02,
0.75229523e-02,
0.69574793e-02,
0.64018168e-02,
0.58560292e-02,
0.53201127e-02,
0.47941157e-02,
0.42779413e-02,
0.37713288e-02,
0.32739611e-02,
0.27850254e-02,
0.23029358e-02,
0.18249402e-02,
0.13457378e-02,
0.85484359e-03,
0.33187922e-03,
-0.26360160e-03,
-0.10086164e-02,
-0.20368681e-02,
-0.35384803e-02,
-0.56763412e-02,
-0.83571654e-02,
-0.11009140e-01,
-0.12828228e-01,
-0.13585953e-01,
-0.13909469e-01,
-0.14446771e-01,
-0.15289789e-01,
-0.16302353e-01,
-0.17458308e-01,
-0.18853234e-01,
-0.20704713e-01,
-0.23318050e-01,
-0.25730822e-01,
-0.25997423e-01,
0.78488868e-02,
0.88471249e-02,
0.10095963e-01,
0.11383073e-01,
0.12556248e-01,
0.13521745e-01,
0.14251599e-01,
0.14772387e-01,
0.15134628e-01,
0.15383984e-01,
0.15550311e-01,
0.15650392e-01,
0.15693463e-01,
0.15684696e-01,
0.15626790e-01,
0.15520756e-01,
0.15366406e-01,
0.15162837e-01,
0.14909022e-01,
0.14604477e-01,
0.14249890e-01,
0.13847613e-01,
0.13401926e-01,
0.12918934e-01,
0.12406076e-01,
0.11871213e-01,
0.11321891e-01,
0.10764673e-01,
0.10204747e-01,
0.96458644e-02,
0.90905391e-02,
0.85403854e-02,
0.79964781e-02,
0.74595166e-02,
0.69300104e-02,
0.64083328e-02,
0.58948221e-02,
0.53897803e-02,
0.48935004e-02,
0.44061504e-02,
0.39277528e-02,
0.34583132e-02,
0.29976317e-02,
0.25451381e-02,
0.20998786e-02,
0.16600611e-02,
0.12224072e-02,
0.78081363e-03,
0.32335857e-03,
-0.17330737e-03,
-0.75507897e-03,
-0.15084407e-02,
-0.25801368e-02,
-0.41686138e-02,
-0.64185774e-02,
-0.91707250e-02,
-0.11776590e-01,
-0.13448806e-01,
-0.14086716e-01,
-0.14388485e-01,
-0.14948927e-01,
-0.15807996e-01,
-0.16831016e-01,
-0.18005654e-01,
-0.19435892e-01,
-0.21352261e-01,
-0.24047775e-01,
-0.26441328e-01,
-0.26718097e-01,
0.72761136e-02,
0.82024941e-02,
0.93617616e-02,
0.10557142e-01,
0.11647610e-01,
0.12546310e-01,
0.13227314e-01,
0.13715181e-01,
0.14056569e-01,
0.14293512e-01,
0.14453239e-01,
0.14550695e-01,
0.14593684e-01,
0.14586142e-01,
0.14529741e-01,
0.14424738e-01,
0.14270628e-01,
0.14066807e-01,
0.13813181e-01,
0.13510717e-01,
0.13161879e-01,
0.12770806e-01,
0.12343196e-01,
0.11885820e-01,
0.11405828e-01,
0.10910179e-01,
0.10404940e-01,
0.98949000e-02,
0.93836067e-02,
0.88735307e-02,
0.83662188e-02,
0.78627151e-02,
0.73637851e-02,
0.68700551e-02,
0.63821264e-02,
0.59005306e-02,
0.54257177e-02,
0.49582440e-02,
0.44985944e-02,
0.40470236e-02,
0.36037776e-02,
0.31689804e-02,
0.27423929e-02,
0.23237155e-02,
0.19122795e-02,
0.15068311e-02,
0.11052121e-02,
0.70368673e-03,
0.29509133e-03,
-0.13425731e-03,
-0.61128259e-03,
-0.11886779e-02,
-0.19643204e-02,
-0.30994283e-02,
-0.47985963e-02,
-0.71782242e-02,
-0.99987285e-02,
-0.12535306e-01,
-0.14042607e-01,
-0.14564627e-01,
-0.14856030e-01,
-0.15444703e-01,
-0.16321121e-01,
-0.17356047e-01,
-0.18551625e-01,
-0.20020738e-01,
-0.22008330e-01,
-0.24788912e-01,
-0.27153788e-01,
-0.27435988e-01,
0.67528659e-02,
0.76138428e-02,
0.86914934e-02,
0.98031582e-02,
0.10817948e-01,
0.11655207e-01,
0.12290766e-01,
0.12747228e-01,
0.13067597e-01,
0.13290452e-01,
0.13440499e-01,
0.13530949e-01,
0.13568352e-01,
0.13555742e-01,
0.13494246e-01,
0.13384038e-01,
0.13225075e-01,
0.13017694e-01,
0.12763139e-01,
0.12463950e-01,
0.12124085e-01,
0.11748808e-01,
0.11344243e-01,
0.10916842e-01,
0.10472860e-01,
0.10017826e-01,
0.95562525e-02,
0.90915291e-02,
0.86259460e-02,
0.81609664e-02,
0.76976661e-02,
0.72367843e-02,
0.67789229e-02,
0.63247578e-02,
0.58749435e-02,
0.54301512e-02,
0.49911011e-02,
0.45584417e-02,
0.41327197e-02,
0.37144551e-02,
0.33040245e-02,
0.29014633e-02,
0.25067537e-02,
0.21196145e-02,
0.17395143e-02,
0.13655368e-02,
0.99622435e-03,
0.62900473e-03,
0.25951708e-03,
-0.12054403e-03,
-0.52714220e-03,
-0.99177333e-03,
-0.15755766e-02,
-0.23898906e-02,
-0.36114564e-02,
-0.54468848e-02,
-0.79709487e-02,
-0.10849346e-01,
-0.13287026e-01,
-0.14611776e-01,
-0.15024997e-01,
-0.15317407e-01,
-0.15937271e-01,
-0.16830366e-01,
-0.17877102e-01,
-0.19094490e-01,
-0.20604689e-01,
-0.22668475e-01,
-0.25534086e-01,
-0.27857255e-01,
-0.28136132e-01,
0.62725060e-02,
0.70738452e-02,
0.80769034e-02,
0.91117928e-02,
0.10056756e-01,
0.10836687e-01,
0.11428916e-01,
0.11854174e-01,
0.12152059e-01,
0.12357980e-01,
0.12494393e-01,
0.12573079e-01,
0.12599719e-01,
0.12576928e-01,
0.12505878e-01,
0.12387235e-01,
0.12221836e-01,
0.12011216e-01,
0.11757963e-01,
0.11465836e-01,
0.11139663e-01,
0.10784952e-01,
0.10407481e-01,
0.10012860e-01,
0.96060764e-02,
0.91912504e-02,
0.87715685e-02,
0.83493032e-02,
0.79259127e-02,
0.75023416e-02,
0.70792502e-02,
0.66572730e-02,
0.62370505e-02,
0.58192150e-02,
0.54045119e-02,
0.49937805e-02,
0.45878929e-02,
0.41875835e-02,
0.37935111e-02,
0.34063701e-02,
0.30265125e-02,
0.26541308e-02,
0.22892184e-02,
0.19315761e-02,
0.15806772e-02,
0.12358783e-02,
0.89602108e-03,
0.55933301e-03,
0.22290465e-03,
-0.11837736e-03,
-0.47439354e-03,
-0.86409569e-03,
-0.13247277e-02,
-0.19277434e-02,
-0.28001412e-02,
-0.41344408e-02,
-0.61328700e-02,
-0.88115418e-02,
-0.11727598e-01,
-0.14030309e-01,
-0.15156906e-01,
-0.15471662e-01,
-0.15775396e-01,
-0.16426805e-01,
-0.17333917e-01,
-0.18390546e-01,
-0.19628605e-01,
-0.21180209e-01,
-0.23323257e-01,
-0.26269557e-01,
-0.28532716e-01,
-0.28793098e-01,
0.58267605e-02,
0.65732352e-02,
0.75074541e-02,
0.84711155e-02,
0.93507199e-02,
0.10076155e-01,
0.10626074e-01,
0.11019482e-01,
0.11292907e-01,
0.11478950e-01,
0.11598186e-01,
0.11661368e-01,
0.11673780e-01,
0.11638117e-01,
0.11556003e-01,
0.11428879e-01,
0.11258594e-01,
0.11047787e-01,
0.10800015e-01,
0.10519676e-01,
0.10211711e-01,
0.98812887e-02,
0.95334202e-02,
0.91725811e-02,
0.88025127e-02,
0.84261857e-02,
0.80457665e-02,
0.76626753e-02,
0.72779190e-02,
0.68921465e-02,
0.65058237e-02,
0.61194906e-02,
0.57338160e-02,
0.53495029e-02,
0.49673491e-02,
0.45883162e-02,
0.42133094e-02,
0.38431743e-02,
0.34787988e-02,
0.31207474e-02,
0.27695056e-02,
0.24253791e-02,
0.20884008e-02,
0.17583206e-02,
0.14347950e-02,
0.11171702e-02,
0.80453861e-03,
0.49563375e-03,
0.18838409e-03,
-0.12067244e-03,
-0.43773217e-03,
-0.77462569e-03,
-0.11541138e-02,
-0.16205981e-02,
-0.22580458e-02,
-0.32116263e-02,
-0.46886820e-02,
-0.68765748e-02,
-0.97123422e-02,
-0.12634131e-01,
-0.14760367e-01,
-0.15676778e-01,
-0.15906356e-01,
-0.16229935e-01,
-0.16910229e-01,
-0.17826060e-01,
-0.18888203e-01,
-0.20143216e-01,
-0.21733690e-01,
-0.23955522e-01,
-0.26971251e-01,
-0.29147699e-01,
-0.29363733e-01,
0.54070717e-02,
0.61022816e-02,
0.69719772e-02,
0.78685069e-02,
0.86859381e-02,
0.93587395e-02,
0.98667927e-02,
0.10227575e-01,
0.10474904e-01,
0.10638993e-01,
0.10738960e-01,
0.10784928e-01,
0.10782113e-01,
0.10733562e-01,
0.10641565e-01,
0.10508422e-01,
0.10336863e-01,
0.10130250e-01,
0.98925652e-02,
0.96282410e-02,
0.93418919e-02,
0.90379883e-02,
0.87205544e-02,
0.83929868e-02,
0.80580246e-02,
0.77177160e-02,
0.73734652e-02,
0.70262798e-02,
0.66767824e-02,
0.63254600e-02,
0.59727905e-02,
0.56192488e-02,
0.52654315e-02,
0.49121627e-02,
0.45603677e-02,
0.42109452e-02,
0.38648082e-02,
0.35230482e-02,
0.31864857e-02,
0.28557458e-02,
0.25313764e-02,
0.22137545e-02,
0.19029069e-02,
0.15987142e-02,
0.13008172e-02,
0.10086106e-02,
0.72139426e-03,
0.43811998e-03,
0.15723592e-03,
-0.12376621e-03,
-0.40906059e-03,
-0.70619176e-03,
-0.10294477e-02,
-0.14063339e-02,
-0.18904223e-02,
-0.25809507e-02,
-0.36431164e-02,
-0.52961027e-02,
-0.76976335e-02,
-0.10682030e-01,
-0.13564016e-01,
-0.15468677e-01,
-0.16168477e-01,
-0.16328586e-01,
-0.16677296e-01,
-0.17379910e-01,
-0.18295975e-01,
-0.19355783e-01,
-0.20619802e-01,
-0.22241637e-01,
-0.24535159e-01,
-0.27597541e-01,
-0.29646901e-01,
-0.29775085e-01,
0.50061666e-02,
0.56525916e-02,
0.64607500e-02,
0.72929878e-02,
0.80504669e-02,
0.86719748e-02,
0.91386139e-02,
0.94665419e-02,
0.96872011e-02,
0.98287789e-02,
0.99092564e-02,
0.99384002e-02,
0.99215917e-02,
0.98623503e-02,
0.97636050e-02,
0.96283276e-02,
0.94598122e-02,
0.92617264e-02,
0.90380237e-02,
0.87927980e-02,
0.85300365e-02,
0.82533574e-02,
0.79658525e-02,
0.76700603e-02,
0.73679010e-02,
0.70607387e-02,
0.67495992e-02,
0.64351326e-02,
0.61177979e-02,
0.57979897e-02,
0.54761712e-02,
0.51528304e-02,
0.48285746e-02,
0.45042443e-02,
0.41807955e-02,
0.38590934e-02,
0.35402139e-02,
0.32251289e-02,
0.29146906e-02,
0.26096692e-02,
0.23106080e-02,
0.20178384e-02,
0.17315357e-02,
0.14516271e-02,
0.11777402e-02,
0.90943626e-03,
0.64600032e-03,
0.38659063e-03,
0.12990853e-03,
-0.12597593e-03,
-0.38408191e-03,
-0.64947037e-03,
-0.93128323e-03,
-0.12469848e-02,
-0.16304143e-02,
-0.21464191e-02,
-0.29125693e-02,
-0.41156993e-02,
-0.59804209e-02,
-0.86140092e-02,
-0.11723425e-01,
-0.14505944e-01,
-0.16143575e-01,
-0.16627248e-01,
-0.16734317e-01,
-0.17108738e-01,
-0.17822174e-01,
-0.18725207e-01,
-0.19769130e-01,
-0.21026963e-01,
-0.22663718e-01,
-0.25009871e-01,
-0.28077208e-01,
-0.29936161e-01,
-0.29903086e-01,
0.46191481e-02,
0.52183974e-02,
0.59670168e-02,
0.67369491e-02,
0.74361749e-02,
0.80076661e-02,
0.84337806e-02,
0.87295286e-02,
0.89242216e-02,
0.90442616e-02,
0.91067078e-02,
0.91211265e-02,
0.90931328e-02,
0.90266792e-02,
0.89251632e-02,
0.87919217e-02,
0.86304275e-02,
0.84443204e-02,
0.82372911e-02,
0.80128787e-02,
0.77742967e-02,
0.75243628e-02,
0.72654327e-02,
0.69993068e-02,
0.67273136e-02,
0.64504785e-02,
0.61694812e-02,
0.58848094e-02,
0.55968449e-02,
0.53059584e-02,
0.50125136e-02,
0.47170832e-02,
0.44203578e-02,
0.41231164e-02,
0.38261772e-02,
0.35306206e-02,
0.32374398e-02,
0.29475170e-02,
0.26617502e-02,
0.23809616e-02,
0.21057320e-02,
0.18363672e-02,
0.15731406e-02,
0.13160043e-02,
0.10647054e-02,
0.81881043e-03,
0.57776505e-03,
0.34065990e-03,
0.10639309e-03,
-0.12660332e-03,
-0.36068761e-03,
-0.59937662e-03,
-0.84880623e-03,
-0.11204274e-02,
-0.14357190e-02,
-0.18365043e-02,
-0.24020236e-02,
-0.32714859e-02,
-0.46532732e-02,
-0.67663151e-02,
-0.96402615e-02,
-0.12831893e-01,
-0.15441692e-01,
-0.16770078e-01,
-0.17045652e-01,
-0.17114457e-01,
-0.17508077e-01,
-0.18213695e-01,
-0.19082699e-01,
-0.20087397e-01,
-0.21311399e-01,
-0.22931056e-01,
-0.25289452e-01,
-0.28288210e-01,
-0.29855460e-01,
-0.29540431e-01,
0.42438372e-02,
0.47970237e-02,
0.54875640e-02,
0.61967750e-02,
0.68393080e-02,
0.73622656e-02,
0.77493028e-02,
0.80144051e-02,
0.81849089e-02,
0.82855793e-02,
0.83326427e-02,
0.83354777e-02,
0.82998471e-02,
0.82299737e-02,
0.81294924e-02,
0.80018509e-02,
0.78504579e-02,
0.76786745e-02,
0.74897227e-02,
0.72865360e-02,
0.70716739e-02,
0.68472759e-02,
0.66150147e-02,
0.63761803e-02,
0.61317808e-02,
0.58825328e-02,
0.56289569e-02,
0.53714588e-02,
0.51103933e-02,
0.48460537e-02,
0.45788516e-02,
0.43093446e-02,
0.40383078e-02,
0.37663926e-02,
0.34944667e-02,
0.32235801e-02,
0.29546390e-02,
0.26885115e-02,
0.24261076e-02,
0.21682207e-02,
0.19154118e-02,
0.16681626e-02,
0.14266440e-02,
0.11909056e-02,
0.96084451e-03,
0.73608221e-03,
0.51601185e-03,
0.29983022e-03,
0.86455977e-04,
-0.12548176e-03,
-0.33780126e-03,
-0.55322744e-03,
-0.77611348e-03,
-0.10140111e-02,
-0.12810014e-02,
-0.16041107e-02,
-0.20356334e-02,
-0.26726879e-02,
-0.36793316e-02,
-0.52829208e-02,
-0.76788506e-02,
-0.10785309e-01,
-0.13992673e-01,
-0.16345447e-01,
-0.17329855e-01,
-0.17411666e-01,
-0.17451219e-01,
-0.17846853e-01,
-0.18514944e-01,
-0.19316018e-01,
-0.20241588e-01,
-0.21382602e-01,
-0.22925586e-01,
-0.25219092e-01,
-0.28025590e-01,
-0.29143283e-01,
-0.28364539e-01,
0.38802244e-02,
0.43883347e-02,
0.50221425e-02,
0.56721950e-02,
0.62597329e-02,
0.67359642e-02,
0.70858784e-02,
0.73225168e-02,
0.74713244e-02,
0.75554666e-02,
0.75903148e-02,
0.75849718e-02,
0.75452076e-02,
0.74753161e-02,
0.73789642e-02,
0.72595305e-02,
0.71201995e-02,
0.69639226e-02,
0.67933705e-02,
0.66109342e-02,
0.64186375e-02,
0.62180408e-02,
0.60103335e-02,
0.57964721e-02,
0.55771875e-02,
0.53530186e-02,
0.51244237e-02,
0.48917811e-02,
0.46553644e-02,
0.44155009e-02,
0.41726306e-02,
0.39273482e-02,
0.36802413e-02,
0.34321158e-02,
0.31838417e-02,
0.29362249e-02,
0.26901832e-02,
0.24466240e-02,
0.22063667e-02,
0.19700988e-02,
0.17385191e-02,
0.15121014e-02,
0.12910479e-02,
0.10755060e-02,
0.86542196e-03,
0.66050846e-03,
0.46016547e-03,
0.26359106e-03,
0.69742789e-04,
-0.12264148e-03,
-0.31505234e-03,
-0.50968054e-03,
-0.70983008e-03,
-0.92059938e-03,
-0.11514680e-02,
-0.14202188e-02,
-0.17611791e-02,
-0.22402222e-02,
-0.29767312e-02,
-0.41622054e-02,
-0.60354555e-02,
-0.87412195e-02,
-0.12048509e-01,
-0.15178418e-01,
-0.17183300e-01,
-0.17799482e-01,
-0.17704254e-01,
-0.17711623e-01,
-0.18075820e-01,
-0.18659104e-01,
-0.19336520e-01,
-0.20114373e-01,
-0.21086661e-01,
-0.22448380e-01,
-0.24543686e-01,
-0.26968870e-01,
-0.27425798e-01,
-0.25982859e-01,
0.35297391e-02,
0.39938814e-02,
0.45724809e-02,
0.51651788e-02,
0.56997212e-02,
0.61313836e-02,
0.64464915e-02,
0.66571729e-02,
0.67870081e-02,
0.68575540e-02,
0.68832617e-02,
0.68728351e-02,
0.68318904e-02,
0.67646406e-02,
0.66746362e-02,
0.65650316e-02,
0.64386618e-02,
0.62980657e-02,
0.61454801e-02,
0.59827766e-02,
0.58114552e-02,
0.56327097e-02,
0.54474538e-02,
0.52563460e-02,
0.50598914e-02,
0.48585772e-02,
0.46528103e-02,
0.44429065e-02,
0.42291693e-02,
0.40119542e-02,
0.37917392e-02,
0.35690265e-02,
0.33443815e-02,
0.31186463e-02,
0.28926407e-02,
0.26670455e-02,
0.24426919e-02,
0.22205103e-02,
0.20012076e-02,
0.17854705e-02,
0.15739684e-02,
0.13671899e-02,
0.11654871e-02,
0.96899009e-03,
0.77774836e-03,
0.59148681e-03,
0.40965760e-03,
0.23145005e-03,
0.55848002e-04,
-0.11826525e-03,
-0.29229349e-03,
-0.46810537e-03,
-0.64812414e-03,
-0.83613564e-03,
-0.10385482e-02,
-0.12673559e-02,
-0.15453240e-02,
-0.19167364e-02,
-0.24653263e-02,
-0.33373679e-02,
-0.47518569e-02,
-0.69443211e-02,
-0.99710366e-02,
-0.13414696e-01,
-0.16345950e-01,
-0.17911386e-01,
-0.18145934e-01,
-0.17885052e-01,
-0.17836532e-01,
-0.18110715e-01,
-0.18532671e-01,
-0.18993681e-01,
-0.19508589e-01,
-0.20170461e-01,
-0.21185948e-01,
-0.22894341e-01,
-0.24724348e-01,
-0.24370475e-01,
-0.22268195e-01,
0.31942856e-02,
0.36158317e-02,
0.41410751e-02,
0.46785669e-02,
0.51624421e-02,
0.55519687e-02,
0.58347746e-02,
0.60220673e-02,
0.61355513e-02,
0.61951405e-02,
0.62143006e-02,
0.62012500e-02,
0.61613480e-02,
0.60985861e-02,
0.60162526e-02,
0.59171882e-02,
0.58038891e-02,
0.56785326e-02,
0.55429381e-02,
0.53985524e-02,
0.52465028e-02,
0.50876997e-02,
0.49228426e-02,
0.47523943e-02,
0.45767589e-02,
0.43963231e-02,
0.42114421e-02,
0.40224311e-02,
0.38296313e-02,
0.36334537e-02,
0.34343314e-02,
0.32326737e-02,
0.30291770e-02,
0.28245456e-02,
0.26194807e-02,
0.24146992e-02,
0.22109461e-02,
0.20089839e-02,
0.18095284e-02,
0.16132607e-02,
0.14207716e-02,
0.12325735e-02,
0.10491284e-02,
0.87066717e-03,
0.69715641e-03,
0.52842626e-03,
0.36394782e-03,
0.20294097e-03,
0.44450713e-04,
-0.11262445e-03,
-0.26963896e-03,
-0.42814302e-03,
-0.59004046e-03,
-0.75823971e-03,
-0.93725865e-03,
-0.11351963e-02,
-0.13674999e-02,
-0.16641312e-02,
-0.20829688e-02,
-0.27305069e-02,
-0.37841904e-02,
-0.54863039e-02,
-0.80432529e-02,
-0.11373957e-01,
-0.14846842e-01,
-0.17431933e-01,
-0.18471425e-01,
-0.18317387e-01,
-0.17884307e-01,
-0.17721877e-01,
-0.17807389e-01,
-0.17944368e-01,
-0.18038927e-01,
-0.18111341e-01,
-0.18263325e-01,
-0.18744303e-01,
-0.19931488e-01,
-0.21152221e-01,
-0.20258194e-01,
-0.18154927e-01,
0.28755944e-02,
0.32562227e-02,
0.37303183e-02,
0.42150859e-02,
0.46508745e-02,
0.50008302e-02,
0.52538323e-02,
0.54201568e-02,
0.55196281e-02,
0.55704820e-02,
0.55851364e-02,
0.55712480e-02,
0.55338335e-02,
0.54765893e-02,
0.54024858e-02,
0.53140409e-02,
0.52134213e-02,
0.51024402e-02,
0.49825548e-02,
0.48549226e-02,
0.47204504e-02,
0.45797881e-02,
0.44334051e-02,
0.42817341e-02,
0.41251029e-02,
0.39637811e-02,
0.37980722e-02,
0.36283240e-02,
0.34549329e-02,
0.32783179e-02,
0.30987991e-02,
0.29169142e-02,
0.27332988e-02,
0.25485274e-02,
0.23631887e-02,
0.21780862e-02,
0.19938366e-02,
0.18110086e-02,
0.16303621e-02,
0.14525431e-02,
0.12780537e-02,
0.11074701e-02,
0.94128802e-03,
0.77982916e-03,
0.62306761e-03,
0.47078525e-03,
0.32257999e-03,
0.17765176e-03,
0.35159428e-04,
-0.10600228e-03,
-0.24719909e-03,
-0.38967439e-03,
-0.53508411e-03,
-0.68561384e-03,
-0.84459153e-03,
-0.10175847e-02,
-0.12152690e-02,
-0.14582346e-02,
-0.17864328e-02,
-0.22758693e-02,
-0.30617819e-02,
-0.43552169e-02,
-0.64090681e-02,
-0.93599092e-02,
-0.12933343e-01,
-0.16276378e-01,
-0.18346461e-01,
-0.18781584e-01,
-0.18226096e-01,
-0.17576260e-01,
-0.17187940e-01,
-0.16925601e-01,
-0.16586989e-01,
-0.16100395e-01,
-0.15514784e-01,
-0.15000328e-01,
-0.14952605e-01,
-0.15897794e-01,
-0.17156484e-01,
-0.16811144e-01,
-0.16059641e-01,
0.25749404e-02,
0.29165992e-02,
0.33420587e-02,
0.37768418e-02,
0.41672788e-02,
0.44802395e-02,
0.47057904e-02,
0.48532858e-02,
0.49406979e-02,
0.49845842e-02,
0.49962578e-02,
0.49827271e-02,
0.49485951e-02,
0.48972480e-02,
0.48313839e-02,
0.47532097e-02,
0.46645310e-02,
0.45668236e-02,
0.44612922e-02,
0.43488974e-02,
0.42303428e-02,
0.41060960e-02,
0.39765178e-02,
0.38419361e-02,
0.37026398e-02,
0.35588457e-02,
0.34108032e-02,
0.32588942e-02,
0.31035459e-02,
0.29450748e-02,
0.27838917e-02,
0.26205610e-02,
0.24555970e-02,
0.22894754e-02,
0.21228124e-02,
0.19562438e-02,
0.17903824e-02,
0.16257121e-02,
0.14628675e-02,
0.13024764e-02,
0.11450466e-02,
0.99116936e-03,
0.84134901e-03,
0.69589523e-03,
0.55489980e-03,
0.41809730e-03,
0.28507152e-03,
0.15523843e-03,
0.27696389e-04,
-0.98706871e-04,
-0.22511023e-03,
-0.35272306e-03,
-0.48294108e-03,
-0.61748014e-03,
-0.75878931e-03,
-0.91083115e-03,
-0.10810896e-02,
-0.12839842e-02,
-0.15473457e-02,
-0.19251802e-02,
-0.25176301e-02,
-0.34943304e-02,
-0.50974879e-02,
-0.75646825e-02,
-0.10904021e-01,
-0.14594574e-01,
-0.17590806e-01,
-0.18962247e-01,
-0.18717308e-01,
-0.17718159e-01,
-0.16742069e-01,
-0.15938997e-01,
-0.15096456e-01,
-0.14041902e-01,
-0.12778138e-01,
-0.11481387e-01,
-0.10537255e-01,
-0.10632942e-01,
-0.12457354e-01,
-0.15134930e-01,
-0.16634356e-01,
-0.17927606e-01,
0.22929932e-02,
0.25978240e-02,
0.29773721e-02,
0.33650806e-02,
0.37129789e-02,
0.39914814e-02,
0.41917670e-02,
0.43222816e-02,
0.43991888e-02,
0.44373926e-02,
0.44470965e-02,
0.44346289e-02,
0.44041476e-02,
0.43587163e-02,
0.43007480e-02,
0.42321626e-02,
0.41544731e-02,
0.40688911e-02,
0.39764121e-02,
0.38778088e-02,
0.37736101e-02,
0.36642191e-02,
0.35499295e-02,
0.34309430e-02,
0.33074785e-02,
0.31797730e-02,
0.30480758e-02,
0.29127430e-02,
0.27741243e-02,
0.26325521e-02,
0.24885421e-02,
0.23425936e-02,
0.21950973e-02,
0.20465283e-02,
0.18974531e-02,
0.17483715e-02,
0.15998175e-02,
0.14523078e-02,
0.13063080e-02,
0.11623653e-02,
0.10211229e-02,
0.88305573e-03,
0.74868003e-03,
0.61835541e-03,
0.49214705e-03,
0.36987942e-03,
0.25109114e-03,
0.13533220e-03,
0.21703247e-04,
-0.90926857e-04,
-0.20352546e-03,
-0.31733362e-03,
-0.43350609e-03,
-0.55338885e-03,
-0.67877601e-03,
-0.81268302e-03,
-0.96038845e-03,
-0.11320858e-02,
-0.13475646e-02,
-0.16453972e-02,
-0.20989443e-02,
-0.28392603e-02,
-0.40741763e-02,
-0.60647582e-02,
-0.89876316e-02,
-0.12647802e-01,
-0.16243244e-01,
-0.18616004e-01,
-0.19093577e-01,
-0.18077876e-01,
-0.16530357e-01,
-0.15029374e-01,
-0.13550565e-01,
-0.11885053e-01,
-0.99943755e-02,
-0.81011895e-02,
-0.66745249e-02,
-0.64049885e-02,
-0.81325630e-02,
-0.12245839e-01,
-0.17089149e-01,
-0.20167973e-01,
-0.22245478e-01,
0.20299123e-02,
0.23001628e-02,
0.26366366e-02,
0.29802567e-02,
0.32884392e-02,
0.35349382e-02,
0.37119628e-02,
0.38270687e-02,
0.38946737e-02,
0.39280793e-02,
0.39364123e-02,
0.39253496e-02,
0.38986101e-02,
0.38588869e-02,
0.38082711e-02,
0.37484372e-02,
0.36807158e-02,
0.36061385e-02,
0.35254795e-02,
0.34393084e-02,
0.33480865e-02,
0.32521591e-02,
0.31517555e-02,
0.30470104e-02,
0.29380592e-02,
0.28251486e-02,
0.27085962e-02,
0.25886497e-02,
0.24655664e-02,
0.23397931e-02,
0.22118504e-02,
0.20821332e-02,
0.19509925e-02,
0.18189276e-02,
0.16863818e-02,
0.15537632e-02,
0.14214842e-02,
0.12901434e-02,
0.11600459e-02,
0.10316600e-02,
0.90566400e-03,
0.78258343e-03,
0.66282053e-03,
0.54670696e-03,
0.43438902e-03,
0.32569954e-03,
0.22029619e-03,
0.11765160e-03,
0.16918209e-04,
-0.82841863e-04,
-0.18264857e-03,
-0.28356965e-03,
-0.38668749e-03,
-0.49303711e-03,
-0.60395780e-03,
-0.72176283e-03,
-0.85029344e-03,
-0.99679129e-03,
-0.11756377e-02,
-0.14147292e-02,
-0.17678216e-02,
-0.23343621e-02,
-0.32830362e-02,
-0.48576673e-02,
-0.73089227e-02,
-0.10680980e-01,
-0.14495158e-01,
-0.17674342e-01,
-0.19089572e-01,
-0.18462013e-01,
-0.16541397e-01,
-0.14258571e-01,
-0.11979854e-01,
-0.96273897e-02,
-0.71729873e-02,
-0.49013551e-02,
-0.33841925e-02,
-0.33057944e-02,
-0.52674399e-02,
-0.96266540e-02,
-0.15894135e-01,
-0.21601008e-01,
-0.24401912e-01,
-0.25605565e-01,
0.17854190e-02,
0.20233756e-02,
0.23196433e-02,
0.26221615e-02,
0.28933994e-02,
0.31102363e-02,
0.32658295e-02,
0.33668731e-02,
0.34261208e-02,
0.34553525e-02,
0.34626699e-02,
0.34531134e-02,
0.34299616e-02,
0.33955548e-02,
0.33516849e-02,
0.32997937e-02,
0.32410624e-02,
0.31763858e-02,
0.31063606e-02,
0.30314266e-02,
0.29519610e-02,
0.28682463e-02,
0.27804545e-02,
0.26887048e-02,
0.25930938e-02,
0.24938518e-02,
0.23912911e-02,
0.22855958e-02,
0.21769952e-02,
0.20659990e-02,
0.19530528e-02,
0.18384572e-02,
0.17226556e-02,
0.16060474e-02,
0.14889956e-02,
0.13717994e-02,
0.12548816e-02,
0.11386774e-02,
0.10235143e-02,
0.90985751e-03,
0.79824886e-03,
0.68927044e-03,
0.58327196e-03,
0.48052520e-03,
0.38123387e-03,
0.28524606e-03,
0.19232801e-03,
0.10188403e-03,
0.13148935e-04,
-0.74649892e-04,
-0.16256304e-03,
-0.25152223e-03,
-0.34248683e-03,
-0.43625894e-03,
-0.53394295e-03,
-0.63723064e-03,
-0.74901024e-03,
-0.87452604e-03,
-0.10243639e-02,
-0.12190320e-02,
-0.14982653e-02,
-0.19371029e-02,
-0.26686641e-02,
-0.39032472e-02,
-0.59056627e-02,
-0.88597359e-02,
-0.12578117e-01,
-0.16234819e-01,
-0.18552493e-01,
-0.18625131e-01,
-0.16659645e-01,
-0.13658211e-01,
-0.10453119e-01,
-0.73239915e-02,
-0.43831225e-02,
-0.19895632e-02,
-0.75665663e-03,
-0.12641177e-02,
-0.37606840e-02,
-0.81160162e-02,
-0.14009449e-01,
-0.20516826e-01,
-0.25252979e-01,
-0.26749684e-01,
-0.26949666e-01,
0.15589494e-02,
0.17668876e-02,
0.20257886e-02,
0.22901346e-02,
0.25271075e-02,
0.27164973e-02,
0.28523379e-02,
0.29405085e-02,
0.29921937e-02,
0.30177287e-02,
0.30242268e-02,
0.30161096e-02,
0.29962433e-02,
0.29666633e-02,
0.29289203e-02,
0.28842422e-02,
0.28336111e-02,
0.27777818e-02,
0.27172987e-02,
0.26525310e-02,
0.25837307e-02,
0.25110987e-02,
0.24347827e-02,
0.23548861e-02,
0.22715516e-02,
0.21849549e-02,
0.20953005e-02,
0.20027831e-02,
0.19077143e-02,
0.18105077e-02,
0.17114856e-02,
0.16110209e-02,
0.15095366e-02,
0.14073697e-02,
0.13047857e-02,
0.12020444e-02,
0.10995269e-02,
0.99746429e-03,
0.89634967e-03,
0.79651998e-03,
0.69846597e-03,
0.60272694e-03,
0.50964806e-03,
0.41947985e-03,
0.33230876e-03,
0.24821598e-03,
0.16693448e-03,
0.87793873e-04,
0.10210203e-04,
-0.66529035e-04,
-0.14337612e-03,
-0.22124445e-03,
-0.30088221e-03,
-0.38298120e-03,
-0.46846602e-03,
-0.55856747e-03,
-0.65543054e-03,
-0.76301250e-03,
-0.88915287e-03,
-0.10492229e-02,
-0.12729177e-02,
-0.16169698e-02,
-0.21851282e-02,
-0.31512871e-02,
-0.47627124e-02,
-0.72655436e-02,
-0.10682470e-01,
-0.14481976e-01,
-0.17479382e-01,
-0.18369909e-01,
-0.16695704e-01,
-0.13192192e-01,
-0.90477718e-02,
-0.50837407e-02,
-0.17456600e-02,
0.50775765e-03,
0.11405456e-02,
-0.15771549e-03,
-0.32012013e-02,
-0.74220751e-02,
-0.12348277e-01,
-0.17880509e-01,
-0.23407269e-01,
-0.26902754e-01,
-0.27527036e-01,
-0.27351677e-01,
0.13498098e-02,
0.15299571e-02,
0.17542662e-02,
0.19832880e-02,
0.21885794e-02,
0.23526303e-02,
0.24702842e-02,
0.25466559e-02,
0.25914581e-02,
0.26136602e-02,
0.26194309e-02,
0.26126066e-02,
0.25956824e-02,
0.25704356e-02,
0.25382130e-02,
0.25000437e-02,
0.24567009e-02,
0.24088090e-02,
0.23568932e-02,
0.23012925e-02,
0.22421621e-02,
0.21796003e-02,
0.21137316e-02,
0.20446836e-02,
0.19726283e-02,
0.18976876e-02,
0.18199275e-02,
0.17396152e-02,
0.16571450e-02,
0.15727388e-02,
0.14866672e-02,
0.13993700e-02,
0.13112020e-02,
0.12224860e-02,
0.11333575e-02,
0.10441266e-02,
0.95500512e-03,
0.86619117e-03,
0.77818817e-03,
0.69130980e-03,
0.60596765e-03,
0.52263023e-03,
0.44163744e-03,
0.36320378e-03,
0.28736555e-03,
0.21433813e-03,
0.14382407e-03,
0.75184740e-04,
0.79467936e-05,
-0.58581456e-04,
-0.12522114e-03,
-0.19278513e-03,
-0.26187330e-03,
-0.33317646e-03,
-0.40739289e-03,
-0.48542794e-03,
-0.56885718e-03,
-0.66080934e-03,
-0.76709641e-03,
-0.89942804e-03,
-0.10802879e-02,
-0.13528165e-02,
-0.17975677e-02,
-0.25551619e-02,
-0.38419224e-02,
-0.59169503e-02,
-0.89288345e-02,
-0.12605214e-01,
-0.15980633e-01,
-0.17610485e-01,
-0.16458714e-01,
-0.12757922e-01,
-0.78148004e-02,
-0.30447829e-02,
0.58803929e-03,
0.24830827e-02,
0.23169408e-02,
0.21695795e-03,
-0.31757678e-02,
-0.70471163e-02,
-0.10940184e-01,
-0.15055859e-01,
-0.19874115e-01,
-0.24761699e-01,
-0.27603908e-01,
-0.27851652e-01,
-0.27539870e-01,
0.11573090e-02,
0.13118297e-02,
0.15042420e-02,
0.17007013e-02,
0.18768052e-02,
0.20175355e-02,
0.21184757e-02,
0.21840241e-02,
0.22225198e-02,
0.22416625e-02,
0.22467452e-02,
0.22410604e-02,
0.22267597e-02,
0.22053893e-02,
0.21781034e-02,
0.21457481e-02,
0.21089534e-02,
0.20682334e-02,
0.20240291e-02,
0.19766658e-02,
0.19262728e-02,
0.18728701e-02,
0.18165240e-02,
0.17574138e-02,
0.16956952e-02,
0.16314362e-02,
0.15646521e-02,
0.14956590e-02,
0.14248277e-02,
0.13522609e-02,
0.12782239e-02,
0.12031234e-02,
0.11273304e-02,
0.10510463e-02,
0.97442413e-03,
0.89773658e-03,
0.82104641e-03,
0.74460293e-03,
0.66874258e-03,
0.59397513e-03,
0.52050536e-03,
0.44870362e-03,
0.37896109e-03,
0.31140892e-03,
0.24619559e-03,
0.18341106e-03,
0.12278830e-03,
0.63891501e-04,
0.61988922e-05,
-0.50909595e-04,
-0.10814507e-03,
-0.16616660e-03,
-0.22550514e-03,
-0.28680690e-03,
-0.35062019e-03,
-0.41758217e-03,
-0.48887444e-03,
-0.56698389e-03,
-0.65631996e-03,
-0.76583039e-03,
-0.91279822e-03,
-0.11302640e-02,
-0.14807846e-02,
-0.20767434e-02,
-0.31004245e-02,
-0.47965096e-02,
-0.73740366e-02,
-0.10751202e-01,
-0.14214644e-01,
-0.16375747e-01,
-0.15823588e-01,
-0.12226088e-01,
-0.67599509e-02,
-0.13356704e-02,
0.24713113e-02,
0.38747264e-02,
0.28679580e-02,
0.11903104e-03,
-0.33430753e-02,
-0.66695027e-02,
-0.96058799e-02,
-0.12532249e-01,
-0.16198792e-01,
-0.21001449e-01,
-0.25733067e-01,
-0.28118331e-01,
-0.27977712e-01,
-0.27381005e-01,
0.98083925e-03,
0.11118341e-02,
0.12749631e-02,
0.14415317e-02,
0.15908498e-02,
0.17101859e-02,
0.17957988e-02,
0.18514196e-02,
0.18841210e-02,
0.19004331e-02,
0.19048484e-02,
0.19001653e-02,
0.18882133e-02,
0.18703013e-02,
0.18473966e-02,
0.18202132e-02,
0.17892951e-02,
0.17550559e-02,
0.17178158e-02,
0.16778519e-02,
0.16353233e-02,
0.15902224e-02,
0.15425772e-02,
0.14925570e-02,
0.14402777e-02,
0.13857534e-02,
0.13290886e-02,
0.12705566e-02,
0.12104037e-02,
0.11487597e-02,
0.10858473e-02,
0.10220249e-02,
0.95764559e-03,
0.89280307e-03,
0.82777342e-03,
0.76263881e-03,
0.69743692e-03,
0.63244364e-03,
0.56787435e-03,
0.50428713e-03,
0.44183279e-03,
0.38074178e-03,
0.32141685e-03,
0.26393912e-03,
0.20854473e-03,
0.15521001e-03,
0.10369664e-03,
0.53771990e-04,
0.48405859e-05,
-0.43625452e-04,
-0.92199283e-04,
-0.14142317e-03,
-0.19180843e-03,
-0.24387297e-03,
-0.29810442e-03,
-0.35491036e-03,
-0.41522519e-03,
-0.48093690e-03,
-0.55558363e-03,
-0.64595538e-03,
-0.76544558e-03,
-0.93956996e-03,
-0.12170107e-02,
-0.16869402e-02,
-0.24994221e-02,
-0.38717622e-02,
-0.60297726e-02,
-0.90123136e-02,
-0.12333807e-01,
-0.14771741e-01,
-0.14760138e-01,
-0.11492035e-01,
-0.58502830e-02,
-0.39222432e-04,
0.38003121e-02,
0.46805777e-02,
0.29477894e-02,
-0.19698367e-03,
-0.34707133e-02,
-0.61565377e-02,
-0.82763545e-02,
-0.10341127e-01,
-0.13046334e-01,
-0.17025929e-01,
-0.22158746e-01,
-0.26648698e-01,
-0.28339002e-01,
-0.27549669e-01,
-0.26395839e-01,
0.81985246e-03,
0.92936639e-03,
0.10657535e-02,
0.12050235e-02,
0.13298774e-02,
0.14296711e-02,
0.15012774e-02,
0.15478174e-02,
0.15752059e-02,
0.15889057e-02,
0.15926794e-02,
0.15888754e-02,
0.15790189e-02,
0.15641790e-02,
0.15451561e-02,
0.15225782e-02,
0.14969195e-02,
0.14685052e-02,
0.14375511e-02,
0.14042476e-02,
0.13687719e-02,
0.13311717e-02,
0.12914606e-02,
0.12497153e-02,
0.12060066e-02,
0.11603512e-02,
0.11129703e-02,
0.10640363e-02,
0.10136392e-02,
0.96200767e-03,
0.90932270e-03,
0.85588358e-03,
0.80193224e-03,
0.74761838e-03,
0.69321913e-03,
0.63866674e-03,
0.58403739e-03,
0.52954548e-03,
0.47544795e-03,
0.42208337e-03,
0.36977371e-03,
0.31858659e-03,
0.26881197e-03,
0.22063967e-03,
0.17424425e-03,
0.12957763e-03,
0.86436587e-04,
0.44699889e-04,
0.37974135e-05,
-0.36786892e-04,
-0.77447279e-04,
-0.11859972e-03,
-0.16077398e-03,
-0.20438076e-03,
-0.24981576e-03,
-0.29733716e-03,
-0.34771886e-03,
-0.40232454e-03,
-0.46408881e-03,
-0.53816853e-03,
-0.63491607e-03,
-0.77419216e-03,
-0.99393132e-03,
-0.13644308e-02,
-0.20072565e-02,
-0.31077981e-02,
-0.48824749e-02,
-0.74348073e-02,
-0.10457132e-01,
-0.12935034e-01,
-0.13322859e-01,
-0.10510216e-01,
-0.50372370e-02,
0.82519592e-03,
0.45412472e-02,
0.49564531e-02,
0.27259453e-02,
-0.53395459e-03,
-0.34414190e-02,
-0.54976437e-02,
-0.69722342e-02,
-0.84393835e-02,
-0.10464143e-01,
-0.13568074e-01,
-0.18088147e-01,
-0.23360549e-01,
-0.27191006e-01,
-0.27798105e-01,
-0.26014257e-01,
-0.23941122e-01,
0.67388569e-03,
0.76391455e-03,
0.87603967e-03,
0.99053816e-03,
0.10931880e-02,
0.11752385e-02,
0.12341201e-02,
0.12724005e-02,
0.12949465e-02,
0.13062552e-02,
0.13094257e-02,
0.13063867e-02,
0.12983767e-02,
0.12862559e-02,
0.12706857e-02,
0.12522101e-02,
0.12312382e-02,
0.12080271e-02,
0.11827156e-02,
0.11554065e-02,
0.11262480e-02,
0.10953922e-02,
0.10628636e-02,
0.10286153e-02,
0.99264085e-03,
0.95504802e-03,
0.91611175e-03,
0.87590015e-03,
0.83438883e-03,
0.79185999e-03,
0.74851344e-03,
0.70453959e-03,
0.66006870e-03,
0.61538786e-03,
0.57062833e-03,
0.52572385e-03,
0.48075779e-03,
0.43583699e-03,
0.39130700e-03,
0.34728937e-03,
0.30423937e-03,
0.26211512e-03,
0.22102211e-03,
0.18136523e-03,
0.14320243e-03,
0.10641267e-03,
0.70891205e-04,
0.36568774e-04,
0.29850753e-05,
-0.30428575e-04,
-0.63911211e-04,
-0.97726115e-04,
-0.13241505e-03,
-0.16834371e-03,
-0.20574214e-03,
-0.24483376e-03,
-0.28623073e-03,
-0.33093616e-03,
-0.38131754e-03,
-0.44136596e-03,
-0.51900127e-03,
-0.62973931e-03,
-0.80306787e-03,
-0.10939565e-02,
-0.15995824e-02,
-0.24733264e-02,
-0.39081872e-02,
-0.60330946e-02,
-0.86653326e-02,
-0.10997782e-01,
-0.11619342e-01,
-0.93000261e-02,
-0.42796796e-02,
0.12966705e-02,
0.47367909e-02,
0.48012077e-02,
0.23494719e-02,
-0.77558518e-03,
-0.32301398e-02,
-0.47397721e-02,
-0.57379329e-02,
-0.67931442e-02,
-0.83390176e-02,
-0.10730988e-01,
-0.14351542e-01,
-0.19255424e-01,
-0.24215246e-01,
-0.26824960e-01,
-0.25858946e-01,
-0.22609137e-01,
-0.19117815e-01,
0.54258847e-03,
0.61508559e-03,
0.70537691e-03,
0.79757918e-03,
0.88023918e-03,
0.94631047e-03,
0.99372561e-03,
0.10245563e-02,
0.10427287e-02,
0.10518713e-02,
0.10544811e-02,
0.10520979e-02,
0.10457006e-02,
0.10359808e-02,
0.10234818e-02,
0.10086508e-02,
0.99183375e-03,
0.97323948e-03,
0.95295400e-03,
0.93100907e-03,
0.90750493e-03,
0.88267523e-03,
0.85658359e-03,
0.82906929e-03,
0.80003683e-03,
0.76972524e-03,
0.73839410e-03,
0.70602290e-03,
0.67255105e-03,
0.63823280e-03,
0.60333178e-03,
0.56789321e-03,
0.53199811e-03,
0.49601536e-03,
0.45993822e-03,
0.42375299e-03,
0.38750842e-03,
0.35126708e-03,
0.31534981e-03,
0.27984436e-03,
0.24513889e-03,
0.21121174e-03,
0.17798673e-03,
0.14601613e-03,
0.11531570e-03,
0.85622109e-04,
0.56977922e-04,
0.29328881e-04,
0.23219493e-05,
-0.24609051e-04,
-0.51613693e-04,
-0.78829580e-04,
-0.10677023e-03,
-0.13576847e-03,
-0.16589028e-03,
-0.19740456e-03,
-0.23071076e-03,
-0.26662330e-03,
-0.30694614e-03,
-0.35483547e-03,
-0.41624482e-03,
-0.50326047e-03,
-0.63863368e-03,
-0.86484943e-03,
-0.12584554e-02,
-0.19429043e-02,
-0.30815345e-02,
-0.48037078e-02,
-0.70073316e-02,
-0.90698786e-02,
-0.97764749e-02,
-0.79293735e-02,
-0.35561759e-02,
0.14528841e-02,
0.44905520e-02,
0.43353909e-02,
0.19235054e-02,
-0.88058808e-03,
-0.28684181e-02,
-0.39436910e-02,
-0.46082577e-02,
-0.53751953e-02,
-0.65655066e-02,
-0.84049981e-02,
-0.11191580e-01,
-0.15233751e-01,
-0.20167785e-01,
-0.24146404e-01,
-0.24793619e-01,
-0.21611178e-01,
-0.16357245e-01,
-0.11137053e-01,
0.42571200e-03,
0.48259774e-03,
0.55344484e-03,
0.62578957e-03,
0.69064385e-03,
0.74247847e-03,
0.77967369e-03,
0.80386118e-03,
0.81812969e-03,
0.82533201e-03,
0.82742231e-03,
0.82559290e-03,
0.82059979e-03,
0.81299205e-03,
0.80320571e-03,
0.79159206e-03,
0.77843288e-03,
0.76390296e-03,
0.74805156e-03,
0.73085778e-03,
0.71239163e-03,
0.69290656e-03,
0.67251193e-03,
0.65096776e-03,
0.62812673e-03,
0.60432340e-03,
0.57976606e-03,
0.55436889e-03,
0.52808766e-03,
0.50110236e-03,
0.47373594e-03,
0.44590727e-03,
0.41769189e-03,
0.38944930e-03,
0.36113514e-03,
0.33273242e-03,
0.30424978e-03,
0.27580233e-03,
0.24755922e-03,
0.21969499e-03,
0.19240828e-03,
0.16580452e-03,
0.13966703e-03,
0.11454133e-03,
0.90488385e-04,
0.67134846e-04,
0.44648674e-04,
0.22957218e-04,
0.17676108e-05,
-0.19375148e-04,
-0.40583618e-04,
-0.61928760e-04,
-0.83859930e-04,
-0.10665116e-03,
-0.13027368e-03,
-0.15504849e-03,
-0.18114736e-03,
-0.20928915e-03,
-0.24077985e-03,
-0.27812496e-03,
-0.32568991e-03,
-0.39278640e-03,
-0.49672712e-03,
-0.66978729e-03,
-0.97107701e-03,
-0.14971532e-02,
-0.23801890e-02,
-0.37356641e-02,
-0.55097779e-02,
-0.72345133e-02,
-0.79152975e-02,
-0.64894208e-02,
-0.28653429e-02,
0.13867198e-02,
0.39382749e-02,
0.36805528e-02,
0.15104047e-02,
-0.85980649e-03,
-0.24118538e-02,
-0.31621221e-02,
-0.36013129e-02,
-0.41603735e-02,
-0.50707515e-02,
-0.64725522e-02,
-0.85721835e-02,
-0.11695998e-01,
-0.15934667e-01,
-0.20252047e-01,
-0.22257976e-01,
-0.20039089e-01,
-0.14158701e-01,
-0.70773158e-02,
-0.11583769e-02,
0.32308648e-03,
0.36626059e-03,
0.42002957e-03,
0.47493313e-03,
0.52414869e-03,
0.56347938e-03,
0.59169793e-03,
0.61004912e-03,
0.62088534e-03,
0.62637340e-03,
0.62798650e-03,
0.62661694e-03,
0.62283722e-03,
0.61707193e-03,
0.60965854e-03,
0.60086075e-03,
0.59088779e-03,
0.57988992e-03,
0.56789967e-03,
0.55486214e-03,
0.54083182e-03,
0.52603544e-03,
0.51060918e-03,
0.49428100e-03,
0.47690922e-03,
0.45883877e-03,
0.44021819e-03,
0.42093766e-03,
0.40098530e-03,
0.38046407e-03,
0.35971330e-03,
0.33858226e-03,
0.31714686e-03,
0.29569783e-03,
0.27421847e-03,
0.25265093e-03,
0.23099354e-03,
0.20943003e-03,
0.18794239e-03,
0.16680970e-03,
0.14604363e-03,
0.12587215e-03,
0.10602640e-03,
0.86910019e-04,
0.68677633e-04,
0.50912015e-04,
0.33862842e-04,
0.17410162e-04,
0.13113118e-05,
-0.14745443e-04,
-0.30839601e-04,
-0.47037924e-04,
-0.63687643e-04,
-0.81004437e-04,
-0.98914243e-04,
-0.11776508e-03,
-0.13753906e-03,
-0.15888251e-03,
-0.18270283e-03,
-0.21094215e-03,
-0.24672056e-03,
-0.29703090e-03,
-0.37476781e-03,
-0.50383923e-03,
-0.72860206e-03,
-0.11220841e-02,
-0.17865424e-02,
-0.28166992e-02,
-0.41856705e-02,
-0.55507738e-02,
-0.61370027e-02,
-0.50718379e-02,
-0.22190376e-02,
0.11870584e-02,
0.32180531e-02,
0.29439172e-02,
0.11399792e-02,
-0.75004215e-03,
-0.19179990e-02,
-0.24332437e-02,
-0.27225760e-02,
-0.31262944e-02,
-0.38075345e-02,
-0.48529524e-02,
-0.64000175e-02,
-0.87149860e-02,
-0.12041125e-01,
-0.15928924e-01,
-0.18521670e-01,
-0.17406538e-01,
-0.11980806e-01,
-0.43471926e-02,
0.24430454e-02,
0.64657470e-02,
0.23461202e-03,
0.26596323e-03,
0.30500707e-03,
0.34487346e-03,
0.38060735e-03,
0.40916057e-03,
0.42964329e-03,
0.44296391e-03,
0.45083603e-03,
0.45483361e-03,
0.45601939e-03,
0.45503493e-03,
0.45229631e-03,
0.44811305e-03,
0.44273314e-03,
0.43635038e-03,
0.42911444e-03,
0.42114657e-03,
0.41246001e-03,
0.40299704e-03,
0.39280354e-03,
0.38204770e-03,
0.37087622e-03,
0.35903638e-03,
0.34639766e-03,
0.33326566e-03,
0.31975409e-03,
0.30575541e-03,
0.29127134e-03,
0.27634719e-03,
0.26128866e-03,
0.24593511e-03,
0.23035664e-03,
0.21476793e-03,
0.19919084e-03,
0.18352638e-03,
0.16776577e-03,
0.15213400e-03,
0.13650597e-03,
0.12117127e-03,
0.10604783e-03,
0.91420814e-04,
0.77014876e-04,
0.63102707e-04,
0.49865157e-04,
0.36943147e-04,
0.24579298e-04,
0.12646942e-04,
0.93583714e-06,
-0.10726711e-04,
-0.22409520e-04,
-0.34169330e-04,
-0.46268411e-04,
-0.58847447e-04,
-0.71843184e-04,
-0.85561762e-04,
-0.99899378e-04,
-0.11538975e-03,
-0.13264996e-03,
-0.15311912e-03,
-0.17894371e-03,
-0.21519083e-03,
-0.27111307e-03,
-0.36378118e-03,
-0.52517361e-03,
-0.80816675e-03,
-0.12878858e-02,
-0.20363682e-02,
-0.30407030e-02,
-0.40585143e-02,
-0.45180605e-02,
-0.37541774e-02,
-0.16340034e-02,
0.92716783e-03,
0.24486976e-02,
0.22101640e-02,
0.82245702e-03,
-0.59355749e-03,
-0.14349412e-02,
-0.17818650e-02,
-0.19710022e-02,
-0.22550686e-02,
-0.27460111e-02,
-0.34975049e-02,
-0.45994679e-02,
-0.62465151e-02,
-0.86843111e-02,
-0.11772950e-01,
-0.14247025e-01,
-0.13897673e-01,
-0.95402114e-02,
-0.26456779e-02,
0.37637560e-02,
0.75113312e-02,
0.82785152e-02,
0.16024915e-03,
0.18166313e-03,
0.20833101e-03,
0.23556003e-03,
0.25996531e-03,
0.27946456e-03,
0.29345075e-03,
0.30254686e-03,
0.30792650e-03,
0.31066421e-03,
0.31148014e-03,
0.31080900e-03,
0.30893844e-03,
0.30608277e-03,
0.30241298e-03,
0.29805984e-03,
0.29311786e-03,
0.28767882e-03,
0.28175494e-03,
0.27529473e-03,
0.26832905e-03,
0.26097632e-03,
0.25335979e-03,
0.24527783e-03,
0.23663967e-03,
0.22766842e-03,
0.21843993e-03,
0.20887410e-03,
0.19898124e-03,
0.18877641e-03,
0.17849728e-03,
0.16801062e-03,
0.15736978e-03,
0.14671541e-03,
0.13608270e-03,
0.12537625e-03,
0.11459773e-03,
0.10394175e-03,
0.93250273e-04,
0.82784871e-04,
0.72433824e-04,
0.62449340e-04,
0.52622323e-04,
0.43097367e-04,
0.34058194e-04,
0.25217514e-04,
0.16786646e-04,
0.86424679e-05,
0.63486573e-06,
-0.73354054e-05,
-0.15307762e-04,
-0.23343211e-04,
-0.31608852e-04,
-0.40205730e-04,
-0.49073769e-04,
-0.58462469e-04,
-0.68241599e-04,
-0.78819096e-04,
-0.90594622e-04,
-0.10456181e-03,
-0.12214016e-03,
-0.14677636e-03,
-0.18476187e-03,
-0.24763195e-03,
-0.35712289e-03,
-0.54928858e-03,
-0.87579578e-03,
-0.13871513e-02,
-0.20772433e-02,
-0.27833104e-02,
-0.31113259e-02,
-0.25939168e-02,
-0.11258019e-02,
0.66039932e-03,
0.17193498e-02,
0.15396239e-02,
0.55894168e-03,
-0.42620959e-03,
-0.99759083e-03,
-0.12229718e-02,
-0.13435301e-02,
-0.15339972e-02,
-0.18679998e-02,
-0.23784370e-02,
-0.31225604e-02,
-0.42320755e-02,
-0.58987723e-02,
-0.81079528e-02,
-0.10059725e-01,
-0.10070107e-01,
-0.69461395e-02,
-0.15970924e-02,
0.35173711e-02,
0.64823525e-02,
0.69939806e-02,
0.58447313e-02,
0.99998928e-04,
0.11336149e-03,
0.13000234e-03,
0.14699313e-03,
0.16222145e-03,
0.17438777e-03,
0.18311353e-03,
0.18878833e-03,
0.19214548e-03,
0.19385564e-03,
0.19436714e-03,
0.19395056e-03,
0.19278521e-03,
0.19100346e-03,
0.18871219e-03,
0.18599514e-03,
0.18291226e-03,
0.17952279e-03,
0.17582905e-03,
0.17179834e-03,
0.16745202e-03,
0.16285951e-03,
0.15811202e-03,
0.15307403e-03,
0.14767774e-03,
0.14207418e-03,
0.13631718e-03,
0.13035099e-03,
0.12418116e-03,
0.11780974e-03,
0.11139606e-03,
0.10484913e-03,
0.98205564e-04,
0.91553651e-04,
0.84926301e-04,
0.78246710e-04,
0.71511990e-04,
0.64868305e-04,
0.58195605e-04,
0.51666491e-04,
0.45197361e-04,
0.38973347e-04,
0.32842243e-04,
0.26893933e-04,
0.21249898e-04,
0.15730866e-04,
0.10472757e-04,
0.53960257e-05,
0.39317513e-06,
-0.45791730e-05,
-0.95536843e-05,
-0.14566379e-04,
-0.19728041e-04,
-0.25091455e-04,
-0.30624815e-04,
-0.36488018e-04,
-0.42586758e-04,
-0.49185081e-04,
-0.56528501e-04,
-0.65242508e-04,
-0.76187855e-04,
-0.91523340e-04,
-0.11515653e-03,
-0.15424404e-03,
-0.22232164e-03,
-0.34186023e-03,
-0.54521387e-03,
-0.86433312e-03,
-0.12963127e-02,
-0.17405468e-02,
-0.19500151e-02,
-0.16286803e-02,
-0.70585759e-03,
0.42125979e-03,
0.10887617e-02,
0.97085984e-03,
0.34771912e-03,
-0.27306599e-03,
-0.62840234e-03,
-0.76508470e-03,
-0.83742116e-03,
-0.95511600e-03,
-0.11631359e-02,
-0.14807530e-02,
-0.19422875e-02,
-0.26292126e-02,
-0.36683388e-02,
-0.50780545e-02,
-0.63869823e-02,
-0.64897770e-02,
-0.44976156e-02,
-0.92731958e-03,
0.25404119e-02,
0.45405924e-02,
0.48503033e-02,
0.40294230e-02,
0.27683314e-02,
0.53884014e-04,
0.61084211e-04,
0.70050810e-04,
0.79205915e-04,
0.87411223e-04,
0.93966446e-04,
0.98667610e-04,
0.10172475e-03,
0.10353317e-03,
0.10445446e-03,
0.10473084e-03,
0.10450836e-03,
0.10388237e-03,
0.10292211e-03,
0.10168495e-03,
0.10021854e-03,
0.98558390e-04,
0.96735770e-04,
0.94745832e-04,
0.92573500e-04,
0.90232592e-04,
0.87755776e-04,
0.85199157e-04,
0.82488186e-04,
0.79575933e-04,
0.76551951e-04,
0.73451745e-04,
0.70241033e-04,
0.66919587e-04,
0.63485968e-04,
0.60029004e-04,
0.56497996e-04,
0.52914555e-04,
0.49328813e-04,
0.45763074e-04,
0.42167223e-04,
0.38533406e-04,
0.34952976e-04,
0.31360858e-04,
0.27841659e-04,
0.24351986e-04,
0.21002805e-04,
0.17696189e-04,
0.14492869e-04,
0.11447947e-04,
0.84757721e-05,
0.56412364e-05,
0.29090747e-05,
0.20986288e-06,
-0.24671158e-05,
-0.51494958e-05,
-0.78480261e-05,
-0.10632524e-04,
-0.13520086e-04,
-0.16503614e-04,
-0.19662311e-04,
-0.22948981e-04,
-0.26503118e-04,
-0.30458663e-04,
-0.35155379e-04,
-0.41044819e-04,
-0.49301310e-04,
-0.62018436e-04,
-0.83043000e-04,
-0.11966786e-03,
-0.18399033e-03,
-0.29346853e-03,
-0.46542627e-03,
-0.69852220e-03,
-0.93878357e-03,
-0.10528294e-02,
-0.88006578e-03,
-0.38117066e-03,
0.22924476e-03,
0.59057289e-03,
0.52561943e-03,
0.18708916e-03,
-0.14889365e-03,
-0.34005527e-03,
-0.41272707e-03,
-0.45099761e-03,
-0.51413663e-03,
-0.62613422e-03,
-0.79706579e-03,
-0.10450790e-02,
-0.14138672e-02,
-0.19733894e-02,
-0.27403242e-02,
-0.34682453e-02,
-0.35488785e-02,
-0.24659703e-02,
-0.48296375e-03,
0.14571503e-02,
0.25734871e-02,
0.27371510e-02,
0.22676513e-02,
0.15553402e-02,
0.87312493e-03,
0.21932714e-04,
0.24863417e-04,
0.28513090e-04,
0.32239484e-04,
0.35579265e-04,
0.38247381e-04,
0.40160809e-04,
0.41405037e-04,
0.42140975e-04,
0.42515869e-04,
0.42628493e-04,
0.42538381e-04,
0.42284017e-04,
0.41893109e-04,
0.41388936e-04,
0.40791474e-04,
0.40115974e-04,
0.39374940e-04,
0.38564984e-04,
0.37680649e-04,
0.36728081e-04,
0.35719502e-04,
0.34679109e-04,
0.33576438e-04,
0.32390075e-04,
0.31158215e-04,
0.29896764e-04,
0.28590852e-04,
0.27239597e-04,
0.25841982e-04,
0.24434608e-04,
0.22996630e-04,
0.21537227e-04,
0.20077452e-04,
0.18627210e-04,
0.17164399e-04,
0.15684327e-04,
0.14226660e-04,
0.12765439e-04,
0.11332698e-04,
0.99115950e-05,
0.85493384e-05,
0.72026537e-05,
0.58994046e-05,
0.46591872e-05,
0.34499117e-05,
0.22957561e-05,
0.11843766e-05,
0.84998014e-07,
-0.10040685e-05,
-0.20963864e-05,
-0.31941836e-05,
-0.43282666e-05,
-0.55030100e-05,
-0.67179099e-05,
-0.80033133e-05,
-0.93413128e-05,
-0.10787697e-04,
-0.12397514e-04,
-0.14309594e-04,
-0.16705344e-04,
-0.20065328e-04,
-0.25239178e-04,
-0.33791446e-04,
-0.48691152e-04,
-0.74860320e-04,
-0.11940804e-03,
-0.18940083e-03,
-0.28432420e-03,
-0.38224252e-03,
-0.42882701e-03,
-0.35856038e-03,
-0.15526464e-03,
0.93624702e-04,
0.24092782e-03,
0.21429047e-03,
0.76112388e-04,
-0.60849816e-04,
-0.13861567e-03,
-0.16805886e-03,
-0.18353794e-03,
-0.20919893e-03,
-0.25477342e-03,
-0.32431912e-03,
-0.42517454e-03,
-0.57509379e-03,
-0.80277218e-03,
-0.11159416e-02,
-0.14153966e-02,
-0.14518298e-02,
-0.10097986e-02,
-0.19427406e-03,
0.60561736e-03,
0.10654813e-02,
0.11315914e-02,
0.93659549e-03,
0.64201583e-03,
0.36030522e-03,
0.14866883e-03,
0.41634707e-05,
0.47198023e-05,
0.54126149e-05,
0.61199912e-05,
0.67539763e-05,
0.72604594e-05,
0.76236788e-05,
0.78598650e-05,
0.79995607e-05,
0.80707214e-05,
0.80921054e-05,
0.80750178e-05,
0.80267509e-05,
0.79525425e-05,
0.78568110e-05,
0.77433706e-05,
0.76151509e-05,
0.74745158e-05,
0.73207625e-05,
0.71528843e-05,
0.69720704e-05,
0.67805959e-05,
0.65831077e-05,
0.63738212e-05,
0.61485744e-05,
0.59146905e-05,
0.56752488e-05,
0.54273883e-05,
0.51709094e-05,
0.49056021e-05,
0.46384298e-05,
0.43654286e-05,
0.40883574e-05,
0.38112394e-05,
0.35359881e-05,
0.32583384e-05,
0.29773378e-05,
0.27006156e-05,
0.24232718e-05,
0.21512824e-05,
0.18814886e-05,
0.16229329e-05,
0.13672592e-05,
0.11198919e-05,
0.88442783e-06,
0.65489411e-06,
0.43578387e-06,
0.22484058e-06,
0.16117639e-07,
-0.19059540e-06,
-0.39797052e-06,
-0.60633920e-06,
-0.82165008e-06,
-0.10446255e-05,
-0.12752704e-05,
-0.15192638e-05,
-0.17732648e-05,
-0.20478196e-05,
-0.23534012e-05,
-0.27163851e-05,
-0.31711115e-05,
-0.38089131e-05,
-0.47909707e-05,
-0.64142382e-05,
-0.92423534e-05,
-0.14209581e-04,
-0.22665536e-04,
-0.35952206e-04,
-0.53973064e-04,
-0.72565315e-04,
-0.81414371e-04,
-0.68077694e-04,
-0.29477938e-04,
0.17784128e-04,
0.45754921e-04,
0.40691128e-04,
0.14446910e-04,
-0.11559983e-04,
-0.26320620e-04,
-0.31904819e-04,
-0.34839621e-04,
-0.39709423e-04,
-0.48360336e-04,
-0.61561077e-04,
-0.80702943e-04,
-0.10915505e-03,
-0.15237252e-03,
-0.21185735e-03,
-0.26881794e-03,
-0.27586648e-03,
-0.19191137e-03,
-0.36795147e-04,
0.11542100e-03,
0.20291690e-03,
0.21544744e-03,
0.17828873e-03,
0.12219916e-03,
0.68575428e-04,
0.28294970e-04,
0.53851386e-05,
]
)
return spherical_albedo, albedo, expected_r1
| true | true |
f72eebafad7228c74cea213d49c8f2565bcb055c | 112,270 | py | Python | AI-env/lib/python3.7/site-packages/paramiko/transport.py | parth5795/iOT-benchmarking | 6dfc95907d85f50ee2c68592ce78b4212be5f823 | [
"MIT"
] | 12 | 2018-07-18T05:45:18.000Z | 2020-02-19T21:27:33.000Z | AI-env/lib/python3.7/site-packages/paramiko/transport.py | parth5795/iOT-benchmarking | 6dfc95907d85f50ee2c68592ce78b4212be5f823 | [
"MIT"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | AI-env/lib/python3.7/site-packages/paramiko/transport.py | parth5795/iOT-benchmarking | 6dfc95907d85f50ee2c68592ce78b4212be5f823 | [
"MIT"
] | 6 | 2019-02-20T00:42:29.000Z | 2020-08-08T17:01:43.000Z | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Core protocol implementation
"""
from __future__ import print_function
import os
import socket
import sys
import threading
import time
import weakref
from hashlib import md5, sha1, sha256, sha512
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
import paramiko
from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth
from paramiko.channel import Channel
from paramiko.common import (
xffffffff,
cMSG_CHANNEL_OPEN,
cMSG_IGNORE,
cMSG_GLOBAL_REQUEST,
DEBUG,
MSG_KEXINIT,
MSG_IGNORE,
MSG_DISCONNECT,
MSG_DEBUG,
ERROR,
WARNING,
cMSG_UNIMPLEMENTED,
INFO,
cMSG_KEXINIT,
cMSG_NEWKEYS,
MSG_NEWKEYS,
cMSG_REQUEST_SUCCESS,
cMSG_REQUEST_FAILURE,
CONNECTION_FAILED_CODE,
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
OPEN_SUCCEEDED,
cMSG_CHANNEL_OPEN_FAILURE,
cMSG_CHANNEL_OPEN_SUCCESS,
MSG_GLOBAL_REQUEST,
MSG_REQUEST_SUCCESS,
MSG_REQUEST_FAILURE,
MSG_CHANNEL_OPEN_SUCCESS,
MSG_CHANNEL_OPEN_FAILURE,
MSG_CHANNEL_OPEN,
MSG_CHANNEL_SUCCESS,
MSG_CHANNEL_FAILURE,
MSG_CHANNEL_DATA,
MSG_CHANNEL_EXTENDED_DATA,
MSG_CHANNEL_WINDOW_ADJUST,
MSG_CHANNEL_REQUEST,
MSG_CHANNEL_EOF,
MSG_CHANNEL_CLOSE,
MIN_WINDOW_SIZE,
MIN_PACKET_SIZE,
MAX_WINDOW_SIZE,
DEFAULT_WINDOW_SIZE,
DEFAULT_MAX_PACKET_SIZE,
HIGHEST_USERAUTH_MESSAGE_ID,
MSG_UNIMPLEMENTED,
MSG_NAMES,
)
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.ed25519key import Ed25519Key
from paramiko.kex_gex import KexGex, KexGexSHA256
from paramiko.kex_group1 import KexGroup1
from paramiko.kex_group14 import KexGroup14
from paramiko.kex_ecdh_nist import KexNistp256, KexNistp384, KexNistp521
from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
from paramiko.py3compat import string_types, long, byte_ord, b, input, PY2
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import (
SSHException,
BadAuthenticationType,
ChannelException,
ProxyCommandFailure,
)
from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value
# for thread cleanup
_active_threads = []
def _join_lingering_threads():
for thr in _active_threads:
thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class Transport(threading.Thread, ClosingContextManager):
"""
An SSH Transport attaches to a stream (usually a socket), negotiates an
encrypted session, authenticates, and then creates stream tunnels, called
`channels <.Channel>`, across the session. Multiple channels can be
multiplexed across a single session (and often are, in the case of port
forwardings).
Instances of this class may be used as context managers.
"""
_ENCRYPT = object()
_DECRYPT = object()
_PROTO_ID = "2.0"
_CLIENT_ID = "paramiko_{}".format(paramiko.__version__)
# These tuples of algorithm identifiers are in preference order; do not
# reorder without reason!
_preferred_ciphers = (
"aes128-ctr",
"aes192-ctr",
"aes256-ctr",
"aes128-cbc",
"aes192-cbc",
"aes256-cbc",
"blowfish-cbc",
"3des-cbc",
)
_preferred_macs = (
"hmac-sha2-256",
"hmac-sha2-512",
"hmac-sha1",
"hmac-md5",
"hmac-sha1-96",
"hmac-md5-96",
)
_preferred_keys = (
"ssh-ed25519",
"ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp521",
"ssh-rsa",
"ssh-dss",
)
_preferred_kex = (
"ecdh-sha2-nistp256",
"ecdh-sha2-nistp384",
"ecdh-sha2-nistp521",
"diffie-hellman-group-exchange-sha256",
"diffie-hellman-group-exchange-sha1",
"diffie-hellman-group14-sha1",
"diffie-hellman-group1-sha1",
)
_preferred_gsskex = (
"gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==",
"gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==",
"gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==",
)
_preferred_compression = ("none",)
_cipher_info = {
"aes128-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 16,
},
"aes192-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 24,
},
"aes256-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 32,
},
"blowfish-cbc": {
"class": algorithms.Blowfish,
"mode": modes.CBC,
"block-size": 8,
"key-size": 16,
},
"aes128-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 16,
},
"aes192-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 24,
},
"aes256-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 32,
},
"3des-cbc": {
"class": algorithms.TripleDES,
"mode": modes.CBC,
"block-size": 8,
"key-size": 24,
},
}
_mac_info = {
"hmac-sha1": {"class": sha1, "size": 20},
"hmac-sha1-96": {"class": sha1, "size": 12},
"hmac-sha2-256": {"class": sha256, "size": 32},
"hmac-sha2-512": {"class": sha512, "size": 64},
"hmac-md5": {"class": md5, "size": 16},
"hmac-md5-96": {"class": md5, "size": 12},
}
_key_info = {
"ssh-rsa": RSAKey,
"ssh-rsa-cert-v01@openssh.com": RSAKey,
"ssh-dss": DSSKey,
"ssh-dss-cert-v01@openssh.com": DSSKey,
"ecdsa-sha2-nistp256": ECDSAKey,
"ecdsa-sha2-nistp256-cert-v01@openssh.com": ECDSAKey,
"ecdsa-sha2-nistp384": ECDSAKey,
"ecdsa-sha2-nistp384-cert-v01@openssh.com": ECDSAKey,
"ecdsa-sha2-nistp521": ECDSAKey,
"ecdsa-sha2-nistp521-cert-v01@openssh.com": ECDSAKey,
"ssh-ed25519": Ed25519Key,
"ssh-ed25519-cert-v01@openssh.com": Ed25519Key,
}
_kex_info = {
"diffie-hellman-group1-sha1": KexGroup1,
"diffie-hellman-group14-sha1": KexGroup14,
"diffie-hellman-group-exchange-sha1": KexGex,
"diffie-hellman-group-exchange-sha256": KexGexSHA256,
"gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup1,
"gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup14,
"gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGex,
"ecdh-sha2-nistp256": KexNistp256,
"ecdh-sha2-nistp384": KexNistp384,
"ecdh-sha2-nistp521": KexNistp521,
}
_compression_info = {
# zlib@openssh.com is just zlib, but only turned on after a successful
# authentication. openssh servers may only offer this type because
# they've had troubles with security holes in zlib in the past.
"zlib@openssh.com": (ZlibCompressor, ZlibDecompressor),
"zlib": (ZlibCompressor, ZlibDecompressor),
"none": (None, None),
}
_modulus_pack = None
_active_check_timeout = 0.1
    def __init__(
        self,
        sock,
        default_window_size=DEFAULT_WINDOW_SIZE,
        default_max_packet_size=DEFAULT_MAX_PACKET_SIZE,
        gss_kex=False,
        gss_deleg_creds=True,
    ):
        """
        Create a new SSH session over an existing socket, or socket-like
        object. This only creates the `.Transport` object; it doesn't begin
        the SSH session yet. Use `connect` or `start_client` to begin a client
        session, or `start_server` to begin a server session.

        If the object is not actually a socket, it must have the following
        methods:

        - ``send(str)``: Writes from 1 to ``len(str)`` bytes, and returns an
          int representing the number of bytes written. Returns
          0 or raises ``EOFError`` if the stream has been closed.
        - ``recv(int)``: Reads from 1 to ``int`` bytes and returns them as a
          string. Returns 0 or raises ``EOFError`` if the stream has been
          closed.
        - ``close()``: Closes the socket.
        - ``settimeout(n)``: Sets a (float) timeout on I/O operations.

        For ease of use, you may also pass in an address (as a tuple) or a host
        string as the ``sock`` argument. (A host string is a hostname with an
        optional port (separated by ``":"``) which will be converted into a
        tuple of ``(hostname, port)``.) A socket will be connected to this
        address and used for communication. Exceptions from the ``socket``
        call may be thrown in this case.

        .. note::
            Modifying the window and packet sizes might have adverse
            effects on your channels created from this transport. The default
            values are the same as in the OpenSSH code base and have been
            battle tested.

        :param socket sock:
            a socket or socket-like object to create the session over.
        :param int default_window_size:
            sets the default window size on the transport. (defaults to
            2097152)
        :param int default_max_packet_size:
            sets the default max packet size on the transport. (defaults to
            32768)

        .. versionchanged:: 1.15
            Added the ``default_window_size`` and ``default_max_packet_size``
            arguments.
        """
        self.active = False
        self.hostname = None
        if isinstance(sock, string_types):
            # convert "host:port" into (host, port)
            hl = sock.split(":", 1)
            self.hostname = hl[0]
            if len(hl) == 1:
                sock = (hl[0], 22)
            else:
                sock = (hl[0], int(hl[1]))
        if type(sock) is tuple:
            # connect to the given (host, port)
            hostname, port = sock
            self.hostname = hostname
            reason = "No suitable address family"
            addrinfos = socket.getaddrinfo(
                hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
            )
            # Try each resolved address in turn; the for/else raises only if
            # no address family yielded a successful connection.
            for family, socktype, proto, canonname, sockaddr in addrinfos:
                if socktype == socket.SOCK_STREAM:
                    af = family
                    # addr = sockaddr
                    sock = socket.socket(af, socket.SOCK_STREAM)
                    try:
                        retry_on_signal(lambda: sock.connect((hostname, port)))
                    except socket.error as e:
                        reason = str(e)
                    else:
                        break
            else:
                raise SSHException(
                    "Unable to connect to {}: {}".format(hostname, reason)
                )
        # okay, normal socket-ish flow here...
        threading.Thread.__init__(self)
        # NOTE(review): Thread.setDaemon is deprecated since Python 3.10;
        # "self.daemon = True" is the equivalent and works on 2.6+.
        self.setDaemon(True)
        self.sock = sock
        # we set the timeout so we can check self.active periodically to
        # see if we should bail. socket.timeout exception is never propagated.
        self.sock.settimeout(self._active_check_timeout)
        # negotiated crypto parameters
        self.packetizer = Packetizer(sock)
        self.local_version = "SSH-" + self._PROTO_ID + "-" + self._CLIENT_ID
        self.remote_version = ""
        self.local_cipher = self.remote_cipher = ""
        self.local_kex_init = self.remote_kex_init = None
        self.local_mac = self.remote_mac = None
        self.local_compression = self.remote_compression = None
        self.session_id = None
        self.host_key_type = None
        self.host_key = None
        # GSS-API / SSPI Key Exchange
        self.use_gss_kex = gss_kex
        # This will be set to True if GSS-API Key Exchange was performed
        self.gss_kex_used = False
        self.kexgss_ctxt = None
        self.gss_host = None
        if self.use_gss_kex:
            self.kexgss_ctxt = GSSAuth("gssapi-keyex", gss_deleg_creds)
            # GSS kex methods take precedence over the regular ones.
            self._preferred_kex = self._preferred_gsskex + self._preferred_kex
        # state used during negotiation
        self.kex_engine = None
        self.H = None
        self.K = None
        self.initial_kex_done = False
        self.in_kex = False
        self.authenticated = False
        self._expected_packet = tuple()
        # synchronization (always higher level than write_lock)
        self.lock = threading.Lock()
        # tracking open channels
        self._channels = ChannelMap()
        self.channel_events = {}  # (id -> Event)
        self.channels_seen = {}  # (id -> True)
        self._channel_counter = 0
        self.default_max_packet_size = default_max_packet_size
        self.default_window_size = default_window_size
        self._forward_agent_handler = None
        self._x11_handler = None
        self._tcp_handler = None
        self.saved_exception = None
        self.clear_to_send = threading.Event()
        self.clear_to_send_lock = threading.Lock()
        self.clear_to_send_timeout = 30.0
        self.log_name = "paramiko.transport"
        self.logger = util.get_logger(self.log_name)
        self.packetizer.set_log(self.logger)
        self.auth_handler = None
        # response Message from an arbitrary global request
        self.global_response = None
        # user-defined event callbacks
        self.completion_event = None
        # how long (seconds) to wait for the SSH banner
        self.banner_timeout = 15
        # how long (seconds) to wait for the handshake to finish after SSH
        # banner sent.
        self.handshake_timeout = 15
        # how long (seconds) to wait for the auth response.
        self.auth_timeout = 30
        # server mode:
        self.server_mode = False
        self.server_object = None
        self.server_key_dict = {}
        self.server_accepts = []
        self.server_accept_cv = threading.Condition(self.lock)
        self.subsystem_table = {}
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
id_ = hex(long(id(self)) & xffffffff)
out = "<paramiko.Transport at {}".format(id_)
if not self.active:
out += " (unconnected)"
else:
if self.local_cipher != "":
out += " (cipher {}, {:d} bits)".format(
self.local_cipher,
self._cipher_info[self.local_cipher]["key-size"] * 8,
)
if self.is_authenticated():
out += " (active; {} open channel(s))".format(
len(self._channels)
)
elif self.initial_kex_done:
out += " (connected; awaiting auth)"
else:
out += " (connecting)"
out += ">"
return out
def atfork(self):
"""
Terminate this Transport without closing the session. On posix
systems, if a Transport is open during process forking, both parent
and child will share the underlying socket, but only one process can
use the connection (without corrupting the session). Use this method
to clean up a Transport object without disrupting the other process.
.. versionadded:: 1.5.3
"""
self.sock.close()
self.close()
def get_security_options(self):
"""
Return a `.SecurityOptions` object which can be used to tweak the
encryption algorithms this transport will permit (for encryption,
digest/hash operations, public keys, and key exchanges) and the order
of preference for them.
"""
return SecurityOptions(self)
def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
"""
Normalize/canonicalize ``self.gss_host`` depending on various factors.
:param str gss_host:
The explicitly requested GSS-oriented hostname to connect to (i.e.
what the host's name is in the Kerberos database.) Defaults to
``self.hostname`` (which will be the 'real' target hostname and/or
host portion of given socket object.)
:param bool trust_dns:
Indicates whether or not DNS is trusted; if true, DNS will be used
to canonicalize the GSS hostname (which again will either be
``gss_host`` or the transport's default hostname.)
(Defaults to True due to backwards compatibility.)
:param bool gssapi_requested:
Whether GSSAPI key exchange or authentication was even requested.
If not, this is a no-op and nothing happens
(and ``self.gss_host`` is not set.)
(Defaults to True due to backwards compatibility.)
:returns: ``None``.
"""
# No GSSAPI in play == nothing to do
if not gssapi_requested:
return
# Obtain the correct host first - did user request a GSS-specific name
# to use that is distinct from the actual SSH target hostname?
if gss_host is None:
gss_host = self.hostname
# Finally, canonicalize via DNS if DNS is trusted.
if trust_dns and gss_host is not None:
gss_host = socket.getfqdn(gss_host)
# And set attribute for reference later.
self.gss_host = gss_host
    def start_client(self, event=None, timeout=None):
        """
        Negotiate a new SSH2 session as a client. This is the first step after
        creating a new `.Transport`. A separate thread is created for protocol
        negotiation.

        If an event is passed in, this method returns immediately. When
        negotiation is done (successful or not), the given ``Event`` will
        be triggered. On failure, `is_active` will return ``False``.

        (Since 1.4) If ``event`` is ``None``, this method will not return until
        negotiation is done. On success, the method returns normally.
        Otherwise an SSHException is raised.

        After a successful negotiation, you will usually want to authenticate,
        calling `auth_password <Transport.auth_password>` or
        `auth_publickey <Transport.auth_publickey>`.

        .. note:: `connect` is a simpler method for connecting as a client.

        .. note::
            After calling this method (or `start_server` or `connect`), you
            should no longer directly read from or write to the original socket
            object.

        :param .threading.Event event:
            an event to trigger when negotiation is complete (optional)
        :param float timeout:
            a timeout, in seconds, for SSH2 session negotiation (optional)
        :raises:
            `.SSHException` -- if negotiation fails (and no ``event`` was
            passed in)
        """
        self.active = True
        if event is not None:
            # async, return immediately and let the app poll for completion
            self.completion_event = event
            self.start()
            return
        # synchronous, wait for a result
        self.completion_event = event = threading.Event()
        self.start()
        max_time = time.time() + timeout if timeout is not None else None
        while True:
            event.wait(0.1)
            # The run loop clears self.active on failure; surface its saved
            # exception when there is one.
            if not self.active:
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException("Negotiation failed.")
            # NOTE(review): if ``timeout`` expires before negotiation
            # completes, this breaks out and returns without raising —
            # callers should check `is_active` afterwards.
            if event.is_set() or (
                timeout is not None and time.time() >= max_time
            ):
                break
    def start_server(self, event=None, server=None):
        """
        Negotiate a new SSH2 session as a server. This is the first step after
        creating a new `.Transport` and setting up your server host key(s). A
        separate thread is created for protocol negotiation.

        If an event is passed in, this method returns immediately. When
        negotiation is done (successful or not), the given ``Event`` will
        be triggered. On failure, `is_active` will return ``False``.

        (Since 1.4) If ``event`` is ``None``, this method will not return until
        negotiation is done. On success, the method returns normally.
        Otherwise an SSHException is raised.

        After a successful negotiation, the client will need to authenticate.
        Override the methods `get_allowed_auths
        <.ServerInterface.get_allowed_auths>`, `check_auth_none
        <.ServerInterface.check_auth_none>`, `check_auth_password
        <.ServerInterface.check_auth_password>`, and `check_auth_publickey
        <.ServerInterface.check_auth_publickey>` in the given ``server`` object
        to control the authentication process.

        After a successful authentication, the client should request to open a
        channel. Override `check_channel_request
        <.ServerInterface.check_channel_request>` in the given ``server``
        object to allow channels to be opened.

        .. note::
            After calling this method (or `start_client` or `connect`), you
            should no longer directly read from or write to the original socket
            object.

        :param .threading.Event event:
            an event to trigger when negotiation is complete.
        :param .ServerInterface server:
            an object used to perform authentication and create `channels
            <.Channel>`
        :raises:
            `.SSHException` -- if negotiation fails (and no ``event`` was
            passed in)
        """
        # A default ServerInterface rejects everything; callers normally
        # supply their own implementation.
        if server is None:
            server = ServerInterface()
        self.server_mode = True
        self.server_object = server
        self.active = True
        if event is not None:
            # async, return immediately and let the app poll for completion
            self.completion_event = event
            self.start()
            return
        # synchronous, wait for a result
        self.completion_event = event = threading.Event()
        self.start()
        while True:
            event.wait(0.1)
            # The run loop clears self.active on failure; surface its saved
            # exception when there is one.
            if not self.active:
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException("Negotiation failed.")
            if event.is_set():
                break
def add_server_key(self, key):
"""
Add a host key to the list of keys used for server mode. When behaving
as a server, the host key is used to sign certain packets during the
SSH2 negotiation, so that the client can trust that we are who we say
we are. Because this is used for signing, the key must contain private
key info, not just the public half. Only one key of each type (RSA or
DSS) is kept.
:param .PKey key:
the host key to add, usually an `.RSAKey` or `.DSSKey`.
"""
self.server_key_dict[key.get_name()] = key
def get_server_key(self):
"""
Return the active host key, in server mode. After negotiating with the
client, this method will return the negotiated host key. If only one
type of host key was set with `add_server_key`, that's the only key
that will ever be returned. But in cases where you have set more than
one type of host key (for example, an RSA key and a DSS key), the key
type will be negotiated by the client, and this method will return the
key of the type agreed on. If the host key has not been negotiated
yet, ``None`` is returned. In client mode, the behavior is undefined.
:return:
host key (`.PKey`) of the type negotiated by the client, or
``None``.
"""
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
@staticmethod
def load_server_moduli(filename=None):
"""
(optional)
Load a file of prime moduli for use in doing group-exchange key
negotiation in server mode. It's a rather obscure option and can be
safely ignored.
In server mode, the remote client may request "group-exchange" key
negotiation, which asks the server to send a random prime number that
fits certain criteria. These primes are pretty difficult to compute,
so they can't be generated on demand. But many systems contain a file
of suitable primes (usually named something like ``/etc/ssh/moduli``).
If you call `load_server_moduli` and it returns ``True``, then this
file of primes has been loaded and we will support "group-exchange" in
server mode. Otherwise server mode will just claim that it doesn't
support that method of key negotiation.
:param str filename:
optional path to the moduli file, if you happen to know that it's
not in a standard location.
:return:
True if a moduli file was successfully loaded; False otherwise.
.. note:: This has no effect when used in client mode.
"""
Transport._modulus_pack = ModulusPack()
# places to look for the openssh "moduli" file
file_list = ["/etc/ssh/moduli", "/usr/local/etc/moduli"]
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.stop_thread()
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
def get_remote_server_key(self):
"""
Return the host key of the server (in client mode).
.. note::
Previously this call returned a tuple of ``(key type, key
string)``. You can get the same effect by calling `.PKey.get_name`
for the key type, and ``str(key)`` for the key string.
:raises: `.SSHException` -- if no session is currently active.
:return: public key (`.PKey`) of the remote server
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
return self.host_key
def is_active(self):
"""
Return true if this session is active (open).
:return:
True if the session is still active (open); False if the session is
closed
"""
return self.active
def open_session(
self, window_size=None, max_packet_size=None, timeout=None
):
"""
Request a new channel to the server, of type ``"session"``. This is
just an alias for calling `open_channel` with an argument of
``"session"``.
.. note:: Modifying the the window and packet sizes might have adverse
effects on the session created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:return: a new `.Channel`
:raises:
`.SSHException` -- if the request is rejected or the session ends
prematurely
.. versionchanged:: 1.13.4/1.14.3/1.15.3
Added the ``timeout`` argument.
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
return self.open_channel(
"session",
window_size=window_size,
max_packet_size=max_packet_size,
timeout=timeout,
)
def open_x11_channel(self, src_addr=None):
"""
Request a new channel to the client, of type ``"x11"``. This
is just an alias for ``open_channel('x11', src_addr=src_addr)``.
:param tuple src_addr:
the source address (``(str, int)``) of the x11 server (port is the
x11 port, ie. 6010)
:return: a new `.Channel`
:raises:
`.SSHException` -- if the request is rejected or the session ends
prematurely
"""
return self.open_channel("x11", src_addr=src_addr)
def open_forward_agent_channel(self):
"""
Request a new channel to the client, of type
``"auth-agent@openssh.com"``.
This is just an alias for ``open_channel('auth-agent@openssh.com')``.
:return: a new `.Channel`
:raises: `.SSHException` --
if the request is rejected or the session ends prematurely
"""
return self.open_channel("auth-agent@openssh.com")
def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
"""
Request a new channel back to the client, of type ``forwarded-tcpip``.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
:param src_addr: originator's address
:param dest_addr: local (server) connected address
"""
return self.open_channel("forwarded-tcpip", dest_addr, src_addr)
    def open_channel(
        self,
        kind,
        dest_addr=None,
        src_addr=None,
        window_size=None,
        max_packet_size=None,
        timeout=None,
    ):
        """
        Request a new channel to the server. `Channels <.Channel>` are
        socket-like objects used for the actual transfer of data across the
        session. You may only request a channel after negotiating encryption
        (using `connect` or `start_client`) and authenticating.

        .. note:: Modifying the window and packet sizes might have adverse
            effects on the channel created. The default values are the same
            as in the OpenSSH code base and have been battle tested.

        :param str kind:
            the kind of channel requested (usually ``"session"``,
            ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
        :param tuple dest_addr:
            the destination address (address + port tuple) of this port
            forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
            ``"direct-tcpip"`` (ignored for other channel types)
        :param src_addr: the source address of this port forwarding, if
            ``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
        :param int window_size:
            optional window size for this session.
        :param int max_packet_size:
            optional max packet size for this session.
        :param float timeout:
            optional timeout opening a channel, default 3600s (1h)

        :return: a new `.Channel` on success

        :raises:
            `.SSHException` -- if the request is rejected, the session ends
            prematurely or there is a timeout opening a channel

        .. versionchanged:: 1.15
            Added the ``window_size`` and ``max_packet_size`` arguments.
        """
        if not self.active:
            raise SSHException("SSH session not active")
        timeout = 3600 if timeout is None else timeout
        # Build the open request and register the channel under the lock so
        # the dispatch thread can find it as soon as the reply arrives.
        self.lock.acquire()
        try:
            window_size = self._sanitize_window_size(window_size)
            max_packet_size = self._sanitize_packet_size(max_packet_size)
            chanid = self._next_channel()
            m = Message()
            m.add_byte(cMSG_CHANNEL_OPEN)
            m.add_string(kind)
            m.add_int(chanid)
            m.add_int(window_size)
            m.add_int(max_packet_size)
            # Forwarding and x11 channel types carry extra address fields.
            if (kind == "forwarded-tcpip") or (kind == "direct-tcpip"):
                m.add_string(dest_addr[0])
                m.add_int(dest_addr[1])
                m.add_string(src_addr[0])
                m.add_int(src_addr[1])
            elif kind == "x11":
                m.add_string(src_addr[0])
                m.add_int(src_addr[1])
            chan = Channel(chanid)
            self._channels.put(chanid, chan)
            self.channel_events[chanid] = event = threading.Event()
            self.channels_seen[chanid] = True
            chan._set_transport(self)
            chan._set_window(window_size, max_packet_size)
        finally:
            self.lock.release()
        # Send outside the lock; the event is set by the dispatch thread
        # when the server's open-success/failure reply is processed.
        self._send_user_message(m)
        start_ts = time.time()
        while True:
            event.wait(0.1)
            if not self.active:
                e = self.get_exception()
                if e is None:
                    e = SSHException("Unable to open channel.")
                raise e
            if event.is_set():
                break
            elif start_ts + timeout < time.time():
                raise SSHException("Timeout opening channel.")
        # On failure the dispatch thread removes the channel from the map,
        # so a miss here means the open was rejected.
        chan = self._channels.get(chanid)
        if chan is not None:
            return chan
        e = self.get_exception()
        if e is None:
            e = SSHException("Unable to open channel.")
        raise e
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(
channel,
(origin_addr, origin_port),
(server_addr, server_port),
)
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
`accept`.
:param str address: the address to bind when forwarding
:param int port:
the port to forward, or 0 to ask the server to allocate any port
:param callable handler:
optional handler for incoming forwarded connections, of the form
``func(Channel, (str, int), (str, int))``.
:return: the port number (`int`) allocated by the server
:raises:
`.SSHException` -- if the server refused the TCP forward request
"""
if not self.active:
raise SSHException("SSH session not active")
port = int(port)
response = self.global_request(
"tcpip-forward", (address, port), wait=True
)
if response is None:
raise SSHException("TCP forwarding request denied")
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, src_addr, dest_addr_port):
# src_addr, src_port = src_addr_port
# dest_addr, dest_port = dest_addr_port
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port
def cancel_port_forward(self, address, port):
"""
Ask the server to cancel a previous port-forwarding request. No more
connections to the given address & port will be forwarded across this
ssh connection.
:param str address: the address to stop forwarding
:param int port: the port to stop forwarding
"""
if not self.active:
return
self._tcp_handler = None
self.global_request("cancel-tcpip-forward", (address, port), wait=True)
def open_sftp_client(self):
"""
Create an SFTP client channel from an open transport. On success, an
SFTP session will be opened with the remote host, and a new
`.SFTPClient` object will be returned.
:return:
a new `.SFTPClient` referring to an sftp session (channel) across
this transport
"""
return SFTPClient.from_transport(self)
def send_ignore(self, byte_count=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
:param int byte_count:
the number of random bytes to send in the payload of the ignored
packet -- defaults to a random number from 10 to 41.
"""
m = Message()
m.add_byte(cMSG_IGNORE)
if byte_count is None:
byte_count = (byte_ord(os.urandom(1)) % 32) + 10
m.add_bytes(os.urandom(byte_count))
self._send_user_message(m)
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
:raises:
`.SSHException` -- if the key renegotiation failed (which causes
the session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException("Negotiation failed.")
if self.completion_event.is_set():
break
return
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
``interval`` seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
:param int interval:
seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
"""
def _request(x=weakref.proxy(self)):
return x.global_request("keepalive@lag.net", wait=False)
self.packetizer.set_keepalive(interval, _request)
def global_request(self, kind, data=None, wait=True):
"""
Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
:param str kind: name of the request.
:param tuple data:
an optional tuple containing additional data to attach to the
request.
:param bool wait:
``True`` if this method should not return until a response is
received; ``False`` otherwise.
:return:
a `.Message` containing possible additional data if the request was
successful (or an empty `.Message` if ``wait`` was ``False``);
``None`` if the request was denied.
"""
if wait:
self.completion_event = threading.Event()
m = Message()
m.add_byte(cMSG_GLOBAL_REQUEST)
m.add_string(kind)
m.add_boolean(wait)
if data is not None:
m.add(*data)
self._log(DEBUG, 'Sending global request "{}"'.format(kind))
self._send_user_message(m)
if not wait:
return None
while True:
self.completion_event.wait(0.1)
if not self.active:
return None
if self.completion_event.is_set():
break
return self.global_response
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout,
``None`` is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
:return: a new `.Channel` opened by the client
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
    def connect(
        self,
        hostkey=None,
        username="",
        password=None,
        pkey=None,
        gss_host=None,
        gss_auth=False,
        gss_kex=False,
        gss_deleg_creds=True,
        gss_trust_dns=True,
    ):
        """
        Negotiate an SSH2 session, and optionally verify the server's host key
        and authenticate using a password or private key. This is a shortcut
        for `start_client`, `get_remote_server_key`, and
        `Transport.auth_password` or `Transport.auth_publickey`. Use those
        methods if you want more control.
        You can use this method immediately after creating a Transport to
        negotiate encryption with a server. If it fails, an exception will be
        thrown. On success, the method will return cleanly, and an encrypted
        session exists. You may immediately call `open_channel` or
        `open_session` to get a `.Channel` object, which is used for data
        transfer.
        .. note::
            If you fail to supply a password or private key, this method may
            succeed, but a subsequent `open_channel` or `open_session` call may
            fail because you haven't authenticated yet.
        :param .PKey hostkey:
            the host key expected from the server, or ``None`` if you don't
            want to do host key verification.
        :param str username: the username to authenticate as.
        :param str password:
            a password to use for authentication, if you want to use password
            authentication; otherwise ``None``.
        :param .PKey pkey:
            a private key to use for authentication, if you want to use private
            key authentication; otherwise ``None``.
        :param str gss_host:
            The target's name in the kerberos database. Default: hostname
        :param bool gss_auth:
            ``True`` if you want to use GSS-API authentication.
        :param bool gss_kex:
            Perform GSS-API Key Exchange and user authentication.
        :param bool gss_deleg_creds:
            Whether to delegate GSS-API client credentials.
        :param gss_trust_dns:
            Indicates whether or not the DNS is trusted to securely
            canonicalize the name of the host being connected to (default
            ``True``).
        :raises: `.SSHException` -- if the SSH2 negotiation fails, the host key
            supplied by the server is incorrect, or authentication fails.
        .. versionchanged:: 2.3
            Added the ``gss_trust_dns`` argument.
        """
        if hostkey is not None:
            # Only advertise the expected key's algorithm, so the server is
            # forced to present a key we can actually compare against.
            self._preferred_keys = [hostkey.get_name()]
        self.set_gss_host(
            gss_host=gss_host,
            trust_dns=gss_trust_dns,
            gssapi_requested=gss_kex or gss_auth,
        )
        self.start_client()
        # check host key if we were given one
        # If GSS-API Key Exchange was performed, we are not required to check
        # the host key.
        if (hostkey is not None) and not gss_kex:
            key = self.get_remote_server_key()
            # compare both algorithm name and raw key bytes
            if (
                key.get_name() != hostkey.get_name()
                or key.asbytes() != hostkey.asbytes()
            ):
                self._log(DEBUG, "Bad host key from server")
                self._log(
                    DEBUG,
                    "Expected: {}: {}".format(
                        hostkey.get_name(), repr(hostkey.asbytes())
                    ),
                )
                self._log(
                    DEBUG,
                    "Got      : {}: {}".format(
                        key.get_name(), repr(key.asbytes())
                    ),
                )
                raise SSHException("Bad host key from server")
            self._log(
                DEBUG, "Host key verified ({})".format(hostkey.get_name())
            )
        # Pick exactly one auth mechanism, in priority order:
        # gssapi-with-mic, gssapi-keyex, public key, then password.
        if (pkey is not None) or (password is not None) or gss_auth or gss_kex:
            if gss_auth:
                self._log(
                    DEBUG, "Attempting GSS-API auth... (gssapi-with-mic)"
                )  # noqa
                self.auth_gssapi_with_mic(
                    username, self.gss_host, gss_deleg_creds
                )
            elif gss_kex:
                self._log(DEBUG, "Attempting GSS-API auth... (gssapi-keyex)")
                self.auth_gssapi_keyex(username)
            elif pkey is not None:
                self._log(DEBUG, "Attempting public-key auth...")
                self.auth_publickey(username, pkey)
            else:
                self._log(DEBUG, "Attempting password auth...")
                self.auth_password(username, password)
        return
def get_exception(self):
"""
Return any exception that happened during the last server request.
This can be used to fetch more specific error information after using
calls like `start_client`. The exception (if any) is cleared after
this call.
:return:
an exception, or ``None`` if there is no stored exception.
.. versionadded:: 1.1
"""
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
"""
Set the handler class for a subsystem in server mode. If a request
for this subsystem is made on an open ssh channel later, this handler
will be constructed and called -- see `.SubsystemHandler` for more
detailed documentation.
Any extra parameters (including keyword arguments) are saved and
passed to the `.SubsystemHandler` constructor later.
:param str name: name of the subsystem.
:param handler:
subclass of `.SubsystemHandler` that handles this subsystem.
"""
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
def is_authenticated(self):
"""
Return true if this session is active and authenticated.
:return:
True if the session is still open and has been authenticated
successfully; False if authentication failed and/or the session is
closed.
"""
return (
self.active
and self.auth_handler is not None
and self.auth_handler.is_authenticated()
)
def get_username(self):
"""
Return the username this connection is authenticated for. If the
session is not authenticated (or authentication failed), this method
returns ``None``.
:return: username that was authenticated (a `str`), or ``None``.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def get_banner(self):
"""
Return the banner supplied by the server upon connect. If no banner is
supplied, this method returns ``None``.
:returns: server supplied banner (`str`), or ``None``.
.. versionadded:: 1.13
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.banner
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
`.BadAuthenticationType` exception raised.
:param str username: the username to authenticate as
:return:
list of auth types permissible for the next stage of
authentication (normally empty)
:raises:
`.BadAuthenticationType` -- if "none" authentication isn't allowed
by the server for this user
:raises:
`.SSHException` -- if the authentication failed due to a network
error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
    def auth_password(self, username, password, event=None, fallback=True):
        """
        Authenticate to the server using a password. The username and password
        are sent over an encrypted link.
        If an ``event`` is passed in, this method will return immediately, and
        the event will be triggered once authentication succeeds or fails. On
        success, `is_authenticated` will return ``True``. On failure, you may
        use `get_exception` to get more detailed error information.
        Since 1.1, if no event is passed, this method will block until the
        authentication succeeds or fails. On failure, an exception is raised.
        Otherwise, the method simply returns.
        Since 1.5, if no event is passed and ``fallback`` is ``True`` (the
        default), if the server doesn't support plain password authentication
        but does support so-called "keyboard-interactive" mode, an attempt
        will be made to authenticate using this interactive mode. If it fails,
        the normal exception will be thrown as if the attempt had never been
        made. This is useful for some recent Gentoo and Debian distributions,
        which turn off plain password authentication in a misguided belief
        that interactive authentication is "more secure". (It's not.)
        If the server requires multi-step authentication (which is very rare),
        this method will return a list of auth types permissible for the next
        step. Otherwise, in the normal case, an empty list is returned.
        :param str username: the username to authenticate as
        :param basestring password: the password to authenticate with
        :param .threading.Event event:
            an event to trigger when the authentication attempt is complete
            (whether it was successful or not)
        :param bool fallback:
            ``True`` if an attempt at an automated "interactive" password auth
            should be made if the server doesn't support normal password auth
        :return:
            list of auth types permissible for the next stage of
            authentication (normally empty)
        :raises:
            `.BadAuthenticationType` -- if password authentication isn't
            allowed by the server for this user (and no event was passed in)
        :raises:
            `.AuthenticationException` -- if the authentication failed (and no
            event was passed in)
        :raises: `.SSHException` -- if there was a network error
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to send the password unless we're on a secure
            # link
            raise SSHException("No existing session")
        if event is None:
            my_event = threading.Event()
        else:
            my_event = event
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_password(username, password, my_event)
        if event is not None:
            # caller wants to wait for event themselves
            return []
        try:
            return self.auth_handler.wait_for_response(my_event)
        except BadAuthenticationType as e:
            # if password auth isn't allowed, but keyboard-interactive *is*,
            # try to fudge it
            if not fallback or ("keyboard-interactive" not in e.allowed_types):
                raise
            try:
                # Answer every single-field interactive prompt with the
                # password; more than one field means this trick won't work.
                def handler(title, instructions, fields):
                    if len(fields) > 1:
                        raise SSHException("Fallback authentication failed.")
                    if len(fields) == 0:
                        # for some reason, at least on os x, a 2nd request will
                        # be made with zero fields requested.  maybe it's just
                        # to try to fake out automated scripting of the exact
                        # type we're doing here.  *shrug* :)
                        return []
                    return [password]
                return self.auth_interactive(username, handler)
            except SSHException:
                # attempt failed; just raise the original exception
                raise e
def auth_publickey(self, username, key, event=None):
"""
Authenticate to the server using a private key. The key is used to
sign data from the server, so it must include the private part.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param .PKey key: the private key to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:return:
list of auth types permissible for the next stage of
authentication (normally empty)
:raises:
`.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
:raises:
`.AuthenticationException` -- if the authentication failed (and no
event was passed in)
:raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
return self.auth_handler.wait_for_response(my_event)
def auth_interactive(self, username, handler, submethods=""):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
peroidically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
list of auth types permissible for the next stage of
authentication (normally empty).
:raises: `.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user
:raises: `.AuthenticationException` -- if the authentication failed
:raises: `.SSHException` -- if there was a network error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(
username, handler, my_event, submethods
)
return self.auth_handler.wait_for_response(my_event)
def auth_interactive_dumb(self, username, handler=None, submethods=""):
"""
Autenticate to the server interactively but dumber.
Just print the prompt and / or instructions to stdout and send back
the response. This is good for situations where partial auth is
achieved by key and then the user has to enter a 2fac token.
"""
if not handler:
def handler(title, instructions, prompt_list):
answers = []
if title:
print(title.strip())
if instructions:
print(instructions.strip())
for prompt, show_input in prompt_list:
print(prompt.strip(), end=" ")
answers.append(input())
return answers
return self.auth_interactive(username, handler, submethods)
def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds):
"""
Authenticate to the Server using GSS-API / SSPI.
:param str username: The username to authenticate as
:param str gss_host: The target host
:param bool gss_deleg_creds: Delegate credentials or not
:return: list of auth types permissible for the next stage of
authentication (normally empty)
:raises: `.BadAuthenticationType` -- if gssapi-with-mic isn't
allowed by the server (and no event was passed in)
:raises:
`.AuthenticationException` -- if the authentication failed (and no
event was passed in)
:raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_with_mic(
username, gss_host, gss_deleg_creds, my_event
)
return self.auth_handler.wait_for_response(my_event)
def auth_gssapi_keyex(self, username):
"""
Authenticate to the server with GSS-API/SSPI if GSS-API kex is in use.
:param str username: The username to authenticate as.
:returns:
a list of auth types permissible for the next stage of
authentication (normally empty)
:raises: `.BadAuthenticationType` --
if GSS-API Key Exchange was not performed (and no event was passed
in)
:raises: `.AuthenticationException` --
if the authentication failed (and no event was passed in)
:raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_keyex(username, my_event)
return self.auth_handler.wait_for_response(my_event)
def set_log_channel(self, name):
"""
Set the channel for this transport's logging. The default is
``"paramiko.transport"`` but it can be set to anything you want. (See
the `.logging` module for more info.) SSH Channels will log to a
sub-channel of the one specified.
:param str name: new channel name for logging
.. versionadded:: 1.1
"""
self.log_name = name
self.logger = util.get_logger(name)
self.packetizer.set_log(self.logger)
def get_log_channel(self):
"""
Return the channel name used for this transport's logging.
:return: channel name as a `str`
.. versionadded:: 1.2
"""
return self.log_name
def set_hexdump(self, hexdump):
"""
Turn on/off logging a hex dump of protocol traffic at DEBUG level in
the logs. Normally you would want this off (which is the default),
but if you are debugging something, it may be useful.
:param bool hexdump:
``True`` to log protocol traffix (in hex) to the log; ``False``
otherwise.
"""
self.packetizer.set_hexdump(hexdump)
def get_hexdump(self):
"""
Return ``True`` if the transport is currently logging hex dumps of
protocol traffic.
:return: ``True`` if hex dumps are being logged, else ``False``.
.. versionadded:: 1.4
"""
return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
"""
Turn on/off compression. This will only have an affect before starting
the transport (ie before calling `connect`, etc). By default,
compression is off since it negatively affects interactive sessions.
:param bool compress:
``True`` to ask the remote client/server to compress traffic;
``False`` to refuse compression
.. versionadded:: 1.5.2
"""
if compress:
self._preferred_compression = ("zlib@openssh.com", "zlib", "none")
else:
self._preferred_compression = ("none",)
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
This is effectively a wrapper around ``getpeername`` on the underlying
socket. If the socket-like object has no ``getpeername`` method, then
``("unknown", 0)`` is returned.
:return:
the address of the remote host, if known, as a ``(str, int)``
tuple.
"""
gp = getattr(self.sock, "getpeername", None)
if gp is None:
return "unknown", 0
return gp()
    def stop_thread(self):
        """Deactivate the transport and wait for its worker thread to exit."""
        self.active = False
        self.packetizer.close()
        if PY2:
            # Original join logic; #520 doesn't appear commonly present under
            # Python 2.
            while self.is_alive() and self is not threading.current_thread():
                self.join(10)
        else:
            # Keep trying to join() our main thread, quickly, until:
            # * We join()ed successfully (self.is_alive() == False)
            # * Or it looks like we've hit issue #520 (socket.recv hitting some
            # race condition preventing it from timing out correctly), wherein
            # our socket and packetizer are both closed (but where we'd
            # otherwise be sitting forever on that recv()).
            while (
                self.is_alive()
                and self is not threading.current_thread()
                and not self.sock._closed
                and not self.packetizer.closed
            ):
                self.join(0.1)
# internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
def _get_modulus_pack(self):
"""used by KexGex to find primes for group exchange"""
return self._modulus_pack
def _next_channel(self):
"""you are holding the lock"""
chanid = self._channel_counter
while self._channels.get(chanid) is not None:
self._channel_counter = (self._channel_counter + 1) & 0xffffff
chanid = self._channel_counter
self._channel_counter = (self._channel_counter + 1) & 0xffffff
return chanid
def _unlink_channel(self, chanid):
"""used by a Channel to remove itself from the active channel list"""
self._channels.delete(chanid)
    def _send_message(self, data):
        # Hand a fully built Message straight to the packetizer; unlike
        # _send_user_message, this does not wait out an in-progress rekey.
        self.packetizer.send_message(data)
    def _send_user_message(self, data):
        """
        send a message, but block if we're in key negotiation. this is used
        for user-initiated requests.
        """
        start = time.time()
        while True:
            # clear_to_send is cleared for the duration of a (re)key exchange
            self.clear_to_send.wait(0.1)
            if not self.active:
                self._log(
                    DEBUG, "Dropping user packet because connection is dead."
                )  # noqa
                return
            self.clear_to_send_lock.acquire()
            if self.clear_to_send.is_set():
                # NOTE: the lock is deliberately still held here -- it stays
                # held across the send below (released in the finally) so a
                # new kex cannot start mid-message.
                break
            self.clear_to_send_lock.release()
            if time.time() > start + self.clear_to_send_timeout:
                raise SSHException(
                    "Key-exchange timed out waiting for key negotiation"
                )  # noqa
        try:
            self._send_message(data)
        finally:
            self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
"""
Used by a kex obj to set the K (root key) and H (exchange hash).
"""
self.K = k
self.H = h
if self.session_id is None:
self.session_id = h
def _expect_packet(self, *ptypes):
"""
Used by a kex obj to register the next packet type it expects to see.
"""
self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
key = self._key_info[self.host_key_type](Message(host_key))
if key is None:
raise SSHException("Unknown host key type")
if not key.verify_ssh_sig(self.H, Message(sig)):
raise SSHException(
"Signature verification ({}) failed.".format(
self.host_key_type
)
) # noqa
self.host_key = key
    def _compute_key(self, id, nbytes):
        """
        Derive ``nbytes`` of key material from K, H and the session ID.
        ``id`` is 'A' - 'F' for the various keys used by ssh; the initial
        digest is extended by re-hashing (K || H || sofar) until enough
        bytes are available (RFC 4253 section 7.2 style derivation).
        """
        m = Message()
        m.add_mpint(self.K)
        m.add_bytes(self.H)
        m.add_byte(b(id))
        m.add_bytes(self.session_id)
        # Fallback to SHA1 for kex engines that fail to specify a hash
        # algorithm, or for e.g. transport tests that don't run kexinit.
        hash_algo = getattr(self.kex_engine, "hash_algo", None)
        hash_select_msg = "kex engine {} specified hash_algo {!r}".format(
            self.kex_engine.__class__.__name__, hash_algo
        )
        if hash_algo is None:
            hash_algo = sha1
            hash_select_msg += ", falling back to sha1"
        # only log the hash selection once per transport
        if not hasattr(self, "_logged_hash_selection"):
            self._log(DEBUG, hash_select_msg)
            setattr(self, "_logged_hash_selection", True)
        out = sofar = hash_algo(m.asbytes()).digest()
        while len(out) < nbytes:
            m = Message()
            m.add_mpint(self.K)
            m.add_bytes(self.H)
            m.add_bytes(sofar)
            digest = hash_algo(m.asbytes()).digest()
            out += digest
            sofar += digest
        return out[:nbytes]
def _get_cipher(self, name, key, iv, operation):
if name not in self._cipher_info:
raise SSHException("Unknown client cipher " + name)
else:
cipher = Cipher(
self._cipher_info[name]["class"](key),
self._cipher_info[name]["mode"](iv),
backend=default_backend(),
)
if operation is self._ENCRYPT:
return cipher.encryptor()
else:
return cipher.decryptor()
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, src_addr_port):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def _sanitize_window_size(self, window_size):
if window_size is None:
window_size = self.default_window_size
return clamp_value(MIN_WINDOW_SIZE, window_size, MAX_WINDOW_SIZE)
def _sanitize_packet_size(self, max_packet_size):
if max_packet_size is None:
max_packet_size = self.default_max_packet_size
return clamp_value(MIN_PACKET_SIZE, max_packet_size, MAX_WINDOW_SIZE)
    def _ensure_authed(self, ptype, message):
        """
        Checks message type against current auth state.
        If server mode, and auth has not succeeded, and the message is of a
        post-auth type (channel open or global request) an appropriate error
        response Message is crafted and returned to caller for sending.
        Otherwise (client mode, authed, or pre-auth message) returns None.
        """
        if (
            not self.server_mode
            or ptype <= HIGHEST_USERAUTH_MESSAGE_ID
            or self.is_authenticated()
        ):
            return None
        # WELP. We must be dealing with someone trying to do non-auth things
        # without being authed. Tell them off, based on message class.
        reply = Message()
        # Global requests have no details, just failure.
        if ptype == MSG_GLOBAL_REQUEST:
            reply.add_byte(cMSG_REQUEST_FAILURE)
        # Channel opens let us reject w/ a specific type + message.
        elif ptype == MSG_CHANNEL_OPEN:
            kind = message.get_text()  # noqa
            chanid = message.get_int()
            reply.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
            reply.add_int(chanid)
            reply.add_int(OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED)
            # empty message + language tag, per the channel-open-failure format
            reply.add_string("")
            reply.add_string("en")
        # NOTE: Post-open channel messages do not need checking; the above will
        # reject attempts to open channels, meaning that even if a malicious
        # user tries to send a MSG_CHANNEL_REQUEST, it will simply fall under
        # the logic that handles unknown channel IDs (as the channel list will
        # be empty.)
        return reply
    def run(self):
        """Transport thread main loop: banner exchange, kex, then dispatch."""
        # (use the exposed "run" method, because if we specify a thread target
        # of a private method, threading.Thread will keep a reference to it
        # indefinitely, creating a GC cycle and not letting Transport ever be
        # GC'd. it's a bug in Thread.)
        # Hold reference to 'sys' so we can test sys.modules to detect
        # interpreter shutdown.
        self.sys = sys
        # active=True occurs before the thread is launched, to avoid a race
        _active_threads.append(self)
        tid = hex(long(id(self)) & xffffffff)
        if self.server_mode:
            self._log(DEBUG, "starting thread (server mode): {}".format(tid))
        else:
            self._log(DEBUG, "starting thread (client mode): {}".format(tid))
        try:
            try:
                self.packetizer.write_all(b(self.local_version + "\r\n"))
                self._log(
                    DEBUG,
                    "Local version/idstring: {}".format(self.local_version),
                )  # noqa
                self._check_banner()
                # The above is actually very much part of the handshake, but
                # sometimes the banner can be read but the machine is not
                # responding, for example when the remote ssh daemon is loaded
                # in to memory but we can not read from the disk/spawn a new
                # shell.
                # Make sure we can specify a timeout for the initial handshake.
                # Re-use the banner timeout for now.
                self.packetizer.start_handshake(self.handshake_timeout)
                self._send_kex_init()
                self._expect_packet(MSG_KEXINIT)
                # Main dispatch loop: one iteration per inbound packet.
                while self.active:
                    if self.packetizer.need_rekey() and not self.in_kex:
                        self._send_kex_init()
                    try:
                        ptype, m = self.packetizer.read_message()
                    except NeedRekeyException:
                        continue
                    if ptype == MSG_IGNORE:
                        continue
                    elif ptype == MSG_DISCONNECT:
                        self._parse_disconnect(m)
                        break
                    elif ptype == MSG_DEBUG:
                        self._parse_debug(m)
                        continue
                    if len(self._expected_packet) > 0:
                        if ptype not in self._expected_packet:
                            raise SSHException(
                                "Expecting packet from {!r}, got {:d}".format(
                                    self._expected_packet, ptype
                                )
                            )  # noqa
                        self._expected_packet = tuple()
                        # 30-41 is the range reserved for kex-method packets
                        if (ptype >= 30) and (ptype <= 41):
                            self.kex_engine.parse_next(ptype, m)
                            continue
                    if ptype in self._handler_table:
                        error_msg = self._ensure_authed(ptype, m)
                        if error_msg:
                            self._send_message(error_msg)
                        else:
                            self._handler_table[ptype](self, m)
                    elif ptype in self._channel_handler_table:
                        chanid = m.get_int()
                        chan = self._channels.get(chanid)
                        if chan is not None:
                            self._channel_handler_table[ptype](chan, m)
                        elif chanid in self.channels_seen:
                            self._log(
                                DEBUG,
                                "Ignoring message for dead channel {:d}".format(  # noqa
                                    chanid
                                ),
                            )
                        else:
                            self._log(
                                ERROR,
                                "Channel request for unknown channel {:d}".format(  # noqa
                                    chanid
                                ),
                            )
                            break
                    elif (
                        self.auth_handler is not None
                        and ptype in self.auth_handler._handler_table
                    ):
                        handler = self.auth_handler._handler_table[ptype]
                        handler(self.auth_handler, m)
                        if len(self._expected_packet) > 0:
                            continue
                    else:
                        # Respond with "I don't implement this particular
                        # message type" message (unless the message type was
                        # itself literally MSG_UNIMPLEMENTED, in which case, we
                        # just shut up to avoid causing a useless loop).
                        name = MSG_NAMES[ptype]
                        warning = "Oops, unhandled type {} ({!r})".format(
                            ptype, name
                        )
                        self._log(WARNING, warning)
                        if ptype != MSG_UNIMPLEMENTED:
                            msg = Message()
                            msg.add_byte(cMSG_UNIMPLEMENTED)
                            msg.add_int(m.seqno)
                            self._send_message(msg)
                    self.packetizer.complete_handshake()
            except SSHException as e:
                self._log(ERROR, "Exception: " + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            except EOFError as e:
                self._log(DEBUG, "EOF in transport thread")
                self.saved_exception = e
            except socket.error as e:
                if type(e.args) is tuple:
                    if e.args:
                        emsg = "{} ({:d})".format(e.args[1], e.args[0])
                    else:  # empty tuple, e.g. socket.timeout
                        emsg = str(e) or repr(e)
                else:
                    emsg = e.args
                self._log(ERROR, "Socket exception: " + emsg)
                self.saved_exception = e
            except Exception as e:
                self._log(ERROR, "Unknown exception: " + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            # Teardown: unlink channels, wake every waiter, close the socket.
            _active_threads.remove(self)
            for chan in list(self._channels.values()):
                chan._unlink()
            if self.active:
                self.active = False
                self.packetizer.close()
                if self.completion_event is not None:
                    self.completion_event.set()
                if self.auth_handler is not None:
                    self.auth_handler.abort()
                for event in self.channel_events.values():
                    event.set()
                try:
                    self.lock.acquire()
                    self.server_accept_cv.notify()
                finally:
                    self.lock.release()
            self.sock.close()
        except:
            # Don't raise spurious 'NoneType has no attribute X' errors when we
            # wake up during interpreter shutdown. Or rather -- raise
            # everything *if* sys.modules (used as a convenient sentinel)
            # appears to still exist.
            if self.sys.modules is not None:
                raise
def _log_agreement(self, which, local, remote):
# Log useful, non-duplicative line re: an agreed-upon algorithm.
# Old code implied algorithms could be asymmetrical (different for
# inbound vs outbound) so we preserve that possibility.
msg = "{} agreed: ".format(which)
if local == remote:
msg += local
else:
msg += "local={}, remote={}".format(local, remote)
self._log(DEBUG, msg)
# protocol stages
    def _negotiate_keys(self, m):
        # throws SSHException on anything unusual
        # Entry point for (re)keying: reached when the remote side's KEXINIT
        # arrives, whether the rekey was initiated by us or by them.
        self.clear_to_send_lock.acquire()
        try:
            # block user-initiated packets until negotiation completes
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        if self.local_kex_init is None:
            # remote side wants to renegotiate
            self._send_kex_init()
        self._parse_kex_init(m)
        self.kex_engine.start_kex()
def _check_banner(self):
# this is slow, but we only have to do it once
for i in range(100):
# give them 15 seconds for the first line, then just 2 seconds
# each additional line. (some sites have very high latency.)
if i == 0:
timeout = self.banner_timeout
else:
timeout = 2
try:
buf = self.packetizer.readline(timeout)
except ProxyCommandFailure:
raise
except Exception as e:
raise SSHException(
"Error reading SSH protocol banner" + str(e)
)
if buf[:4] == "SSH-":
break
self._log(DEBUG, "Banner: " + buf)
if buf[:4] != "SSH-":
raise SSHException('Indecipherable protocol version "' + buf + '"')
# save this server version string for later
self.remote_version = buf
self._log(DEBUG, "Remote version/idstring: {}".format(buf))
# pull off any attached comment
# NOTE: comment used to be stored in a variable and then...never used.
# since 2003. ca 877cd974b8182d26fa76d566072917ea67b64e67
i = buf.find(" ")
if i >= 0:
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split("-", 2)
if len(segs) < 3:
raise SSHException("Invalid SSH banner")
version = segs[1]
client = segs[2]
if version != "1.99" and version != "2.0":
msg = "Incompatible version ({} instead of 2.0)"
raise SSHException(msg.format(version))
msg = "Connected (version {}, client {})".format(version, client)
self._log(INFO, msg)
    def _send_kex_init(self):
        """
        announce to the other side that we'd like to negotiate keys, and what
        kind of key negotiation we support.
        """
        # Stop user packets from flowing while a key (re)negotiation runs.
        self.clear_to_send_lock.acquire()
        try:
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        self.gss_kex_used = False
        self.in_kex = True
        if self.server_mode:
            mp_required_prefix = "diffie-hellman-group-exchange-sha"
            kex_mp = [
                k
                for k in self._preferred_kex
                if k.startswith(mp_required_prefix)
            ]
            if (self._modulus_pack is None) and (len(kex_mp) > 0):
                # can't do group-exchange if we don't have a pack of potential
                # primes
                pkex = [
                    k
                    for k in self.get_security_options().kex
                    if not k.startswith(mp_required_prefix)
                ]
                self.get_security_options().kex = pkex
            # only advertise host key types we actually hold a key for
            available_server_keys = list(
                filter(
                    list(self.server_key_dict.keys()).__contains__,
                    self._preferred_keys,
                )
            )
        else:
            available_server_keys = self._preferred_keys
        m = Message()
        m.add_byte(cMSG_KEXINIT)
        # 16 random bytes (the KEXINIT cookie), then one name-list per
        # category; cipher/mac/compression lists appear twice, once per
        # traffic direction (client-to-server, then server-to-client).
        m.add_bytes(os.urandom(16))
        m.add_list(self._preferred_kex)
        m.add_list(available_server_keys)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_compression)
        m.add_list(self._preferred_compression)
        # empty language name-lists, "first kex packet follows" = False, and
        # a reserved uint32 (mirrored by the reads in _parse_kex_init).
        m.add_string(bytes())
        m.add_string(bytes())
        m.add_boolean(False)
        m.add_int(0)
        # save a copy for later (needed to compute a hash)
        self.local_kex_init = m.asbytes()
        self._send_message(m)
    def _parse_kex_init(self, m):
        """Parse the peer's KEXINIT and agree on an algorithm per category.

        Chooses the key exchange, host key type, and per-direction cipher,
        MAC and compression algorithms; raises `.SSHException` when any
        category has no mutually acceptable algorithm.  The raw payload is
        saved in ``remote_kex_init`` for the exchange-hash computation.
        """
        m.get_bytes(16)  # cookie, discarded
        kex_algo_list = m.get_list()
        server_key_algo_list = m.get_list()
        client_encrypt_algo_list = m.get_list()
        server_encrypt_algo_list = m.get_list()
        client_mac_algo_list = m.get_list()
        server_mac_algo_list = m.get_list()
        client_compress_algo_list = m.get_list()
        server_compress_algo_list = m.get_list()
        client_lang_list = m.get_list()
        server_lang_list = m.get_list()
        kex_follows = m.get_boolean()
        m.get_int()  # unused
        self._log(
            DEBUG,
            "kex algos:"
            + str(kex_algo_list)
            + " server key:"
            + str(server_key_algo_list)
            + " client encrypt:"
            + str(client_encrypt_algo_list)
            + " server encrypt:"
            + str(server_encrypt_algo_list)
            + " client mac:"
            + str(client_mac_algo_list)
            + " server mac:"
            + str(server_mac_algo_list)
            + " client compress:"
            + str(client_compress_algo_list)
            + " server compress:"
            + str(server_compress_algo_list)
            + " client lang:"
            + str(client_lang_list)
            + " server lang:"
            + str(server_lang_list)
            + " kex follows?"
            + str(kex_follows),
        )
        # as a server, we pick the first item in the client's list that we
        # support.
        # as a client, we pick the first item in our list that the server
        # supports.
        if self.server_mode:
            agreed_kex = list(
                filter(self._preferred_kex.__contains__, kex_algo_list)
            )
        else:
            agreed_kex = list(
                filter(kex_algo_list.__contains__, self._preferred_kex)
            )
        if len(agreed_kex) == 0:
            raise SSHException(
                "Incompatible ssh peer (no acceptable kex algorithm)"
            )  # noqa
        self.kex_engine = self._kex_info[agreed_kex[0]](self)
        self._log(DEBUG, "Kex agreed: {}".format(agreed_kex[0]))
        # host key type: a server may only offer types it holds a key for
        if self.server_mode:
            available_server_keys = list(
                filter(
                    list(self.server_key_dict.keys()).__contains__,
                    self._preferred_keys,
                )
            )
            agreed_keys = list(
                filter(
                    available_server_keys.__contains__, server_key_algo_list
                )
            )
        else:
            agreed_keys = list(
                filter(server_key_algo_list.__contains__, self._preferred_keys)
            )
        if len(agreed_keys) == 0:
            raise SSHException(
                "Incompatible ssh peer (no acceptable host key)"
            )  # noqa
        self.host_key_type = agreed_keys[0]
        if self.server_mode and (self.get_server_key() is None):
            raise SSHException(
                "Incompatible ssh peer (can't match requested host key type)"
            )  # noqa
        self._log_agreement("HostKey", agreed_keys[0], agreed_keys[0])
        # ciphers: negotiated separately for each traffic direction
        if self.server_mode:
            agreed_local_ciphers = list(
                filter(
                    self._preferred_ciphers.__contains__,
                    server_encrypt_algo_list,
                )
            )
            agreed_remote_ciphers = list(
                filter(
                    self._preferred_ciphers.__contains__,
                    client_encrypt_algo_list,
                )
            )
        else:
            agreed_local_ciphers = list(
                filter(
                    client_encrypt_algo_list.__contains__,
                    self._preferred_ciphers,
                )
            )
            agreed_remote_ciphers = list(
                filter(
                    server_encrypt_algo_list.__contains__,
                    self._preferred_ciphers,
                )
            )
        if len(agreed_local_ciphers) == 0 or len(agreed_remote_ciphers) == 0:
            raise SSHException(
                "Incompatible ssh server (no acceptable ciphers)"
            )  # noqa
        self.local_cipher = agreed_local_ciphers[0]
        self.remote_cipher = agreed_remote_ciphers[0]
        self._log_agreement(
            "Cipher", local=self.local_cipher, remote=self.remote_cipher
        )
        # MACs, likewise per direction
        if self.server_mode:
            agreed_remote_macs = list(
                filter(self._preferred_macs.__contains__, client_mac_algo_list)
            )
            agreed_local_macs = list(
                filter(self._preferred_macs.__contains__, server_mac_algo_list)
            )
        else:
            agreed_local_macs = list(
                filter(client_mac_algo_list.__contains__, self._preferred_macs)
            )
            agreed_remote_macs = list(
                filter(server_mac_algo_list.__contains__, self._preferred_macs)
            )
        if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
            raise SSHException("Incompatible ssh server (no acceptable macs)")
        self.local_mac = agreed_local_macs[0]
        self.remote_mac = agreed_remote_macs[0]
        self._log_agreement(
            "MAC", local=self.local_mac, remote=self.remote_mac
        )
        # compression, likewise per direction
        if self.server_mode:
            agreed_remote_compression = list(
                filter(
                    self._preferred_compression.__contains__,
                    client_compress_algo_list,
                )
            )
            agreed_local_compression = list(
                filter(
                    self._preferred_compression.__contains__,
                    server_compress_algo_list,
                )
            )
        else:
            agreed_local_compression = list(
                filter(
                    client_compress_algo_list.__contains__,
                    self._preferred_compression,
                )
            )
            agreed_remote_compression = list(
                filter(
                    server_compress_algo_list.__contains__,
                    self._preferred_compression,
                )
            )
        if (
            len(agreed_local_compression) == 0
            or len(agreed_remote_compression) == 0
        ):
            msg = "Incompatible ssh server (no acceptable compression)"
            msg += " {!r} {!r} {!r}"
            raise SSHException(
                msg.format(
                    agreed_local_compression,
                    agreed_remote_compression,
                    self._preferred_compression,
                )
            )
        self.local_compression = agreed_local_compression[0]
        self.remote_compression = agreed_remote_compression[0]
        self._log_agreement(
            "Compression",
            local=self.local_compression,
            remote=self.remote_compression,
        )
        # save for computing hash later...
        # now wait! openssh has a bug (and others might too) where there are
        # actually some extra bytes (one NUL byte in openssh's case) added to
        # the end of the packet but not parsed. turns out we need to throw
        # away those bytes because they aren't part of the hash.
        self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
    def _activate_inbound(self):
        """switch on newly negotiated encryption parameters for
        inbound traffic"""
        block_size = self._cipher_info[self.remote_cipher]["block-size"]
        # Key-derivation labels: "A"/"C"/"E" yield the client-to-server IV,
        # cipher key and MAC key; "B"/"D"/"F" the server-to-client ones.
        # A server's inbound traffic is client-to-server, hence the swap.
        if self.server_mode:
            IV_in = self._compute_key("A", block_size)
            key_in = self._compute_key(
                "C", self._cipher_info[self.remote_cipher]["key-size"]
            )
        else:
            IV_in = self._compute_key("B", block_size)
            key_in = self._compute_key(
                "D", self._cipher_info[self.remote_cipher]["key-size"]
            )
        engine = self._get_cipher(
            self.remote_cipher, key_in, IV_in, self._DECRYPT
        )
        mac_size = self._mac_info[self.remote_mac]["size"]
        mac_engine = self._mac_info[self.remote_mac]["class"]
        # initial mac keys are done in the hash's natural size (not the
        # potentially truncated transmission size)
        if self.server_mode:
            mac_key = self._compute_key("E", mac_engine().digest_size)
        else:
            mac_key = self._compute_key("F", mac_engine().digest_size)
        self.packetizer.set_inbound_cipher(
            engine, block_size, mac_engine, mac_size, mac_key
        )
        compress_in = self._compression_info[self.remote_compression][1]
        # zlib@openssh.com defers compression until after authentication
        if compress_in is not None and (
            self.remote_compression != "zlib@openssh.com" or self.authenticated
        ):
            self._log(DEBUG, "Switching on inbound compression ...")
            self.packetizer.set_inbound_compressor(compress_in())
    def _activate_outbound(self):
        """switch on newly negotiated encryption parameters for
        outbound traffic"""
        # announce NEWKEYS to the peer first (it goes out under the old
        # parameters), then switch our outbound state over
        m = Message()
        m.add_byte(cMSG_NEWKEYS)
        self._send_message(m)
        block_size = self._cipher_info[self.local_cipher]["block-size"]
        # mirror of _activate_inbound: a server's outbound traffic is
        # server-to-client, so it uses the "B"/"D"/"F" derivation labels
        if self.server_mode:
            IV_out = self._compute_key("B", block_size)
            key_out = self._compute_key(
                "D", self._cipher_info[self.local_cipher]["key-size"]
            )
        else:
            IV_out = self._compute_key("A", block_size)
            key_out = self._compute_key(
                "C", self._cipher_info[self.local_cipher]["key-size"]
            )
        engine = self._get_cipher(
            self.local_cipher, key_out, IV_out, self._ENCRYPT
        )
        mac_size = self._mac_info[self.local_mac]["size"]
        mac_engine = self._mac_info[self.local_mac]["class"]
        # initial mac keys are done in the hash's natural size (not the
        # potentially truncated transmission size)
        if self.server_mode:
            mac_key = self._compute_key("F", mac_engine().digest_size)
        else:
            mac_key = self._compute_key("E", mac_engine().digest_size)
        # flag CTR-mode ciphers for the packetizer
        sdctr = self.local_cipher.endswith("-ctr")
        self.packetizer.set_outbound_cipher(
            engine, block_size, mac_engine, mac_size, mac_key, sdctr
        )
        compress_out = self._compression_info[self.local_compression][0]
        # zlib@openssh.com defers compression until after authentication
        if compress_out is not None and (
            self.local_compression != "zlib@openssh.com" or self.authenticated
        ):
            self._log(DEBUG, "Switching on outbound compression ...")
            self.packetizer.set_outbound_compressor(compress_out())
        if not self.packetizer.need_rekey():
            self.in_kex = False
        # we always expect to receive NEWKEYS now
        self._expect_packet(MSG_NEWKEYS)
def _auth_trigger(self):
self.authenticated = True
# delayed initiation of compression
if self.local_compression == "zlib@openssh.com":
compress_out = self._compression_info[self.local_compression][0]
self._log(DEBUG, "Switching on outbound compression ...")
self.packetizer.set_outbound_compressor(compress_out())
if self.remote_compression == "zlib@openssh.com":
compress_in = self._compression_info[self.remote_compression][1]
self._log(DEBUG, "Switching on inbound compression ...")
self.packetizer.set_inbound_compressor(compress_in())
    def _parse_newkeys(self, m):
        """Handle MSG_NEWKEYS: activate inbound crypto and finish the kex.

        Frees negotiation state, signals any completion event on the first
        exchange, and re-opens the pipeline for outgoing user data unless
        another re-key is already pending.
        """
        self._log(DEBUG, "Switch to new keys ...")
        self._activate_inbound()
        # can also free a bunch of stuff here
        self.local_kex_init = self.remote_kex_init = None
        self.K = None
        self.kex_engine = None
        if self.server_mode and (self.auth_handler is None):
            # create auth handler for server mode
            self.auth_handler = AuthHandler(self)
        if not self.initial_kex_done:
            # this was the first key exchange
            self.initial_kex_done = True
            # send an event?
            if self.completion_event is not None:
                self.completion_event.set()
        # it's now okay to send data again (if this was a re-key)
        if not self.packetizer.need_rekey():
            self.in_kex = False
        self.clear_to_send_lock.acquire()
        try:
            self.clear_to_send.set()
        finally:
            self.clear_to_send_lock.release()
        return
def _parse_disconnect(self, m):
code = m.get_int()
desc = m.get_text()
self._log(INFO, "Disconnect (code {:d}): {}".format(code, desc))
    def _parse_global_request(self, m):
        """Handle a GLOBAL_REQUEST from the peer, replying if asked to.

        Client mode rejects every server-initiated global request.  Server
        mode handles the tcpip-forward / cancel-tcpip-forward requests via
        ``server_object`` and delegates anything else to
        ``check_global_request``.  A tuple result means success with the
        tuple items appended to the REQUEST_SUCCESS reply.
        """
        kind = m.get_text()
        self._log(DEBUG, 'Received global request "{}"'.format(kind))
        want_reply = m.get_boolean()
        if not self.server_mode:
            self._log(
                DEBUG,
                'Rejecting "{}" global request from server.'.format(kind),
            )
            ok = False
        elif kind == "tcpip-forward":
            address = m.get_text()
            port = m.get_int()
            ok = self.server_object.check_port_forward_request(address, port)
            if ok:
                # pack the truthy result (presumably the bound port; confirm
                # against ServerInterface) as reply data
                ok = (ok,)
        elif kind == "cancel-tcpip-forward":
            address = m.get_text()
            port = m.get_int()
            self.server_object.cancel_port_forward_request(address, port)
            ok = True
        else:
            ok = self.server_object.check_global_request(kind, m)
        extra = ()
        if type(ok) is tuple:
            extra = ok
            ok = True
        if want_reply:
            msg = Message()
            if ok:
                msg.add_byte(cMSG_REQUEST_SUCCESS)
                msg.add(*extra)
            else:
                msg.add_byte(cMSG_REQUEST_FAILURE)
            self._send_message(msg)
def _parse_request_success(self, m):
self._log(DEBUG, "Global request successful.")
self.global_response = m
if self.completion_event is not None:
self.completion_event.set()
def _parse_request_failure(self, m):
self._log(DEBUG, "Global request denied.")
self.global_response = None
if self.completion_event is not None:
self.completion_event.set()
    def _parse_channel_open_success(self, m):
        """Handle CHANNEL_OPEN_SUCCESS for a channel we requested.

        Binds the server's channel id, window size and packet limit to our
        local `.Channel`, then wakes the thread blocked in ``open_channel``.
        """
        chanid = m.get_int()
        server_chanid = m.get_int()
        server_window_size = m.get_int()
        server_max_packet_size = m.get_int()
        chan = self._channels.get(chanid)
        if chan is None:
            # success for a channel id we never asked for; nothing to bind
            self._log(WARNING, "Success for unrequested channel! [??]")
            return
        self.lock.acquire()
        try:
            chan._set_remote_channel(
                server_chanid, server_window_size, server_max_packet_size
            )
            self._log(DEBUG, "Secsh channel {:d} opened.".format(chanid))
            if chanid in self.channel_events:
                # wake the open_channel() caller waiting on this event
                self.channel_events[chanid].set()
                del self.channel_events[chanid]
        finally:
            self.lock.release()
        return
    def _parse_channel_open_failure(self, m):
        """Handle CHANNEL_OPEN_FAILURE for a channel we requested.

        Records a `.ChannelException` (re-raised to the ``open_channel``
        caller via the saved-exception slot), drops the half-open channel,
        and wakes the waiting opener.
        """
        chanid = m.get_int()
        reason = m.get_int()
        reason_str = m.get_text()
        m.get_text()  # ignored language
        reason_text = CONNECTION_FAILED_CODE.get(reason, "(unknown code)")
        self._log(
            ERROR,
            "Secsh channel {:d} open FAILED: {}: {}".format(
                chanid, reason_str, reason_text
            ),
        )
        self.lock.acquire()
        try:
            self.saved_exception = ChannelException(reason, reason_text)
            if chanid in self.channel_events:
                self._channels.delete(chanid)
                if chanid in self.channel_events:
                    self.channel_events[chanid].set()
                    del self.channel_events[chanid]
        finally:
            self.lock.release()
        return
    def _parse_channel_open(self, m):
        """Handle a CHANNEL_OPEN request from the peer.

        Client mode accepts only kinds a handler was registered for
        (agent-forward, x11, forwarded-tcpip); server mode consults
        ``server_object``.  On acceptance a local `.Channel` is created and
        announced with CHANNEL_OPEN_SUCCESS, then handed to the matching
        handler (or queued via ``_queue_incoming_channel``); otherwise a
        CHANNEL_OPEN_FAILURE is sent back.
        """
        kind = m.get_text()
        chanid = m.get_int()
        initial_window_size = m.get_int()
        max_packet_size = m.get_int()
        reject = False
        if (
            kind == "auth-agent@openssh.com"
            and self._forward_agent_handler is not None
        ):
            self._log(DEBUG, "Incoming forward agent connection")
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif (kind == "x11") and (self._x11_handler is not None):
            origin_addr = m.get_text()
            origin_port = m.get_int()
            self._log(
                DEBUG,
                "Incoming x11 connection from {}:{:d}".format(
                    origin_addr, origin_port
                ),
            )
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif (kind == "forwarded-tcpip") and (self._tcp_handler is not None):
            server_addr = m.get_text()
            server_port = m.get_int()
            origin_addr = m.get_text()
            origin_port = m.get_int()
            self._log(
                DEBUG,
                "Incoming tcp forwarded connection from {}:{:d}".format(
                    origin_addr, origin_port
                ),
            )
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif not self.server_mode:
            # client mode with no matching handler: refuse the open
            self._log(
                DEBUG,
                'Rejecting "{}" channel request from server.'.format(kind),
            )
            reject = True
            reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
        else:
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
            if kind == "direct-tcpip":
                # handle direct-tcpip requests coming from the client
                dest_addr = m.get_text()
                dest_port = m.get_int()
                origin_addr = m.get_text()
                origin_port = m.get_int()
                reason = self.server_object.check_channel_direct_tcpip_request(
                    my_chanid,
                    (origin_addr, origin_port),
                    (dest_addr, dest_port),
                )
            else:
                reason = self.server_object.check_channel_request(
                    kind, my_chanid
                )
            if reason != OPEN_SUCCEEDED:
                self._log(
                    DEBUG,
                    'Rejecting "{}" channel request from client.'.format(kind),
                )
                reject = True
        if reject:
            msg = Message()
            msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
            msg.add_int(chanid)
            msg.add_int(reason)
            msg.add_string("")
            msg.add_string("en")
            self._send_message(msg)
            return
        # accepted: wire up a local Channel object for the new channel
        chan = Channel(my_chanid)
        self.lock.acquire()
        try:
            self._channels.put(my_chanid, chan)
            self.channels_seen[my_chanid] = True
            chan._set_transport(self)
            chan._set_window(
                self.default_window_size, self.default_max_packet_size
            )
            chan._set_remote_channel(
                chanid, initial_window_size, max_packet_size
            )
        finally:
            self.lock.release()
        m = Message()
        m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
        m.add_int(chanid)
        m.add_int(my_chanid)
        m.add_int(self.default_window_size)
        m.add_int(self.default_max_packet_size)
        self._send_message(m)
        self._log(
            DEBUG, "Secsh channel {:d} ({}) opened.".format(my_chanid, kind)
        )
        # dispatch to the registered handler, or queue for accept()
        if kind == "auth-agent@openssh.com":
            self._forward_agent_handler(chan)
        elif kind == "x11":
            self._x11_handler(chan, (origin_addr, origin_port))
        elif kind == "forwarded-tcpip":
            chan.origin_addr = (origin_addr, origin_port)
            self._tcp_handler(
                chan, (origin_addr, origin_port), (server_addr, server_port)
            )
        else:
            self._queue_incoming_channel(chan)
def _parse_debug(self, m):
m.get_boolean() # always_display
msg = m.get_string()
m.get_string() # language
self._log(DEBUG, "Debug msg: {}".format(util.safe_string(msg)))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return None, [], {}
return self.subsystem_table[name]
finally:
self.lock.release()
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
class SecurityOptions(object):
    """
    A view over the security preferences of an ssh `.Transport`: tuples of
    acceptable ciphers, digests, key types, and key exchange algorithms, in
    descending order of preference.

    Changing the contents and/or order of these fields affects the
    underlying `.Transport` (but only if changed before the session starts).
    Assigning an algorithm paramiko doesn't recognize raises ``ValueError``;
    assigning anything besides a tuple or list raises ``TypeError``.
    """

    __slots__ = "_transport"

    def __init__(self, transport):
        self._transport = transport

    def __repr__(self):
        """Return a debugging representation of this object."""
        return "<paramiko.SecurityOptions for {!r}>".format(self._transport)

    def _set(self, name, orig, x):
        # Lists are accepted for convenience but stored as tuples; exact
        # type checks are deliberate (subclasses are rejected, as before).
        if type(x) is list:
            x = tuple(x)
        if type(x) is not tuple:
            raise TypeError("expected tuple or list")
        known = list(getattr(self._transport, orig).keys())
        if any(algo not in known for algo in x):
            # historical message: raised for any algorithm field, not only
            # ciphers
            raise ValueError("unknown cipher")
        setattr(self._transport, name, x)

    @property
    def ciphers(self):
        """Symmetric encryption ciphers"""
        return self._transport._preferred_ciphers

    @ciphers.setter
    def ciphers(self, x):
        self._set("_preferred_ciphers", "_cipher_info", x)

    @property
    def digests(self):
        """Digest (one-way hash) algorithms"""
        return self._transport._preferred_macs

    @digests.setter
    def digests(self, x):
        self._set("_preferred_macs", "_mac_info", x)

    @property
    def key_types(self):
        """Public-key algorithms"""
        return self._transport._preferred_keys

    @key_types.setter
    def key_types(self, x):
        self._set("_preferred_keys", "_key_info", x)

    @property
    def kex(self):
        """Key exchange algorithms"""
        return self._transport._preferred_kex

    @kex.setter
    def kex(self, x):
        self._set("_preferred_kex", "_kex_info", x)

    @property
    def compression(self):
        """Compression algorithms"""
        return self._transport._preferred_compression

    @compression.setter
    def compression(self, x):
        self._set("_preferred_compression", "_compression_info", x)
class ChannelMap(object):
    """Thread-safe mapping of channel id -> `.Channel`.

    Values are held weakly, so an entry disappears once nothing else
    references the channel.
    """

    def __init__(self):
        # (id -> Channel)
        self._map = weakref.WeakValueDictionary()
        self._lock = threading.Lock()

    def put(self, chanid, chan):
        """Register *chan* under *chanid*."""
        with self._lock:
            self._map[chanid] = chan

    def get(self, chanid):
        """Return the channel stored under *chanid*, or None if absent."""
        with self._lock:
            return self._map.get(chanid, None)

    def delete(self, chanid):
        """Remove *chanid* from the map; missing ids are ignored."""
        with self._lock:
            self._map.pop(chanid, None)

    def values(self):
        """Return a snapshot list of all live channels."""
        with self._lock:
            return list(self._map.values())

    def __len__(self):
        with self._lock:
            return len(self._map)
| 38.330488 | 90 | 0.582854 |
from __future__ import print_function
import os
import socket
import sys
import threading
import time
import weakref
from hashlib import md5, sha1, sha256, sha512
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
import paramiko
from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth
from paramiko.channel import Channel
from paramiko.common import (
xffffffff,
cMSG_CHANNEL_OPEN,
cMSG_IGNORE,
cMSG_GLOBAL_REQUEST,
DEBUG,
MSG_KEXINIT,
MSG_IGNORE,
MSG_DISCONNECT,
MSG_DEBUG,
ERROR,
WARNING,
cMSG_UNIMPLEMENTED,
INFO,
cMSG_KEXINIT,
cMSG_NEWKEYS,
MSG_NEWKEYS,
cMSG_REQUEST_SUCCESS,
cMSG_REQUEST_FAILURE,
CONNECTION_FAILED_CODE,
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
OPEN_SUCCEEDED,
cMSG_CHANNEL_OPEN_FAILURE,
cMSG_CHANNEL_OPEN_SUCCESS,
MSG_GLOBAL_REQUEST,
MSG_REQUEST_SUCCESS,
MSG_REQUEST_FAILURE,
MSG_CHANNEL_OPEN_SUCCESS,
MSG_CHANNEL_OPEN_FAILURE,
MSG_CHANNEL_OPEN,
MSG_CHANNEL_SUCCESS,
MSG_CHANNEL_FAILURE,
MSG_CHANNEL_DATA,
MSG_CHANNEL_EXTENDED_DATA,
MSG_CHANNEL_WINDOW_ADJUST,
MSG_CHANNEL_REQUEST,
MSG_CHANNEL_EOF,
MSG_CHANNEL_CLOSE,
MIN_WINDOW_SIZE,
MIN_PACKET_SIZE,
MAX_WINDOW_SIZE,
DEFAULT_WINDOW_SIZE,
DEFAULT_MAX_PACKET_SIZE,
HIGHEST_USERAUTH_MESSAGE_ID,
MSG_UNIMPLEMENTED,
MSG_NAMES,
)
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.ed25519key import Ed25519Key
from paramiko.kex_gex import KexGex, KexGexSHA256
from paramiko.kex_group1 import KexGroup1
from paramiko.kex_group14 import KexGroup14
from paramiko.kex_ecdh_nist import KexNistp256, KexNistp384, KexNistp521
from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
from paramiko.py3compat import string_types, long, byte_ord, b, input, PY2
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import (
SSHException,
BadAuthenticationType,
ChannelException,
ProxyCommandFailure,
)
from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value
# Transport threads still running; each is asked to stop at interpreter
# exit so the process can shut down cleanly.
_active_threads = []
def _join_lingering_threads():
    # Signal every still-active transport thread to stop.
    for thr in _active_threads:
        thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class Transport(threading.Thread, ClosingContextManager):
_ENCRYPT = object()
_DECRYPT = object()
_PROTO_ID = "2.0"
_CLIENT_ID = "paramiko_{}".format(paramiko.__version__)
_preferred_ciphers = (
"aes128-ctr",
"aes192-ctr",
"aes256-ctr",
"aes128-cbc",
"aes192-cbc",
"aes256-cbc",
"blowfish-cbc",
"3des-cbc",
)
_preferred_macs = (
"hmac-sha2-256",
"hmac-sha2-512",
"hmac-sha1",
"hmac-md5",
"hmac-sha1-96",
"hmac-md5-96",
)
_preferred_keys = (
"ssh-ed25519",
"ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp521",
"ssh-rsa",
"ssh-dss",
)
_preferred_kex = (
"ecdh-sha2-nistp256",
"ecdh-sha2-nistp384",
"ecdh-sha2-nistp521",
"diffie-hellman-group-exchange-sha256",
"diffie-hellman-group-exchange-sha1",
"diffie-hellman-group14-sha1",
"diffie-hellman-group1-sha1",
)
_preferred_gsskex = (
"gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==",
"gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==",
"gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==",
)
_preferred_compression = ("none",)
_cipher_info = {
"aes128-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 16,
},
"aes192-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 24,
},
"aes256-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 32,
},
"blowfish-cbc": {
"class": algorithms.Blowfish,
"mode": modes.CBC,
"block-size": 8,
"key-size": 16,
},
"aes128-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 16,
},
"aes192-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 24,
},
"aes256-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 32,
},
"3des-cbc": {
"class": algorithms.TripleDES,
"mode": modes.CBC,
"block-size": 8,
"key-size": 24,
},
}
_mac_info = {
"hmac-sha1": {"class": sha1, "size": 20},
"hmac-sha1-96": {"class": sha1, "size": 12},
"hmac-sha2-256": {"class": sha256, "size": 32},
"hmac-sha2-512": {"class": sha512, "size": 64},
"hmac-md5": {"class": md5, "size": 16},
"hmac-md5-96": {"class": md5, "size": 12},
}
_key_info = {
"ssh-rsa": RSAKey,
"ssh-rsa-cert-v01@openssh.com": RSAKey,
"ssh-dss": DSSKey,
"ssh-dss-cert-v01@openssh.com": DSSKey,
"ecdsa-sha2-nistp256": ECDSAKey,
"ecdsa-sha2-nistp256-cert-v01@openssh.com": ECDSAKey,
"ecdsa-sha2-nistp384": ECDSAKey,
"ecdsa-sha2-nistp384-cert-v01@openssh.com": ECDSAKey,
"ecdsa-sha2-nistp521": ECDSAKey,
"ecdsa-sha2-nistp521-cert-v01@openssh.com": ECDSAKey,
"ssh-ed25519": Ed25519Key,
"ssh-ed25519-cert-v01@openssh.com": Ed25519Key,
}
_kex_info = {
"diffie-hellman-group1-sha1": KexGroup1,
"diffie-hellman-group14-sha1": KexGroup14,
"diffie-hellman-group-exchange-sha1": KexGex,
"diffie-hellman-group-exchange-sha256": KexGexSHA256,
"gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup1,
"gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup14,
"gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGex,
"ecdh-sha2-nistp256": KexNistp256,
"ecdh-sha2-nistp384": KexNistp384,
"ecdh-sha2-nistp521": KexNistp521,
}
_compression_info = {
"zlib@openssh.com": (ZlibCompressor, ZlibDecompressor),
"zlib": (ZlibCompressor, ZlibDecompressor),
"none": (None, None),
}
_modulus_pack = None
_active_check_timeout = 0.1
def __init__(
self,
sock,
default_window_size=DEFAULT_WINDOW_SIZE,
default_max_packet_size=DEFAULT_MAX_PACKET_SIZE,
gss_kex=False,
gss_deleg_creds=True,
):
self.active = False
self.hostname = None
if isinstance(sock, string_types):
# convert "host:port" into (host, port)
hl = sock.split(":", 1)
self.hostname = hl[0]
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
self.hostname = hostname
reason = "No suitable address family"
addrinfos = socket.getaddrinfo(
hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
)
for family, socktype, proto, canonname, sockaddr in addrinfos:
if socktype == socket.SOCK_STREAM:
af = family
# addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error as e:
reason = str(e)
else:
break
else:
raise SSHException(
"Unable to connect to {}: {}".format(hostname, reason)
)
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never propagated.
self.sock.settimeout(self._active_check_timeout)
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = "SSH-" + self._PROTO_ID + "-" + self._CLIENT_ID
self.remote_version = ""
self.local_cipher = self.remote_cipher = ""
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# GSS-API / SSPI Key Exchange
self.use_gss_kex = gss_kex
# This will be set to True if GSS-API Key Exchange was performed
self.gss_kex_used = False
self.kexgss_ctxt = None
self.gss_host = None
if self.use_gss_kex:
self.kexgss_ctxt = GSSAuth("gssapi-keyex", gss_deleg_creds)
self._preferred_kex = self._preferred_gsskex + self._preferred_kex
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
# synchronization (always higher level than write_lock)
self.lock = threading.Lock()
# tracking open channels
self._channels = ChannelMap()
self.channel_events = {} # (id -> Event)
self.channels_seen = {} # (id -> True)
self._channel_counter = 0
self.default_max_packet_size = default_max_packet_size
self.default_window_size = default_window_size
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = "paramiko.transport"
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
# response Message from an arbitrary global request
self.global_response = None
# user-defined event callbacks
self.completion_event = None
# how long (seconds) to wait for the SSH banner
self.banner_timeout = 15
# how long (seconds) to wait for the handshake to finish after SSH
# banner sent.
self.handshake_timeout = 15
# how long (seconds) to wait for the auth response.
self.auth_timeout = 30
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = {}
self.server_accepts = []
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = {}
def __repr__(self):
id_ = hex(long(id(self)) & xffffffff)
out = "<paramiko.Transport at {}".format(id_)
if not self.active:
out += " (unconnected)"
else:
if self.local_cipher != "":
out += " (cipher {}, {:d} bits)".format(
self.local_cipher,
self._cipher_info[self.local_cipher]["key-size"] * 8,
)
if self.is_authenticated():
out += " (active; {} open channel(s))".format(
len(self._channels)
)
elif self.initial_kex_done:
out += " (connected; awaiting auth)"
else:
out += " (connecting)"
out += ">"
return out
    def atfork(self):
        """
        Drop this process's copy of the transport.  Closing the raw socket
        before `close` means no further protocol traffic can be sent from
        here -- presumably so a forked child can abandon the connection
        without tearing down the parent's session (TODO confirm intent).
        """
        self.sock.close()
        self.close()
    def get_security_options(self):
        """Return a `.SecurityOptions` view for reading or adjusting this
        transport's algorithm preference lists."""
        return SecurityOptions(self)
    def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
        """Normalize and store the hostname used for GSS-API.

        :param gss_host: explicit GSS host name, or ``None`` to fall back
            to the SSH target hostname.
        :param bool trust_dns: canonicalize the name via DNS
            (``socket.getfqdn``) when True.
        :param bool gssapi_requested: when False, GSSAPI is not in play and
            this call does nothing.
        """
        # No GSSAPI in play == nothing to do
        if not gssapi_requested:
            return
        # Obtain the correct host first - did user request a GSS-specific name
        # to use that is distinct from the actual SSH target hostname?
        if gss_host is None:
            gss_host = self.hostname
        # Finally, canonicalize via DNS if DNS is trusted.
        if trust_dns and gss_host is not None:
            gss_host = socket.getfqdn(gss_host)
        # And set attribute for reference later.
        self.gss_host = gss_host
    def start_client(self, event=None, timeout=None):
        """Start negotiating an SSH session in client mode.

        :param event: optional ``threading.Event``; when given the call
            returns immediately and the event is set once negotiation
            completes (check `is_active` for the outcome).
        :param timeout: seconds to wait for negotiation in synchronous mode
            (no *event*); ``None`` waits indefinitely.
        :raises SSHException: if negotiation fails in synchronous mode.
        """
        self.active = True
        if event is not None:
            # async, return immediately and let the app poll for completion
            self.completion_event = event
            self.start()
            return

        # synchronous, wait for a result
        self.completion_event = event = threading.Event()
        self.start()
        max_time = time.time() + timeout if timeout is not None else None
        while True:
            event.wait(0.1)
            if not self.active:
                # negotiation thread died: surface its saved exception
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException("Negotiation failed.")
            # done on success, or when the overall timeout has elapsed
            if event.is_set() or (
                timeout is not None and time.time() >= max_time
            ):
                break
    def start_server(self, event=None, server=None):
        """Start negotiating an SSH session in server mode.

        :param event: optional ``threading.Event``; when given the call
            returns immediately and the event is set once negotiation
            completes (check `is_active` for the outcome).
        :param server: `.ServerInterface` consulted for channel and auth
            decisions; a default (reject-everything) instance is used when
            omitted.
        :raises SSHException: if negotiation fails in synchronous mode.
        """
        if server is None:
            server = ServerInterface()
        self.server_mode = True
        self.server_object = server
        self.active = True
        if event is not None:
            # async, return immediately and let the app poll for completion
            self.completion_event = event
            self.start()
            return

        # synchronous, wait for a result
        self.completion_event = event = threading.Event()
        self.start()
        while True:
            event.wait(0.1)
            if not self.active:
                # negotiation thread died: surface its saved exception
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException("Negotiation failed.")
            if event.is_set():
                break
    def add_server_key(self, key):
        """(server mode) Register a host key, indexed by its algorithm name.

        Only one key per type is kept; a later key of the same type
        replaces the earlier one.
        """
        self.server_key_dict[key.get_name()] = key
def get_server_key(self):
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
    @staticmethod
    def load_server_moduli(filename=None):
        """(server mode) Load a pack of prime moduli for group-exchange kex.

        Tries *filename* first when given, then the standard openssh
        locations.  Returns True if a moduli file was read successfully.
        The pack is stored on the class, so it affects all Transports.
        """
        Transport._modulus_pack = ModulusPack()
        # places to look for the openssh "moduli" file
        file_list = ["/etc/ssh/moduli", "/usr/local/etc/moduli"]
        if filename is not None:
            file_list.insert(0, filename)
        for fn in file_list:
            try:
                Transport._modulus_pack.read_file(fn)
                return True
            except IOError:
                pass
        # none succeeded
        Transport._modulus_pack = None
        return False
def close(self):
if not self.active:
return
self.stop_thread()
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
def get_remote_server_key(self):
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
return self.host_key
    def is_active(self):
        """Return the ``active`` flag (true while the session/thread is live)."""
        return self.active
    def open_session(
        self, window_size=None, max_packet_size=None, timeout=None
    ):
        """Open a new "session" channel (for exec/shell/subsystem use)."""
        return self.open_channel(
            "session",
            window_size=window_size,
            max_packet_size=max_packet_size,
            timeout=timeout,
        )
    def open_x11_channel(self, src_addr=None):
        """Open a new "x11" channel originating from ``src_addr``."""
        return self.open_channel("x11", src_addr=src_addr)
    def open_forward_agent_channel(self):
        """Open a new agent-forwarding channel (OpenSSH extension type)."""
        return self.open_channel("auth-agent@openssh.com")
    def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
        """Open a "forwarded-tcpip" channel (note dest/src order swap)."""
        return self.open_channel("forwarded-tcpip", dest_addr, src_addr)
    def open_channel(
        self,
        kind,
        dest_addr=None,
        src_addr=None,
        window_size=None,
        max_packet_size=None,
        timeout=None,
    ):
        """Request a new channel of the given kind and wait for the reply.

        :param kind: channel type, e.g. ``"session"``, ``"x11"``,
            ``"direct-tcpip"``, ``"forwarded-tcpip"``.
        :param dest_addr: ``(host, port)`` for the tcpip kinds.
        :param src_addr: originating ``(host, port)`` for tcpip/x11 kinds.
        :param window_size: initial receive window (``None`` = default).
        :param max_packet_size: max packet size (``None`` = default).
        :param timeout: seconds to wait for confirmation (``None`` = 3600).
        :return: the opened ``Channel``.
        :raises SSHException: if the session is dead, the server rejects the
            open, or the timeout expires.
        """
        if not self.active:
            raise SSHException("SSH session not active")
        timeout = 3600 if timeout is None else timeout
        self.lock.acquire()
        try:
            # allocate the channel id and register the channel under the lock
            window_size = self._sanitize_window_size(window_size)
            max_packet_size = self._sanitize_packet_size(max_packet_size)
            chanid = self._next_channel()
            m = Message()
            m.add_byte(cMSG_CHANNEL_OPEN)
            m.add_string(kind)
            m.add_int(chanid)
            m.add_int(window_size)
            m.add_int(max_packet_size)
            if (kind == "forwarded-tcpip") or (kind == "direct-tcpip"):
                m.add_string(dest_addr[0])
                m.add_int(dest_addr[1])
                m.add_string(src_addr[0])
                m.add_int(src_addr[1])
            elif kind == "x11":
                m.add_string(src_addr[0])
                m.add_int(src_addr[1])
            chan = Channel(chanid)
            self._channels.put(chanid, chan)
            self.channel_events[chanid] = event = threading.Event()
            self.channels_seen[chanid] = True
            chan._set_transport(self)
            chan._set_window(window_size, max_packet_size)
        finally:
            self.lock.release()
        # send the open request outside the lock, then poll for the response
        self._send_user_message(m)
        start_ts = time.time()
        while True:
            event.wait(0.1)
            if not self.active:
                e = self.get_exception()
                if e is None:
                    e = SSHException("Unable to open channel.")
                raise e
            if event.is_set():
                break
            elif start_ts + timeout < time.time():
                raise SSHException("Timeout opening channel.")
        chan = self._channels.get(chanid)
        if chan is not None:
            return chan
        # event fired but the channel is gone: the server rejected the open
        e = self.get_exception()
        if e is None:
            e = SSHException("Unable to open channel.")
        raise e
    def request_port_forward(self, address, port, handler=None):
        """Ask the server to forward a TCP port back to this client.

        :param address: server-side interface to bind.
        :param port: server-side port; ``0`` lets the server choose one.
        :param handler: ``callback(channel, src_addr, dest_addr)`` for each
            incoming connection; default queues channels for ``accept()``.
        :return: the port actually bound on the server.
        :raises SSHException: if the session is dead or the request is denied.
        """
        if not self.active:
            raise SSHException("SSH session not active")
        port = int(port)
        response = self.global_request(
            "tcpip-forward", (address, port), wait=True
        )
        if response is None:
            raise SSHException("TCP forwarding request denied")
        if port == 0:
            # server picked a port; it's echoed back in the reply
            port = response.get_int()
        if handler is None:
            def default_handler(channel, src_addr, dest_addr_port):
                # src_addr, src_port = src_addr_port
                # dest_addr, dest_port = dest_addr_port
                self._queue_incoming_channel(channel)
            handler = default_handler
        self._tcp_handler = handler
        return port
    def cancel_port_forward(self, address, port):
        """Cancel a previously requested server-side port forward (no-op if
        the session is already dead)."""
        if not self.active:
            return
        self._tcp_handler = None
        self.global_request("cancel-tcpip-forward", (address, port), wait=True)
    def open_sftp_client(self):
        """Create and return an SFTP client running over this transport."""
        return SFTPClient.from_transport(self)
    def send_ignore(self, byte_count=None):
        """Send random MSG_IGNORE padding to hinder traffic analysis."""
        m = Message()
        m.add_byte(cMSG_IGNORE)
        if byte_count is None:
            # random length in [10, 41] when the caller doesn't care
            byte_count = (byte_ord(os.urandom(1)) % 32) + 10
        m.add_bytes(os.urandom(byte_count))
        self._send_user_message(m)
    def renegotiate_keys(self):
        """Force a full key re-exchange and block until it completes.

        :raises SSHException: if negotiation fails or the session dies.
        """
        self.completion_event = threading.Event()
        self._send_kex_init()
        while True:
            self.completion_event.wait(0.1)
            if not self.active:
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException("Negotiation failed.")
            if self.completion_event.is_set():
                break
        return
    def set_keepalive(self, interval):
        """Send a keepalive global request every ``interval`` seconds.

        :param interval: seconds between keepalives; 0 disables them.
        """
        def _request(x=weakref.proxy(self)):
            # weakref.proxy keeps the packetizer from pinning the transport
            return x.global_request("keepalive@lag.net", wait=False)
        self.packetizer.set_keepalive(interval, _request)
    def global_request(self, kind, data=None, wait=True):
        """Send a global (session-wide) request to the peer.

        :param kind: request name, e.g. ``"tcpip-forward"``.
        :param data: optional tuple of values appended via ``Message.add``.
        :param wait: block for the reply when true.
        :return: the response ``Message`` on success, ``None`` if denied, the
            session died, or ``wait`` was false.
        """
        if wait:
            self.completion_event = threading.Event()
        m = Message()
        m.add_byte(cMSG_GLOBAL_REQUEST)
        m.add_string(kind)
        m.add_boolean(wait)
        if data is not None:
            m.add(*data)
        self._log(DEBUG, 'Sending global request "{}"'.format(kind))
        self._send_user_message(m)
        if not wait:
            return None
        while True:
            self.completion_event.wait(0.1)
            if not self.active:
                return None
            if self.completion_event.is_set():
                break
        # set by _parse_request_success / _parse_request_failure
        return self.global_response
def accept(self, timeout=None):
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
    def connect(
        self,
        hostkey=None,
        username="",
        password=None,
        pkey=None,
        gss_host=None,
        gss_auth=False,
        gss_kex=False,
        gss_deleg_creds=True,
        gss_trust_dns=True,
    ):
        """Negotiate a session, optionally verify the host key, and
        authenticate (convenience wrapper over start_client + auth_*).

        :param hostkey: expected server host key; verified when given
            (unless GSS-API kex already authenticated the host).
        :param username: user to authenticate as.
        :param password: password for password auth (or keyboard-interactive
            fallback).
        :param pkey: private key for public-key auth.
        :param gss_host: target host for GSS-API; DNS-canonicalized when
            ``gss_trust_dns`` is true.
        :param gss_auth: use gssapi-with-mic authentication.
        :param gss_kex: use GSS-API key exchange / gssapi-keyex auth.
        :param gss_deleg_creds: delegate GSS-API credentials.
        :param gss_trust_dns: trust DNS to canonicalize ``gss_host``.
        :raises SSHException: on negotiation failure or host key mismatch.
        """
        if hostkey is not None:
            # constrain the kex to offer only the expected host key type
            self._preferred_keys = [hostkey.get_name()]
        self.set_gss_host(
            gss_host=gss_host,
            trust_dns=gss_trust_dns,
            gssapi_requested=gss_kex or gss_auth,
        )
        self.start_client()
        # check host key if we were given one
        # If GSS-API Key Exchange was performed, we are not required to check
        # the host key.
        if (hostkey is not None) and not gss_kex:
            key = self.get_remote_server_key()
            if (
                key.get_name() != hostkey.get_name()
                or key.asbytes() != hostkey.asbytes()
            ):
                self._log(DEBUG, "Bad host key from server")
                self._log(
                    DEBUG,
                    "Expected: {}: {}".format(
                        hostkey.get_name(), repr(hostkey.asbytes())
                    ),
                )
                self._log(
                    DEBUG,
                    "Got      : {}: {}".format(
                        key.get_name(), repr(key.asbytes())
                    ),
                )
                raise SSHException("Bad host key from server")
            self._log(
                DEBUG, "Host key verified ({})".format(hostkey.get_name())
            )
        if (pkey is not None) or (password is not None) or gss_auth or gss_kex:
            if gss_auth:
                self._log(
                    DEBUG, "Attempting GSS-API auth... (gssapi-with-mic)"
                )  # noqa
                self.auth_gssapi_with_mic(
                    username, self.gss_host, gss_deleg_creds
                )
            elif gss_kex:
                self._log(DEBUG, "Attempting GSS-API auth... (gssapi-keyex)")
                self.auth_gssapi_keyex(username)
            elif pkey is not None:
                self._log(DEBUG, "Attempting public-key auth...")
                self.auth_publickey(username, pkey)
            else:
                self._log(DEBUG, "Attempting password auth...")
                self.auth_password(username, password)
        return
def get_exception(self):
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
    def is_authenticated(self):
        """True when the session is active and authentication succeeded."""
        return (
            self.active
            and self.auth_handler is not None
            and self.auth_handler.is_authenticated()
        )
def get_username(self):
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
    def get_banner(self):
        """Return the server's auth banner, or ``None`` if unavailable."""
        if not self.active or (self.auth_handler is None):
            return None
        return self.auth_handler.banner
    def auth_none(self, username):
        """Try the "none" auth method (mostly to learn the allowed methods).

        :raises SSHException: if no session has been negotiated.
        """
        if (not self.active) or (not self.initial_kex_done):
            raise SSHException("No existing session")
        my_event = threading.Event()
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_none(username, my_event)
        return self.auth_handler.wait_for_response(my_event)
    def auth_password(self, username, password, event=None, fallback=True):
        """Authenticate with a password.

        :param event: optional event for async completion (returns ``[]``
            immediately when given).
        :param fallback: retry via keyboard-interactive if the server rejects
            plain password auth but allows keyboard-interactive.
        :return: list of auth methods still required (empty on success).
        :raises SSHException: if no session exists or auth fails.
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to send the password unless we're on a
            # secure link
            raise SSHException("No existing session")
        if event is None:
            my_event = threading.Event()
        else:
            my_event = event
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_password(username, password, my_event)
        if event is not None:
            # caller wants to wait for event themselves
            return []
        try:
            return self.auth_handler.wait_for_response(my_event)
        except BadAuthenticationType as e:
            # try to fudge it
            if not fallback or ("keyboard-interactive" not in e.allowed_types):
                raise
            try:
                def handler(title, instructions, fields):
                    if len(fields) > 1:
                        raise SSHException("Fallback authentication failed.")
                    if len(fields) == 0:
                        # for some reason, at least on os x, a 2nd request will
                        # be made with zero fields requested.  maybe it's just
                        # to try to fake out automated scripting.
                        return []
                    return [password]
                return self.auth_interactive(username, handler)
            except SSHException:
                # attempt failed; just raise the original exception
                raise e
    def auth_publickey(self, username, key, event=None):
        """Authenticate with a private key.

        :param event: optional event for async completion (returns ``[]``
            immediately when given).
        :return: list of auth methods still required (empty on success).
        :raises SSHException: if no session exists or auth fails.
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to authenticate unless we're on a secure link
            raise SSHException("No existing session")
        if event is None:
            my_event = threading.Event()
        else:
            my_event = event
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_publickey(username, key, my_event)
        if event is not None:
            # caller wants to wait for event themselves
            return []
        return self.auth_handler.wait_for_response(my_event)
    def auth_interactive(self, username, handler, submethods=""):
        """Authenticate via keyboard-interactive.

        :param handler: ``callback(title, instructions, prompt_list)``
            returning a list of answers.
        :raises SSHException: if no session exists or auth fails.
        """
        if (not self.active) or (not self.initial_kex_done):
            raise SSHException("No existing session")
        my_event = threading.Event()
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_interactive(
            username, handler, my_event, submethods
        )
        return self.auth_handler.wait_for_response(my_event)
    def auth_interactive_dumb(self, username, handler=None, submethods=""):
        """Keyboard-interactive auth with a default handler that prompts the
        user on stdin/stdout (for "dumb" interactive sessions)."""
        if not handler:
            def handler(title, instructions, prompt_list):
                answers = []
                if title:
                    print(title.strip())
                if instructions:
                    print(instructions.strip())
                for prompt, show_input in prompt_list:
                    print(prompt.strip(), end=" ")
                    answers.append(input())
                return answers
        return self.auth_interactive(username, handler, submethods)
    def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds):
        """Authenticate via the gssapi-with-mic (Kerberos) method.

        :raises SSHException: if no session exists or auth fails.
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to authenticate unless we're on a secure link
            raise SSHException("No existing session")
        my_event = threading.Event()
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_gssapi_with_mic(
            username, gss_host, gss_deleg_creds, my_event
        )
        return self.auth_handler.wait_for_response(my_event)
    def auth_gssapi_keyex(self, username):
        """Authenticate via gssapi-keyex (requires a GSS-API key exchange).

        :raises SSHException: if no session exists or auth fails.
        """
        if (not self.active) or (not self.initial_kex_done):
            raise SSHException("No existing session")
        my_event = threading.Event()
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_gssapi_keyex(username, my_event)
        return self.auth_handler.wait_for_response(my_event)
    def set_log_channel(self, name):
        """Redirect this transport's logging to the named logger channel."""
        self.log_name = name
        self.logger = util.get_logger(name)
        self.packetizer.set_log(self.logger)
    def get_log_channel(self):
        """Return the name of the logger channel currently in use."""
        return self.log_name
    def set_hexdump(self, hexdump):
        """Enable/disable hex dumps of all raw packet traffic (debugging)."""
        self.packetizer.set_hexdump(hexdump)
    def get_hexdump(self):
        """Return whether packet hex-dumping is currently enabled."""
        return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
if compress:
self._preferred_compression = ("zlib@openssh.com", "zlib", "none")
else:
self._preferred_compression = ("none",)
def getpeername(self):
gp = getattr(self.sock, "getpeername", None)
if gp is None:
return "unknown", 0
return gp()
    def stop_thread(self):
        """Deactivate the transport and join the worker thread."""
        self.active = False
        self.packetizer.close()
        if PY2:
            # Original join logic; issue #520's hang doesn't appear commonly
            # present under Python 2, so keep the simple loop there.
            while self.is_alive() and self is not threading.current_thread():
                self.join(10)
        else:
            # Keep joining in short slices but bail out once both our socket
            # and packetizer report closed (guards against a race condition
            # preventing the join from timing out correctly).
            while (
                self.is_alive()
                and self is not threading.current_thread()
                and not self.sock._closed
                and not self.packetizer.closed
            ):
                self.join(0.1)
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
    def _get_modulus_pack(self):
        """Return the class-level moduli pack (``None`` if never loaded)."""
        return self._modulus_pack
    def _next_channel(self):
        """Return the next unused channel id (caller must hold ``self.lock``).

        Ids wrap at 24 bits; the scan skips ids still in use.
        """
        chanid = self._channel_counter
        while self._channels.get(chanid) is not None:
            self._channel_counter = (self._channel_counter + 1) & 0xffffff
            chanid = self._channel_counter
        self._channel_counter = (self._channel_counter + 1) & 0xffffff
        return chanid
    def _unlink_channel(self, chanid):
        """Forget about a dead channel (called by Channel teardown)."""
        self._channels.delete(chanid)
    def _send_message(self, data):
        """Send a packet immediately, bypassing the rekey clear-to-send gate."""
        self.packetizer.send_message(data)
    def _send_user_message(self, data):
        """Send a user packet, first waiting for the clear-to-send gate
        (which is closed during key (re)negotiation).

        :raises SSHException: if key exchange doesn't finish within
            ``clear_to_send_timeout`` seconds.
        """
        start = time.time()
        while True:
            self.clear_to_send.wait(0.1)
            if not self.active:
                self._log(
                    DEBUG, "Dropping user packet because connection is dead."
                )  # noqa
                return
            self.clear_to_send_lock.acquire()
            if self.clear_to_send.is_set():
                # send below while still holding the lock, so a rekey can't
                # slip in between the check and the send
                break
            self.clear_to_send_lock.release()
            if time.time() > start + self.clear_to_send_timeout:
                raise SSHException(
                    "Key-exchange timed out waiting for key negotiation"
                )  # noqa
        try:
            self._send_message(data)
        finally:
            self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
self.K = k
self.H = h
if self.session_id is None:
self.session_id = h
def _expect_packet(self, *ptypes):
self._expected_packet = tuple(ptypes)
    def _verify_key(self, host_key, sig):
        """Parse the server's host key blob and verify its signature over H.

        :raises SSHException: for an unknown key type or a bad signature.
        """
        key = self._key_info[self.host_key_type](Message(host_key))
        if key is None:
            raise SSHException("Unknown host key type")
        if not key.verify_ssh_sig(self.H, Message(sig)):
            raise SSHException(
                "Signature verification ({}) failed.".format(
                    self.host_key_type
                )
            )  # noqa
        # keep the verified key for get_remote_server_key()
        self.host_key = key
    def _compute_key(self, id, nbytes):
        """Derive ``nbytes`` of key material per RFC 4253 sec. 7.2.

        :param id: single-letter purpose tag ("A".."F").
        :param nbytes: number of key bytes required; the hash output is
            extended by rehashing K || H || accumulated-output as needed.
        """
        m = Message()
        m.add_mpint(self.K)
        m.add_bytes(self.H)
        m.add_byte(b(id))
        m.add_bytes(self.session_id)
        # Fallback to SHA-1 for kex engines that don't specify a hash.
        hash_algo = getattr(self.kex_engine, "hash_algo", None)
        hash_select_msg = "kex engine {} specified hash_algo {!r}".format(
            self.kex_engine.__class__.__name__, hash_algo
        )
        if hash_algo is None:
            hash_algo = sha1
            hash_select_msg += ", falling back to sha1"
        if not hasattr(self, "_logged_hash_selection"):
            # log the hash choice only once per transport
            self._log(DEBUG, hash_select_msg)
            setattr(self, "_logged_hash_selection", True)
        out = sofar = hash_algo(m.asbytes()).digest()
        while len(out) < nbytes:
            m = Message()
            m.add_mpint(self.K)
            m.add_bytes(self.H)
            m.add_bytes(sofar)
            digest = hash_algo(m.asbytes()).digest()
            out += digest
            sofar += digest
        return out[:nbytes]
def _get_cipher(self, name, key, iv, operation):
if name not in self._cipher_info:
raise SSHException("Unknown client cipher " + name)
else:
cipher = Cipher(
self._cipher_info[name]["class"](key),
self._cipher_info[name]["mode"](iv),
backend=default_backend(),
)
if operation is self._ENCRYPT:
return cipher.encryptor()
else:
return cipher.decryptor()
    def _set_forward_agent_handler(self, handler):
        """Install the callback for incoming agent-forwarding channels;
        ``None`` installs a default that queues them for ``accept()``."""
        if handler is None:
            def default_handler(channel):
                self._queue_incoming_channel(channel)
            self._forward_agent_handler = default_handler
        else:
            self._forward_agent_handler = handler
    def _set_x11_handler(self, handler):
        """Install the callback for incoming x11 channels.

        :param handler: ``callback(channel, src_addr_port)``; ``None``
            installs a default that queues channels for ``accept()``.
        """
        # only called if a channel has turned on x11 forwarding
        if handler is None:
            # by default, use the same mechanism as accept()
            def default_handler(channel, src_addr_port):
                self._queue_incoming_channel(channel)
            self._x11_handler = default_handler
        else:
            self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
    def _sanitize_window_size(self, window_size):
        """Clamp a requested window size into [MIN, MAX]; None = default."""
        if window_size is None:
            window_size = self.default_window_size
        return clamp_value(MIN_WINDOW_SIZE, window_size, MAX_WINDOW_SIZE)
    def _sanitize_packet_size(self, max_packet_size):
        """Clamp a requested packet size; None = default.

        NOTE(review): the upper bound is MAX_WINDOW_SIZE rather than a
        dedicated MAX_PACKET_SIZE constant — confirm this is intentional.
        """
        if max_packet_size is None:
            max_packet_size = self.default_max_packet_size
        return clamp_value(MIN_PACKET_SIZE, max_packet_size, MAX_WINDOW_SIZE)
    def _ensure_authed(self, ptype, message):
        """In server mode, reject non-auth packets from unauthenticated peers.

        :return: ``None`` when the packet may proceed, otherwise a reply
            ``Message`` refusing the request.
        """
        if (
            not self.server_mode
            or ptype <= HIGHEST_USERAUTH_MESSAGE_ID
            or self.is_authenticated()
        ):
            return None
        # WELP. We must be dealing with someone trying to do non-auth things
        # without being authed. Tell them off, based on message class.
        reply = Message()
        # Global requests have no details, just failure.
        if ptype == MSG_GLOBAL_REQUEST:
            reply.add_byte(cMSG_REQUEST_FAILURE)
        # Channel opens let us reject w/ a specific type + message.
        elif ptype == MSG_CHANNEL_OPEN:
            kind = message.get_text()  # noqa
            chanid = message.get_int()
            reply.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
            reply.add_int(chanid)
            reply.add_int(OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED)
            reply.add_string("")
            reply.add_string("en")
        # NOTE: Post-open channel messages do not need checking; the above will
        # reject attemps to open channels, meaning that even if a malicious
        # user tries to send a MSG_CHANNEL_REQUEST, it will simply fall under
        # the logic that handles unknown channel IDs (as the channel list will
        # be empty.)
        return reply
    def run(self):
        """Main loop of the transport thread: banner exchange, key exchange,
        then packet dispatch until the session dies.

        Dispatch order per packet: expected-packet check, kex messages
        (types 30-41), transport handlers, per-channel handlers, auth
        handlers, then an UNIMPLEMENTED reply for anything unknown.
        """
        # (use the exposed "run" method, because if we specify a thread target
        # of a private method, threading.Thread will keep a reference to it
        # indefinitely, creating a GC cycle and not letting Transport ever be
        # GC'd. it's a bug in Thread.)
        # Hold reference to 'sys' so we can test sys.modules to detect
        # interpreter shutdown.
        self.sys = sys
        # active=True occurs before the thread is launched, to avoid a race
        _active_threads.append(self)
        # thread id for log readability; long/xffffffff come from the
        # py2/py3 compatibility layer
        tid = hex(long(id(self)) & xffffffff)
        if self.server_mode:
            self._log(DEBUG, "starting thread (server mode): {}".format(tid))
        else:
            self._log(DEBUG, "starting thread (client mode): {}".format(tid))
        try:
            try:
                self.packetizer.write_all(b(self.local_version + "\r\n"))
                self._log(
                    DEBUG,
                    "Local version/idstring: {}".format(self.local_version),
                )  # noqa
                self._check_banner()
                # The above is actually very much part of the handshake, but
                # sometimes the banner can be read but the machine is not
                # responding, for example when the remote ssh daemon is loaded
                # in to memory but we can not read from the disk/spawn a new
                # shell.
                # Make sure we can specify a timeout for the initial handshake.
                # Re-use the banner timeout for now.
                self.packetizer.start_handshake(self.handshake_timeout)
                self._send_kex_init()
                self._expect_packet(MSG_KEXINIT)
                while self.active:
                    if self.packetizer.need_rekey() and not self.in_kex:
                        self._send_kex_init()
                    try:
                        ptype, m = self.packetizer.read_message()
                    except NeedRekeyException:
                        continue
                    if ptype == MSG_IGNORE:
                        continue
                    elif ptype == MSG_DISCONNECT:
                        self._parse_disconnect(m)
                        break
                    elif ptype == MSG_DEBUG:
                        self._parse_debug(m)
                        continue
                    if len(self._expected_packet) > 0:
                        if ptype not in self._expected_packet:
                            raise SSHException(
                                "Expecting packet from {!r}, got {:d}".format(
                                    self._expected_packet, ptype
                                )
                            )  # noqa
                        self._expected_packet = tuple()
                        # types 30-41 belong to whichever kex is in progress
                        if (ptype >= 30) and (ptype <= 41):
                            self.kex_engine.parse_next(ptype, m)
                            continue
                    if ptype in self._handler_table:
                        error_msg = self._ensure_authed(ptype, m)
                        if error_msg:
                            self._send_message(error_msg)
                        else:
                            self._handler_table[ptype](self, m)
                    elif ptype in self._channel_handler_table:
                        chanid = m.get_int()
                        chan = self._channels.get(chanid)
                        if chan is not None:
                            self._channel_handler_table[ptype](chan, m)
                        elif chanid in self.channels_seen:
                            self._log(
                                DEBUG,
                                "Ignoring message for dead channel {:d}".format(  # noqa
                                    chanid
                                ),
                            )
                        else:
                            self._log(
                                ERROR,
                                "Channel request for unknown channel {:d}".format(  # noqa
                                    chanid
                                ),
                            )
                            break
                    elif (
                        self.auth_handler is not None
                        and ptype in self.auth_handler._handler_table
                    ):
                        handler = self.auth_handler._handler_table[ptype]
                        handler(self.auth_handler, m)
                        if len(self._expected_packet) > 0:
                            continue
                    else:
                        # Respond with "I don't implement this particular
                        # message type" message (unless the message type was
                        # itself literally MSG_UNIMPLEMENTED, in which case, we
                        # just shut up to avoid causing a useless loop).
                        name = MSG_NAMES[ptype]
                        warning = "Oops, unhandled type {} ({!r})".format(
                            ptype, name
                        )
                        self._log(WARNING, warning)
                        if ptype != MSG_UNIMPLEMENTED:
                            msg = Message()
                            msg.add_byte(cMSG_UNIMPLEMENTED)
                            msg.add_int(m.seqno)
                            self._send_message(msg)
                    self.packetizer.complete_handshake()
            except SSHException as e:
                self._log(ERROR, "Exception: " + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            except EOFError as e:
                self._log(DEBUG, "EOF in transport thread")
                self.saved_exception = e
            except socket.error as e:
                if type(e.args) is tuple:
                    if e.args:
                        emsg = "{} ({:d})".format(e.args[1], e.args[0])
                    else:
                        emsg = str(e) or repr(e)
                else:
                    emsg = e.args
                self._log(ERROR, "Socket exception: " + emsg)
                self.saved_exception = e
            except Exception as e:
                self._log(ERROR, "Unknown exception: " + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            # teardown: unlink channels, wake every waiter, close the socket
            _active_threads.remove(self)
            for chan in list(self._channels.values()):
                chan._unlink()
            if self.active:
                self.active = False
                self.packetizer.close()
                if self.completion_event is not None:
                    self.completion_event.set()
                if self.auth_handler is not None:
                    self.auth_handler.abort()
                for event in self.channel_events.values():
                    event.set()
                try:
                    self.lock.acquire()
                    self.server_accept_cv.notify()
                finally:
                    self.lock.release()
            self.sock.close()
        except:
            # Don't raise spurious 'NoneType has no attribute X' errors when we
            # wake up during interpreter shutdown. Or rather -- raise
            # everything *if* sys.modules (used as a convenient sentinel)
            # appears to still exist.
            if self.sys.modules is not None:
                raise
def _log_agreement(self, which, local, remote):
# Log useful, non-duplicative line re: an agreed-upon algorithm.
# Old code implied algorithms could be asymmetrical (different for
# inbound vs outbound) so we preserve that possibility.
msg = "{} agreed: ".format(which)
if local == remote:
msg += local
else:
msg += "local={}, remote={}".format(local, remote)
self._log(DEBUG, msg)
# protocol stages
    def _negotiate_keys(self, m):
        """Handle an inbound KEXINIT: pause user traffic and start kex.

        :param m: the peer's KEXINIT message.
        """
        # throws SSHException on anything unusual
        self.clear_to_send_lock.acquire()
        try:
            # block user packets until NEWKEYS completes
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        if self.local_kex_init is None:
            # remote side wants to renegotiate
            self._send_kex_init()
        self._parse_kex_init(m)
        self.kex_engine.start_kex()
def _check_banner(self):
# this is slow, but we only have to do it once
for i in range(100):
# give them 15 seconds for the first line, then just 2 seconds
# each additional line. (some sites have very high latency.)
if i == 0:
timeout = self.banner_timeout
else:
timeout = 2
try:
buf = self.packetizer.readline(timeout)
except ProxyCommandFailure:
raise
except Exception as e:
raise SSHException(
"Error reading SSH protocol banner" + str(e)
)
if buf[:4] == "SSH-":
break
self._log(DEBUG, "Banner: " + buf)
if buf[:4] != "SSH-":
raise SSHException('Indecipherable protocol version "' + buf + '"')
# save this server version string for later
self.remote_version = buf
self._log(DEBUG, "Remote version/idstring: {}".format(buf))
# pull off any attached comment
# NOTE: comment used to be stored in a variable and then...never used.
# since 2003. ca 877cd974b8182d26fa76d566072917ea67b64e67
i = buf.find(" ")
if i >= 0:
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split("-", 2)
if len(segs) < 3:
raise SSHException("Invalid SSH banner")
version = segs[1]
client = segs[2]
if version != "1.99" and version != "2.0":
msg = "Incompatible version ({} instead of 2.0)"
raise SSHException(msg.format(version))
msg = "Connected (version {}, client {})".format(version, client)
self._log(INFO, msg)
    def _send_kex_init(self):
        """Announce to the other side that we'd like to negotiate keys, and
        list the algorithm preferences in a KEXINIT packet."""
        self.clear_to_send_lock.acquire()
        try:
            # block user packets while the (re)negotiation runs
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        self.gss_kex_used = False
        self.in_kex = True
        if self.server_mode:
            mp_required_prefix = "diffie-hellman-group-exchange-sha"
            kex_mp = [
                k
                for k in self._preferred_kex
                if k.startswith(mp_required_prefix)
            ]
            if (self._modulus_pack is None) and (len(kex_mp) > 0):
                # can't do group-exchange if we don't have a pack of potential
                # primes
                pkex = [
                    k
                    for k in self.get_security_options().kex
                    if not k.startswith(mp_required_prefix)
                ]
                self.get_security_options().kex = pkex
            # only offer host key types for which a key was actually loaded
            available_server_keys = list(
                filter(
                    list(self.server_key_dict.keys()).__contains__,
                    self._preferred_keys,
                )
            )
        else:
            available_server_keys = self._preferred_keys
        m = Message()
        m.add_byte(cMSG_KEXINIT)
        m.add_bytes(os.urandom(16))
        m.add_list(self._preferred_kex)
        m.add_list(available_server_keys)
        # client-to-server and server-to-client lists are sent separately,
        # but we offer the same preferences in both directions
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_compression)
        m.add_list(self._preferred_compression)
        m.add_string(bytes())
        m.add_string(bytes())
        m.add_boolean(False)
        m.add_int(0)
        # save a copy for later (needed to compute a hash)
        self.local_kex_init = m.asbytes()
        self._send_message(m)
    def _parse_kex_init(self, m):
        """Parse the peer's KEXINIT and agree on kex, host key, cipher, MAC
        and compression algorithms for both directions.

        In server mode the client's preference order wins; in client mode
        our own preference order wins.

        :raises SSHException: if any algorithm category has no overlap.
        """
        m.get_bytes(16)  # cookie, discarded
        kex_algo_list = m.get_list()
        server_key_algo_list = m.get_list()
        client_encrypt_algo_list = m.get_list()
        server_encrypt_algo_list = m.get_list()
        client_mac_algo_list = m.get_list()
        server_mac_algo_list = m.get_list()
        client_compress_algo_list = m.get_list()
        server_compress_algo_list = m.get_list()
        client_lang_list = m.get_list()
        server_lang_list = m.get_list()
        kex_follows = m.get_boolean()
        m.get_int()  # unused
        self._log(
            DEBUG,
            "kex algos:"
            + str(kex_algo_list)
            + " server key:"
            + str(server_key_algo_list)
            + " client encrypt:"
            + str(client_encrypt_algo_list)
            + " server encrypt:"
            + str(server_encrypt_algo_list)
            + " client mac:"
            + str(client_mac_algo_list)
            + " server mac:"
            + str(server_mac_algo_list)
            + " client compress:"
            + str(client_compress_algo_list)
            + " server compress:"
            + str(server_compress_algo_list)
            + " client lang:"
            + str(client_lang_list)
            + " server lang:"
            + str(server_lang_list)
            + " kex follows?"
            + str(kex_follows),
        )
        # as a server, we pick the first item in the client's list that we
        # support.  as a client, we pick the first item in our list that the
        # server supports.
        if self.server_mode:
            agreed_kex = list(
                filter(self._preferred_kex.__contains__, kex_algo_list)
            )
        else:
            agreed_kex = list(
                filter(kex_algo_list.__contains__, self._preferred_kex)
            )
        if len(agreed_kex) == 0:
            raise SSHException(
                "Incompatible ssh peer (no acceptable kex algorithm)"
            )  # noqa
        self.kex_engine = self._kex_info[agreed_kex[0]](self)
        self._log(DEBUG, "Kex agreed: {}".format(agreed_kex[0]))
        # host key type
        if self.server_mode:
            available_server_keys = list(
                filter(
                    list(self.server_key_dict.keys()).__contains__,
                    self._preferred_keys,
                )
            )
            agreed_keys = list(
                filter(
                    available_server_keys.__contains__, server_key_algo_list
                )
            )
        else:
            agreed_keys = list(
                filter(server_key_algo_list.__contains__, self._preferred_keys)
            )
        if len(agreed_keys) == 0:
            raise SSHException(
                "Incompatible ssh peer (no acceptable host key)"
            )  # noqa
        self.host_key_type = agreed_keys[0]
        if self.server_mode and (self.get_server_key() is None):
            raise SSHException(
                "Incompatible ssh peer (can't match requested host key type)"
            )  # noqa
        self._log_agreement("HostKey", agreed_keys[0], agreed_keys[0])
        # ciphers ("local" = what we send, "remote" = what we receive)
        if self.server_mode:
            agreed_local_ciphers = list(
                filter(
                    self._preferred_ciphers.__contains__,
                    server_encrypt_algo_list,
                )
            )
            agreed_remote_ciphers = list(
                filter(
                    self._preferred_ciphers.__contains__,
                    client_encrypt_algo_list,
                )
            )
        else:
            agreed_local_ciphers = list(
                filter(
                    client_encrypt_algo_list.__contains__,
                    self._preferred_ciphers,
                )
            )
            agreed_remote_ciphers = list(
                filter(
                    server_encrypt_algo_list.__contains__,
                    self._preferred_ciphers,
                )
            )
        if len(agreed_local_ciphers) == 0 or len(agreed_remote_ciphers) == 0:
            raise SSHException(
                "Incompatible ssh server (no acceptable ciphers)"
            )  # noqa
        self.local_cipher = agreed_local_ciphers[0]
        self.remote_cipher = agreed_remote_ciphers[0]
        self._log_agreement(
            "Cipher", local=self.local_cipher, remote=self.remote_cipher
        )
        # MACs
        if self.server_mode:
            agreed_remote_macs = list(
                filter(self._preferred_macs.__contains__, client_mac_algo_list)
            )
            agreed_local_macs = list(
                filter(self._preferred_macs.__contains__, server_mac_algo_list)
            )
        else:
            agreed_local_macs = list(
                filter(client_mac_algo_list.__contains__, self._preferred_macs)
            )
            agreed_remote_macs = list(
                filter(server_mac_algo_list.__contains__, self._preferred_macs)
            )
        if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
            raise SSHException("Incompatible ssh server (no acceptable macs)")
        self.local_mac = agreed_local_macs[0]
        self.remote_mac = agreed_remote_macs[0]
        self._log_agreement(
            "MAC", local=self.local_mac, remote=self.remote_mac
        )
        # compression
        if self.server_mode:
            agreed_remote_compression = list(
                filter(
                    self._preferred_compression.__contains__,
                    client_compress_algo_list,
                )
            )
            agreed_local_compression = list(
                filter(
                    self._preferred_compression.__contains__,
                    server_compress_algo_list,
                )
            )
        else:
            agreed_local_compression = list(
                filter(
                    client_compress_algo_list.__contains__,
                    self._preferred_compression,
                )
            )
            agreed_remote_compression = list(
                filter(
                    server_compress_algo_list.__contains__,
                    self._preferred_compression,
                )
            )
        if (
            len(agreed_local_compression) == 0
            or len(agreed_remote_compression) == 0
        ):
            msg = "Incompatible ssh server (no acceptable compression)"
            msg += " {!r} {!r} {!r}"
            raise SSHException(
                msg.format(
                    agreed_local_compression,
                    agreed_remote_compression,
                    self._preferred_compression,
                )
            )
        self.local_compression = agreed_local_compression[0]
        self.remote_compression = agreed_remote_compression[0]
        self._log_agreement(
            "Compression",
            local=self.local_compression,
            remote=self.remote_compression,
        )
        # save for computing hash later...
        # now wait!  openssh has a bug (and others might too) where there are
        # actually some extra bytes (one NUL byte in openssh's case) added to
        # the end of the packet but not parsed.  turns out we need to throw
        # away those bytes because they aren't part of the hash.
        self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
    def _activate_inbound(self):
        """Switch on newly negotiated encryption/MAC for the inbound side.

        Key derivation letters ("A".."F") follow RFC 4253 sec. 7.2; which
        letter applies depends on whether we are the client or the server.
        """
        block_size = self._cipher_info[self.remote_cipher]["block-size"]
        if self.server_mode:
            IV_in = self._compute_key("A", block_size)
            key_in = self._compute_key(
                "C", self._cipher_info[self.remote_cipher]["key-size"]
            )
        else:
            IV_in = self._compute_key("B", block_size)
            key_in = self._compute_key(
                "D", self._cipher_info[self.remote_cipher]["key-size"]
            )
        engine = self._get_cipher(
            self.remote_cipher, key_in, IV_in, self._DECRYPT
        )
        mac_size = self._mac_info[self.remote_mac]["size"]
        mac_engine = self._mac_info[self.remote_mac]["class"]
        # initial mac keys are done in the hash's natural size (not the
        # potentially truncated transmission size)
        if self.server_mode:
            mac_key = self._compute_key("E", mac_engine().digest_size)
        else:
            mac_key = self._compute_key("F", mac_engine().digest_size)
        self.packetizer.set_inbound_cipher(
            engine, block_size, mac_engine, mac_size, mac_key
        )
        compress_in = self._compression_info[self.remote_compression][1]
        if compress_in is not None and (
            self.remote_compression != "zlib@openssh.com" or self.authenticated
        ):
            # zlib@openssh.com is deferred until after authentication
            self._log(DEBUG, "Switching on inbound compression ...")
            self.packetizer.set_inbound_compressor(compress_in())
    def _activate_outbound(self):
        """Send NEWKEYS and switch on negotiated outbound encryption/MAC.

        Mirror image of ``_activate_inbound``: the derivation letters swap
        depending on client vs server role.
        """
        m = Message()
        m.add_byte(cMSG_NEWKEYS)
        self._send_message(m)
        block_size = self._cipher_info[self.local_cipher]["block-size"]
        if self.server_mode:
            IV_out = self._compute_key("B", block_size)
            key_out = self._compute_key(
                "D", self._cipher_info[self.local_cipher]["key-size"]
            )
        else:
            IV_out = self._compute_key("A", block_size)
            key_out = self._compute_key(
                "C", self._cipher_info[self.local_cipher]["key-size"]
            )
        engine = self._get_cipher(
            self.local_cipher, key_out, IV_out, self._ENCRYPT
        )
        mac_size = self._mac_info[self.local_mac]["size"]
        mac_engine = self._mac_info[self.local_mac]["class"]
        # initial mac keys are done in the hash's natural size (not the
        # potentially truncated transmission size)
        if self.server_mode:
            mac_key = self._compute_key("F", mac_engine().digest_size)
        else:
            mac_key = self._compute_key("E", mac_engine().digest_size)
        sdctr = self.local_cipher.endswith("-ctr")
        self.packetizer.set_outbound_cipher(
            engine, block_size, mac_engine, mac_size, mac_key, sdctr
        )
        compress_out = self._compression_info[self.local_compression][0]
        if compress_out is not None and (
            self.local_compression != "zlib@openssh.com" or self.authenticated
        ):
            # zlib@openssh.com is deferred until after authentication
            self._log(DEBUG, "Switching on outbound compression ...")
            self.packetizer.set_outbound_compressor(compress_out())
        if not self.packetizer.need_rekey():
            self.in_kex = False
        # we always expect to receive NEWKEYS now
        self._expect_packet(MSG_NEWKEYS)
    def _auth_trigger(self):
        """Called on successful auth: start any deferred compression."""
        self.authenticated = True
        # delayed initiation of compression
        if self.local_compression == "zlib@openssh.com":
            compress_out = self._compression_info[self.local_compression][0]
            self._log(DEBUG, "Switching on outbound compression ...")
            self.packetizer.set_outbound_compressor(compress_out())
        if self.remote_compression == "zlib@openssh.com":
            compress_in = self._compression_info[self.remote_compression][1]
            self._log(DEBUG, "Switching on inbound compression ...")
            self.packetizer.set_inbound_compressor(compress_in())
    def _parse_newkeys(self, m):
        """Handle MSG_NEWKEYS: activate inbound keys and finish the kex."""
        self._log(DEBUG, "Switch to new keys ...")
        self._activate_inbound()
        # can also free a bunch of stuff here
        self.local_kex_init = self.remote_kex_init = None
        self.K = None
        self.kex_engine = None
        if self.server_mode and (self.auth_handler is None):
            # create auth handler for server mode
            self.auth_handler = AuthHandler(self)
        if not self.initial_kex_done:
            # this was the first key exchange
            self.initial_kex_done = True
        # send an event?
        if self.completion_event is not None:
            self.completion_event.set()
        # it's now okay to send data again (if this was a re-key)
        if not self.packetizer.need_rekey():
            self.in_kex = False
        self.clear_to_send_lock.acquire()
        try:
            # reopen the gate for user packets
            self.clear_to_send.set()
        finally:
            self.clear_to_send_lock.release()
        return
    def _parse_disconnect(self, m):
        """Handle a peer-initiated disconnect: log its code and reason."""
        code = m.get_int()
        desc = m.get_text()
        self._log(INFO, "Disconnect (code {:d}): {}".format(code, desc))
    def _parse_global_request(self, m):
        """Handle MSG_GLOBAL_REQUEST: dispatch known request kinds to the
        server object and, if the peer asked for a reply, send
        REQUEST_SUCCESS (with any extra payload) or REQUEST_FAILURE.
        """
        kind = m.get_text()
        self._log(DEBUG, 'Received global request "{}"'.format(kind))
        want_reply = m.get_boolean()
        if not self.server_mode:
            # clients reject all global requests coming from the server
            self._log(
                DEBUG,
                'Rejecting "{}" global request from server.'.format(kind),
            )
            ok = False
        elif kind == "tcpip-forward":
            address = m.get_text()
            port = m.get_int()
            ok = self.server_object.check_port_forward_request(address, port)
            if ok:
                # pack the (truthy) result into the success reply payload
                ok = (ok,)
        elif kind == "cancel-tcpip-forward":
            address = m.get_text()
            port = m.get_int()
            self.server_object.cancel_port_forward_request(address, port)
            ok = True
        else:
            ok = self.server_object.check_global_request(kind, m)
        extra = ()
        if type(ok) is tuple:
            # a tuple result means success plus extra reply data
            extra = ok
            ok = True
        if want_reply:
            msg = Message()
            if ok:
                msg.add_byte(cMSG_REQUEST_SUCCESS)
                msg.add(*extra)
            else:
                msg.add_byte(cMSG_REQUEST_FAILURE)
            self._send_message(msg)
    def _parse_request_success(self, m):
        """Record a positive reply to our last global request and wake any
        thread waiting on it.
        """
        self._log(DEBUG, "Global request successful.")
        self.global_response = m
        if self.completion_event is not None:
            self.completion_event.set()
    def _parse_request_failure(self, m):
        """Record a negative reply (None) to our last global request and
        wake any thread waiting on it.
        """
        self._log(DEBUG, "Global request denied.")
        self.global_response = None
        if self.completion_event is not None:
            self.completion_event.set()
    def _parse_channel_open_success(self, m):
        """Handle CHANNEL_OPEN_SUCCESS: bind the server's channel id and
        window parameters to our local channel and signal the opener.
        """
        chanid = m.get_int()
        server_chanid = m.get_int()
        server_window_size = m.get_int()
        server_max_packet_size = m.get_int()
        chan = self._channels.get(chanid)
        if chan is None:
            self._log(WARNING, "Success for unrequested channel! [??]")
            return
        self.lock.acquire()
        try:
            chan._set_remote_channel(
                server_chanid, server_window_size, server_max_packet_size
            )
            self._log(DEBUG, "Secsh channel {:d} opened.".format(chanid))
            if chanid in self.channel_events:
                # wake the thread blocked in the open call
                self.channel_events[chanid].set()
                del self.channel_events[chanid]
        finally:
            self.lock.release()
        return
    def _parse_channel_open_failure(self, m):
        """Handle CHANNEL_OPEN_FAILURE: save a ChannelException for the
        opener, drop the pending channel, and wake the waiting thread.
        """
        chanid = m.get_int()
        reason = m.get_int()
        reason_str = m.get_text()
        # language tag: read (to keep the wire position) and discarded
        m.get_text()
        reason_text = CONNECTION_FAILED_CODE.get(reason, "(unknown code)")
        self._log(
            ERROR,
            "Secsh channel {:d} open FAILED: {}: {}".format(
                chanid, reason_str, reason_text
            ),
        )
        self.lock.acquire()
        try:
            self.saved_exception = ChannelException(reason, reason_text)
            if chanid in self.channel_events:
                self._channels.delete(chanid)
                # NOTE(review): this inner membership test repeats the outer
                # one and looks redundant; kept as-is.
                if chanid in self.channel_events:
                    self.channel_events[chanid].set()
                    del self.channel_events[chanid]
        finally:
            self.lock.release()
        return
    def _parse_channel_open(self, m):
        """Handle an incoming CHANNEL_OPEN request.

        Depending on the channel kind (agent forwarding, x11, forwarded
        tcpip, or a server-side request), allocate a local channel id,
        consult the matching handler/server object, and reply with either
        CHANNEL_OPEN_SUCCESS or CHANNEL_OPEN_FAILURE. On success the new
        Channel is handed to the kind-specific handler (or queued for
        ``accept`` in the generic server case).
        """
        kind = m.get_text()
        chanid = m.get_int()
        initial_window_size = m.get_int()
        max_packet_size = m.get_int()
        reject = False
        if (
            kind == "auth-agent@openssh.com"
            and self._forward_agent_handler is not None
        ):
            self._log(DEBUG, "Incoming forward agent connection")
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif (kind == "x11") and (self._x11_handler is not None):
            origin_addr = m.get_text()
            origin_port = m.get_int()
            self._log(
                DEBUG,
                "Incoming x11 connection from {}:{:d}".format(
                    origin_addr, origin_port
                ),
            )
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif (kind == "forwarded-tcpip") and (self._tcp_handler is not None):
            server_addr = m.get_text()
            server_port = m.get_int()
            origin_addr = m.get_text()
            origin_port = m.get_int()
            self._log(
                DEBUG,
                "Incoming tcp forwarded connection from {}:{:d}".format(
                    origin_addr, origin_port
                ),
            )
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif not self.server_mode:
            # clients reject any other server-initiated channel kinds
            self._log(
                DEBUG,
                'Rejecting "{}" channel request from server.'.format(kind),
            )
            reject = True
            reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
        else:
            # server mode: ask the server object whether to accept
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
            if kind == "direct-tcpip":
                dest_addr = m.get_text()
                dest_port = m.get_int()
                origin_addr = m.get_text()
                origin_port = m.get_int()
                reason = self.server_object.check_channel_direct_tcpip_request(
                    my_chanid,
                    (origin_addr, origin_port),
                    (dest_addr, dest_port),
                )
            else:
                reason = self.server_object.check_channel_request(
                    kind, my_chanid
                )
            if reason != OPEN_SUCCEEDED:
                self._log(
                    DEBUG,
                    'Rejecting "{}" channel request from client.'.format(kind),
                )
                reject = True
        if reject:
            msg = Message()
            msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
            msg.add_int(chanid)
            msg.add_int(reason)
            msg.add_string("")
            msg.add_string("en")
            self._send_message(msg)
            return
        # accepted: register the channel and wire up its window parameters
        chan = Channel(my_chanid)
        self.lock.acquire()
        try:
            self._channels.put(my_chanid, chan)
            self.channels_seen[my_chanid] = True
            chan._set_transport(self)
            chan._set_window(
                self.default_window_size, self.default_max_packet_size
            )
            chan._set_remote_channel(
                chanid, initial_window_size, max_packet_size
            )
        finally:
            self.lock.release()
        m = Message()
        m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
        m.add_int(chanid)
        m.add_int(my_chanid)
        m.add_int(self.default_window_size)
        m.add_int(self.default_max_packet_size)
        self._send_message(m)
        self._log(
            DEBUG, "Secsh channel {:d} ({}) opened.".format(my_chanid, kind)
        )
        if kind == "auth-agent@openssh.com":
            self._forward_agent_handler(chan)
        elif kind == "x11":
            self._x11_handler(chan, (origin_addr, origin_port))
        elif kind == "forwarded-tcpip":
            chan.origin_addr = (origin_addr, origin_port)
            self._tcp_handler(
                chan, (origin_addr, origin_port), (server_addr, server_port)
            )
        else:
            self._queue_incoming_channel(chan)
    def _parse_debug(self, m):
        """Handle MSG_DEBUG: log the message text."""
        # always_display flag: read to advance the wire position, discarded
        m.get_boolean()
        msg = m.get_string()
        # language tag: read and discarded
        m.get_string()
        self._log(DEBUG, "Debug msg: {}".format(util.safe_string(msg)))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return None, [], {}
return self.subsystem_table[name]
finally:
self.lock.release()
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
class SecurityOptions(object):
    """A live view of a transport's algorithm-negotiation preferences.

    Every property reads from and writes to the wrapped transport, so
    assignments take effect on the transport directly. Assigned values
    must be lists or tuples whose entries are all known to the transport.
    """

    __slots__ = "_transport"

    def __init__(self, transport):
        self._transport = transport

    def __repr__(self):
        return "<paramiko.SecurityOptions for {!r}>".format(self._transport)

    def _set(self, name, orig, x):
        # Normalize a list to a tuple; reject anything else.
        if type(x) is list:
            x = tuple(x)
        if type(x) is not tuple:
            raise TypeError("expected tuple or list")
        known = list(getattr(self._transport, orig).keys())
        unknown = [entry for entry in x if entry not in known]
        if len(unknown) > 0:
            raise ValueError("unknown cipher")
        setattr(self._transport, name, x)

    @property
    def ciphers(self):
        """Symmetric encryption ciphers, in preference order."""
        return self._transport._preferred_ciphers

    @ciphers.setter
    def ciphers(self, x):
        self._set("_preferred_ciphers", "_cipher_info", x)

    @property
    def digests(self):
        """Digest (MAC) algorithms, in preference order."""
        return self._transport._preferred_macs

    @digests.setter
    def digests(self, x):
        self._set("_preferred_macs", "_mac_info", x)

    @property
    def key_types(self):
        """Host key algorithms, in preference order."""
        return self._transport._preferred_keys

    @key_types.setter
    def key_types(self, x):
        self._set("_preferred_keys", "_key_info", x)

    @property
    def kex(self):
        """Key exchange algorithms, in preference order."""
        return self._transport._preferred_kex

    @kex.setter
    def kex(self, x):
        self._set("_preferred_kex", "_kex_info", x)

    @property
    def compression(self):
        """Compression schemes, in preference order."""
        return self._transport._preferred_compression

    @compression.setter
    def compression(self, x):
        self._set("_preferred_compression", "_compression_info", x)
class ChannelMap(object):
    """A thread-safe mapping from channel id to channel object.

    Values are held via weak references, so an entry vanishes once no
    other reference to the channel remains.
    """

    def __init__(self):
        self._map = weakref.WeakValueDictionary()
        self._lock = threading.Lock()

    def put(self, chanid, chan):
        """Store ``chan`` under ``chanid``."""
        with self._lock:
            self._map[chanid] = chan

    def get(self, chanid):
        """Return the channel for ``chanid``, or None if absent."""
        with self._lock:
            return self._map.get(chanid, None)

    def delete(self, chanid):
        """Remove ``chanid`` if present; missing ids are ignored."""
        with self._lock:
            self._map.pop(chanid, None)

    def values(self):
        """Return a snapshot list of all live channels."""
        with self._lock:
            return list(self._map.values())

    def __len__(self):
        with self._lock:
            return len(self._map)
| true | true |
f72eef8c95e4f3ef8a9225bc7ec3eeffb64fa31f | 131 | py | Python | Gluharniki.py | regostar/competitive_prog_py | 56db79c431c5823039e6749063f3b21d5e79ca44 | [
"MIT"
] | null | null | null | Gluharniki.py | regostar/competitive_prog_py | 56db79c431c5823039e6749063f3b21d5e79ca44 | [
"MIT"
] | null | null | null | Gluharniki.py | regostar/competitive_prog_py | 56db79c431c5823039e6749063f3b21d5e79ca44 | [
"MIT"
] | null | null | null | from math import ceil
t = int(input())
for _ in range(t):
row, col = map(int,input().split())
print(ceil((row * col) / 2)) | 21.833333 | 39 | 0.587786 | from math import ceil
t = int(input())
for _ in range(t):
row, col = map(int,input().split())
print(ceil((row * col) / 2)) | true | true |
f72ef03ed69fe6f6031c918f52dc15739624e63f | 50,478 | py | Python | scipy/sparse/compressed.py | ririw/scipy | 680ecf8c52966343827903e6b7983b1ef7323fe2 | [
"BSD-3-Clause"
] | null | null | null | scipy/sparse/compressed.py | ririw/scipy | 680ecf8c52966343827903e6b7983b1ef7323fe2 | [
"BSD-3-Clause"
] | null | null | null | scipy/sparse/compressed.py | ririw/scipy | 680ecf8c52966343827903e6b7983b1ef7323fe2 | [
"BSD-3-Clause"
] | null | null | null | """Base class for sparse matrix formats using compressed storage."""
from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib._util import _prune_array
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
csr_sample_values, csr_row_index, csr_row_slice,
csr_column_index1, csr_column_index2)
from ._index import IndexMixin
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, isintlike, get_index_dtype,
downcast_intp_index, get_sum_dtype, check_shape,
matrix, asmatrix, is_pydata_spmatrix)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
"""base matrix class for compressed row- and column-oriented matrices"""
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a compressed sparse matrix.

        ``arg1`` may be another sparse matrix, a dense array-like, a
        shape tuple ``(M, N)``, a ``(data, ij)`` pair, or a
        ``(data, indices, indptr)`` triple.
        """
        _data_matrix.__init__(self)
        if isspmatrix(arg1):
            if arg1.format == self.format and copy:
                arg1 = arg1.copy()
            else:
                arg1 = arg1.asformat(self.format)
            self._set_self(arg1)
        elif isinstance(arg1, tuple):
            if isshape(arg1):
                # It's a tuple of matrix dimensions (M, N)
                # create empty matrix
                self._shape = check_shape(arg1)
                M, N = self.shape
                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                idx_dtype = get_index_dtype(maxval=max(M, N))
                self.data = np.zeros(0, getdtype(dtype, default=float))
                self.indices = np.zeros(0, idx_dtype)
                self.indptr = np.zeros(self._swap((M, N))[0] + 1,
                                       dtype=idx_dtype)
            else:
                if len(arg1) == 2:
                    # (data, ij) format
                    from .coo import coo_matrix
                    other = self.__class__(coo_matrix(arg1, shape=shape))
                    self._set_self(other)
                elif len(arg1) == 3:
                    # (data, indices, indptr) format
                    (data, indices, indptr) = arg1
                    # Select index dtype large enough to pass array and
                    # scalar parameters to sparsetools
                    maxval = None
                    if shape is not None:
                        maxval = max(shape)
                    idx_dtype = get_index_dtype((indices, indptr),
                                                maxval=maxval,
                                                check_contents=True)
                    self.indices = np.array(indices, copy=copy,
                                            dtype=idx_dtype)
                    self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
                    self.data = np.array(data, copy=copy, dtype=dtype)
                else:
                    raise ValueError("unrecognized {}_matrix "
                                     "constructor usage".format(self.format))
        else:
            # must be dense
            try:
                arg1 = np.asarray(arg1)
            except Exception:
                raise ValueError("unrecognized {}_matrix constructor usage"
                                 "".format(self.format))
            from .coo import coo_matrix
            self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
        # Read matrix dimensions given, if any
        if shape is not None:
            self._shape = check_shape(shape)
        else:
            if self.shape is None:
                # shape not already set, try to infer dimensions
                try:
                    major_dim = len(self.indptr) - 1
                    minor_dim = self.indices.max() + 1
                except Exception:
                    raise ValueError('unable to infer matrix dimensions')
                else:
                    self._shape = check_shape(self._swap((major_dim,
                                                          minor_dim)))
        if dtype is not None:
            self.data = self.data.astype(dtype, copy=False)
        # cheap structural validation only (full_check would be O(nnz))
        self.check_format(full_check=False)
def getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
"""take the member variables of other and assign them to self"""
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self._shape = check_shape(other.shape)
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

        Parameters
        ----------
        full_check : bool, optional
            If `True`, rigorous check, O(N) operations. Otherwise
            basic check, O(1) operations (default True).

        Raises
        ------
        ValueError
            If the array shapes, index pointer, or (under full_check)
            the index values are inconsistent.
        """
        # use _swap to determine proper bounds
        major_name, minor_name = self._swap(('row', 'column'))
        major_dim, minor_dim = self._swap(self.shape)
        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype ({})"
                 "".format(self.indptr.dtype.name), stacklevel=3)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype ({})"
                 "".format(self.indices.dtype.name), stacklevel=3)
        # normalize both index arrays to a common integer dtype
        idx_dtype = get_index_dtype((self.indptr, self.indices))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)
        # check array shapes
        for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
            if x != 1:
                raise ValueError('data, indices, and indptr should be 1-D')
        # check index pointer
        if (len(self.indptr) != major_dim + 1):
            raise ValueError("index pointer size ({}) should be ({})"
                             "".format(len(self.indptr), major_dim + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")
        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                             "the size of index and data arrays")
        self.prune()
        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= minor_dim:
                    raise ValueError("{} index values must be < {}"
                                     "".format(minor_name, minor_dim))
                if self.indices.min() < 0:
                    raise ValueError("{} index values must be >= 0"
                                     "".format(minor_name))
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                     "non-decreasing sequence")
        # if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order. Sorting indices.')
        #    self.sort_indices()
        #    assert(self.has_sorted_indices())
        # TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
    def __eq__(self, other):
        """Element-wise equality.

        Returns a sparse boolean matrix for scalar/sparse operands, a
        dense result for dense operands, and the plain bool False when
        shapes mismatch or the operand type is unsupported.
        """
        # Scalar other.
        if isscalarlike(other):
            if np.isnan(other):
                # nothing equals NaN: all-False (empty) boolean matrix
                return self.__class__(self.shape, dtype=np.bool_)

            if other == 0:
                warn("Comparing a sparse matrix with 0 using == is inefficient"
                     ", try using != instead.", SparseEfficiencyWarning,
                     stacklevel=3)
                all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
                inv = self._scalar_binopt(other, operator.ne)
                return all_true - inv
            else:
                return self._scalar_binopt(other, operator.eq)
        # Dense other.
        elif isdense(other):
            return self.todense() == other
        # Pydata sparse other.
        elif is_pydata_spmatrix(other):
            return NotImplemented
        # Sparse other.
        elif isspmatrix(other):
            warn("Comparing sparse matrices using == is inefficient, try using"
                 " != instead.", SparseEfficiencyWarning, stacklevel=3)
            # TODO sparse broadcasting
            if self.shape != other.shape:
                return False
            elif self.format != other.format:
                other = other.asformat(self.format)
            # equality computed as the complement of inequality
            res = self._binopt(other, '_ne_')
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            return all_true - res
        else:
            return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is"
" inefficient", SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.",
SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
# TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other, '_ne_')
else:
return True
    def _inequality(self, other, op, op_name, bad_scalar_msg):
        """Shared implementation behind <, >, <= and >=.

        ``op`` is the Python comparison, ``op_name`` the name of the C
        binop kernel, and ``bad_scalar_msg`` the efficiency warning issued
        when the scalar comparison would densify the result.
        """
        # Scalar other.
        if isscalarlike(other):
            if 0 == other and op_name in ('_le_', '_ge_'):
                raise NotImplementedError(" >= and <= don't work with 0.")
            elif op(0, other):
                # implicit zeros would compare True -> dense result
                warn(bad_scalar_msg, SparseEfficiencyWarning)
                other_arr = np.empty(self.shape, dtype=np.result_type(other))
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                return self._scalar_binopt(other, op)
        # Dense other.
        elif isdense(other):
            return op(self.todense(), other)
        # Sparse other.
        elif isspmatrix(other):
            # TODO sparse broadcasting
            if self.shape != other.shape:
                raise ValueError("inconsistent shapes")
            elif self.format != other.format:
                other = other.asformat(self.format)
            if op_name not in ('_ge_', '_le_'):
                return self._binopt(other, op_name)

            warn("Comparing sparse matrices using >= and <= is inefficient, "
                 "using <, >, or !=, instead.", SparseEfficiencyWarning)
            # a >= b computed as not (a < b); likewise for <=
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
            return all_true - res
        else:
            raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self, other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
#################################
# Arithmetic operator overrides #
#################################
    def _add_dense(self, other):
        """Add a dense array; returns a dense matrix."""
        if other.shape != self.shape:
            raise ValueError('Incompatible shapes.')
        dtype = upcast_char(self.dtype.char, other.dtype.char)
        # copy `other` in this format's natural memory order (C for CSR,
        # F for CSC) so the C kernel can scatter entries into it in place
        order = self._swap('CF')[0]
        result = np.array(other, dtype=dtype, order=order, copy=True)
        M, N = self._swap(self.shape)
        # csr_todense always wants a C-contiguous target
        y = result if result.flags.c_contiguous else result.T
        csr_todense(M, N, self.indptr, self.indices, self.data, y)
        return matrix(result, copy=False)
    def _add_sparse(self, other):
        # Element-wise sum via the C binop kernel.
        return self._binopt(other, '_plus_')
    def _sub_sparse(self, other):
        # Element-wise difference via the C binop kernel.
        return self._binopt(other, '_minus_')
    def multiply(self, other):
        """Point-wise multiplication by another matrix, vector, or
        scalar.

        Scalar and same-shape sparse cases are handled directly; all
        vector-shaped operands are broadcast against the matrix, with
        several special cases for rows, columns, and 1x1 operands.
        """
        # Scalar multiplication.
        if isscalarlike(other):
            return self._mul_scalar(other)
        # Sparse matrix or vector.
        if isspmatrix(other):
            if self.shape == other.shape:
                other = self.__class__(other)
                return self._binopt(other, '_elmul_')
            # Single element.
            elif other.shape == (1, 1):
                return self._mul_scalar(other.toarray()[0, 0])
            elif self.shape == (1, 1):
                return other._mul_scalar(self.toarray()[0, 0])
            # A row times a column.
            elif self.shape[1] == 1 and other.shape[0] == 1:
                return self._mul_sparse_matrix(other.tocsc())
            elif self.shape[0] == 1 and other.shape[1] == 1:
                return other._mul_sparse_matrix(self.tocsc())
            # Row vector times matrix. other is a row.
            elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
                # broadcasting a row is a product with a diagonal matrix
                other = dia_matrix((other.toarray().ravel(), [0]),
                                   shape=(other.shape[1], other.shape[1]))
                return self._mul_sparse_matrix(other)
            # self is a row.
            elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
                copy = dia_matrix((self.toarray().ravel(), [0]),
                                  shape=(self.shape[1], self.shape[1]))
                return other._mul_sparse_matrix(copy)
            # Column vector times matrix. other is a column.
            elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
                other = dia_matrix((other.toarray().ravel(), [0]),
                                   shape=(other.shape[0], other.shape[0]))
                return other._mul_sparse_matrix(self)
            # self is a column.
            elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
                copy = dia_matrix((self.toarray().ravel(), [0]),
                                  shape=(self.shape[0], self.shape[0]))
                return copy._mul_sparse_matrix(other)
            else:
                raise ValueError("inconsistent shapes")
        # Assume other is a dense matrix/array, which produces a single-item
        # object array if other isn't convertible to ndarray.
        other = np.atleast_2d(other)
        if other.ndim != 2:
            return np.multiply(self.toarray(), other)
        # Single element / wrapped object.
        if other.size == 1:
            return self._mul_scalar(other.flat[0])
        # Fast case for trivial sparse matrix.
        elif self.shape == (1, 1):
            return np.multiply(self.toarray()[0, 0], other)
        from .coo import coo_matrix
        ret = self.tocoo()
        # Matching shapes.
        if self.shape == other.shape:
            data = np.multiply(ret.data, other[ret.row, ret.col])
        # Sparse row vector times...
        elif self.shape[0] == 1:
            if other.shape[1] == 1:  # Dense column vector.
                data = np.multiply(ret.data, other)
            elif other.shape[1] == self.shape[1]:  # Dense matrix.
                data = np.multiply(ret.data, other[:, ret.col])
            else:
                raise ValueError("inconsistent shapes")
            row = np.repeat(np.arange(other.shape[0]), len(ret.row))
            col = np.tile(ret.col, other.shape[0])
            return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
                              shape=(other.shape[0], self.shape[1]),
                              copy=False)
        # Sparse column vector times...
        elif self.shape[1] == 1:
            if other.shape[0] == 1:  # Dense row vector.
                data = np.multiply(ret.data[:, None], other)
            elif other.shape[0] == self.shape[0]:  # Dense matrix.
                data = np.multiply(ret.data[:, None], other[ret.row])
            else:
                raise ValueError("inconsistent shapes")
            row = np.repeat(ret.row, other.shape[1])
            col = np.tile(np.arange(other.shape[1]), len(ret.col))
            return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
                              shape=(self.shape[0], other.shape[1]),
                              copy=False)
        # Sparse matrix times dense row vector.
        elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
            data = np.multiply(ret.data, other[:, ret.col].ravel())
        # Sparse matrix times dense column vector.
        elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
            data = np.multiply(ret.data, other[ret.row].ravel())
        else:
            raise ValueError("inconsistent shapes")
        ret.data = data.view(np.ndarray).ravel()
        return ret
###########################
# Multiplication handlers #
###########################
    def _mul_vector(self, other):
        """Matrix-vector product via the C matvec kernel."""
        M, N = self.shape
        # output array
        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
                                               other.dtype.char))
        # csr_matvec or csc_matvec
        fn = getattr(_sparsetools, self.format + '_matvec')
        fn(M, N, self.indptr, self.indices, self.data, other, result)
        return result
    def _mul_multivector(self, other):
        """Product with a dense 2-D array of column vectors, via the C
        matvecs kernel.
        """
        M, N = self.shape
        n_vecs = other.shape[1]  # number of column vectors
        result = np.zeros((M, n_vecs),
                          dtype=upcast_char(self.dtype.char, other.dtype.char))
        # csr_matvecs or csc_matvecs
        fn = getattr(_sparsetools, self.format + '_matvecs')
        fn(M, N, n_vecs, self.indptr, self.indices, self.data,
           other.ravel(), result.ravel())
        return result
    def _mul_sparse_matrix(self, other):
        """Sparse-sparse matrix product via the two-pass C matmat kernels."""
        M, K1 = self.shape
        K2, N = other.shape
        major_axis = self._swap((M, N))[0]
        other = self.__class__(other)  # convert to this format
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices))
        # pass 1: count the result's nonzeros so the output arrays can be
        # allocated exactly and with a wide enough index dtype
        fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
        nnz = fn(M, N,
                 np.asarray(self.indptr, dtype=idx_dtype),
                 np.asarray(self.indices, dtype=idx_dtype),
                 np.asarray(other.indptr, dtype=idx_dtype),
                 np.asarray(other.indices, dtype=idx_dtype))
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=nnz)
        indptr = np.empty(major_axis + 1, dtype=idx_dtype)
        indices = np.empty(nnz, dtype=idx_dtype)
        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
        # pass 2: fill the output arrays
        fn = getattr(_sparsetools, self.format + '_matmat')
        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)
        return self.__class__((data, indices, indptr), shape=(M, N))
    def diagonal(self, k=0):
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            raise ValueError("k exceeds matrix dimensions")
        # csr_diagonal / csc_diagonal C kernel
        fn = getattr(_sparsetools, self.format + "_diagonal")
        # length of the k-th diagonal of a rows x cols matrix
        y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
                     dtype=upcast(self.dtype))
        fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
           self.data, y)
        return y
    diagonal.__doc__ = spmatrix.diagonal.__doc__
#####################
# Other binary ops #
#####################
    def _maximum_minimum(self, other, npop, op_name, dense_check):
        """Shared implementation behind maximum() and minimum().

        ``npop`` is the NumPy ufunc, ``op_name`` the C binop kernel name,
        and ``dense_check`` a predicate on the scalar that decides whether
        the result densifies (implicit zeros would change).
        """
        if isscalarlike(other):
            if dense_check(other):
                warn("Taking maximum (minimum) with > 0 (< 0) number results"
                     " to a dense matrix.", SparseEfficiencyWarning,
                     stacklevel=3)
                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                # sparsity preserved: apply ufunc to stored data only
                self.sum_duplicates()
                new_data = npop(self.data, np.asarray(other))
                mat = self.__class__((new_data, self.indices, self.indptr),
                                     dtype=new_data.dtype, shape=self.shape)
                return mat
        elif isdense(other):
            return npop(self.todense(), other)
        elif isspmatrix(other):
            return self._binopt(other, op_name)
        else:
            raise ValueError("Operands not compatible.")
    def maximum(self, other):
        # densifies when the scalar is positive (implicit zeros change)
        return self._maximum_minimum(other, np.maximum,
                                     '_maximum_', lambda x: np.asarray(x) > 0)
    maximum.__doc__ = spmatrix.maximum.__doc__
    def minimum(self, other):
        # densifies when the scalar is negative (implicit zeros change)
        return self._maximum_minimum(other, np.minimum,
                                     '_minimum_', lambda x: np.asarray(x) < 0)
    minimum.__doc__ = spmatrix.minimum.__doc__
#####################
# Reduce operations #
#####################
    def sum(self, axis=None, dtype=None, out=None):
        """Sum the matrix over the given axis.  If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # The spmatrix base class already does axis=0 and axis=1 efficiently
        # so we only do the case axis=None here
        if (not hasattr(self, 'blocksize') and
                axis in self._swap(((1, -1), (0, 2)))[0]):
            # faster than multiplication for large minor axis in CSC/CSR
            res_dtype = get_sum_dtype(self.dtype)
            ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
            # reduce nonzeros along the minor axis, per major-axis row
            major_index, value = self._minor_reduce(np.add)
            ret[major_index] = value
            ret = asmatrix(ret)
            if axis % 2 == 1:
                ret = ret.T
            if out is not None and out.shape != ret.shape:
                raise ValueError('dimensions do not match')
            # empty-axes sum lets numpy handle dtype/out semantics
            return ret.sum(axis=(), dtype=dtype, out=out)
        # spmatrix will handle the remaining situations when axis
        # is in {None, -1, 0, 1}
        else:
            return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
    sum.__doc__ = spmatrix.sum.__doc__
    def _minor_reduce(self, ufunc, data=None):
        """Reduce nonzeros with a ufunc over the minor axis when non-empty

        Can be applied to a function of self.data by supplying data parameter.

        Warning: this does not call sum_duplicates()

        Returns
        -------
        major_index : array of ints
            Major indices where nonzero

        value : array of self.dtype
            Reduce result for nonzeros in each major_index
        """
        if data is None:
            data = self.data
        # rows/columns with at least one stored entry
        major_index = np.flatnonzero(np.diff(self.indptr))
        # reduceat over the start offset of each non-empty major slot
        value = ufunc.reduceat(data,
                               downcast_intp_index(self.indptr[major_index]))
        return major_index, value
#######################
# Getting and Setting #
#######################
def _get_intXint(self, row, col):
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data,
major, major + 1, minor, minor + 1)
return data.sum(dtype=self.dtype)
def _get_sliceXslice(self, row, col):
major, minor = self._swap((row, col))
if major.step in (1, None) and minor.step in (1, None):
return self._get_submatrix(major, minor, copy=True)
return self._major_slice(major)._minor_slice(minor)
    def _get_arrayXarray(self, row, col):
        # inner indexing
        idx_dtype = self.indices.dtype
        M, N = self._swap(self.shape)
        major, minor = self._swap((row, col))
        major = np.asarray(major, dtype=idx_dtype)
        minor = np.asarray(minor, dtype=idx_dtype)
        # gather one value per (major, minor) pair via the C sampler
        val = np.empty(major.size, dtype=self.dtype)
        csr_sample_values(M, N, self.indptr, self.indices, self.data,
                          major.size, major.ravel(), minor.ravel(), val)
        if major.ndim == 1:
            # 1-D index arrays produce a dense matrix row
            return asmatrix(val)
        return self.__class__(val.reshape(major.shape))
def _get_columnXarray(self, row, col):
# outer indexing
major, minor = self._swap((row, col))
return self._major_index_fancy(major)._minor_index_fancy(minor)
    def _major_index_fancy(self, idx):
        """Index along the major axis where idx is an array of ints.
        """
        idx_dtype = self.indices.dtype
        indices = np.asarray(idx, dtype=idx_dtype).ravel()
        _, N = self._swap(self.shape)
        M = len(indices)
        new_shape = self._swap((M, N))
        if M == 0:
            return self.__class__(new_shape)
        # new indptr is the cumulative nnz of the selected rows/columns
        row_nnz = np.diff(self.indptr)
        idx_dtype = self.indices.dtype
        res_indptr = np.zeros(M+1, dtype=idx_dtype)
        np.cumsum(row_nnz[idx], out=res_indptr[1:])
        # copy the selected slices with the C kernel
        nnz = res_indptr[-1]
        res_indices = np.empty(nnz, dtype=idx_dtype)
        res_data = np.empty(nnz, dtype=self.dtype)
        csr_row_index(M, indices, self.indptr, self.indices, self.data,
                      res_indices, res_data)
        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
    def _major_slice(self, idx, copy=False):
        """Index along the major axis where idx is a slice object.
        """
        if idx == slice(None):
            return self.copy() if copy else self
        M, N = self._swap(self.shape)
        start, stop, step = idx.indices(M)
        M = len(range(start, stop, step))
        new_shape = self._swap((M, N))
        if M == 0:
            return self.__class__(new_shape)
        # new indptr is the cumulative nnz of the selected rows/columns
        row_nnz = np.diff(self.indptr)
        idx_dtype = self.indices.dtype
        res_indptr = np.zeros(M+1, dtype=idx_dtype)
        np.cumsum(row_nnz[idx], out=res_indptr[1:])
        if step == 1:
            # contiguous selection: a plain array slice suffices
            all_idx = slice(self.indptr[start], self.indptr[stop])
            res_indices = np.array(self.indices[all_idx], copy=copy)
            res_data = np.array(self.data[all_idx], copy=copy)
        else:
            # strided selection: gather with the C kernel
            nnz = res_indptr[-1]
            res_indices = np.empty(nnz, dtype=idx_dtype)
            res_data = np.empty(nnz, dtype=self.dtype)
            csr_row_slice(start, stop, step, self.indptr, self.indices,
                          self.data, res_indices, res_data)
        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
    def _minor_index_fancy(self, idx):
        """Index along the minor axis where idx is an array of ints.
        """
        idx_dtype = self.indices.dtype
        idx = np.asarray(idx, dtype=idx_dtype).ravel()
        M, N = self._swap(self.shape)
        k = len(idx)
        new_shape = self._swap((M, k))
        if k == 0:
            return self.__class__(new_shape)
        # pass 1: count idx entries and compute new indptr
        col_offsets = np.zeros(N, dtype=idx_dtype)
        res_indptr = np.empty_like(self.indptr)
        csr_column_index1(k, idx, M, N, self.indptr, self.indices,
                          col_offsets, res_indptr)
        # pass 2: copy indices/data for selected idxs
        # (col_order restores the ordering the caller requested)
        col_order = np.argsort(idx).astype(idx_dtype, copy=False)
        nnz = res_indptr[-1]
        res_indices = np.empty(nnz, dtype=idx_dtype)
        res_data = np.empty(nnz, dtype=self.dtype)
        csr_column_index2(col_order, col_offsets, len(self.indices),
                          self.indices, self.data, res_indices, res_data)
        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
    def _minor_slice(self, idx, copy=False):
        """Index along the minor axis where idx is a slice object.
        """
        if idx == slice(None):
            return self.copy() if copy else self
        M, N = self._swap(self.shape)
        start, stop, step = idx.indices(N)
        N = len(range(start, stop, step))
        if N == 0:
            return self.__class__(self._swap((M, N)))
        if step == 1:
            # Contiguous minor slice has a dedicated fast path.
            return self._get_submatrix(minor=idx, copy=copy)
        # TODO: don't fall back to fancy indexing here
        return self._minor_index_fancy(np.arange(start, stop, step))
    def _get_submatrix(self, major=None, minor=None, copy=False):
        """Return a submatrix of this matrix.

        major, minor: None, int, or slice with step 1
        """
        M, N = self._swap(self.shape)
        i0, i1 = _process_slice(major, M)
        j0, j1 = _process_slice(minor, N)
        if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
            # Whole matrix requested; avoid the C call entirely.
            return self.copy() if copy else self
        indptr, indices, data = get_csr_submatrix(
            M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
        shape = self._swap((i1 - i0, j1 - j0))
        return self.__class__((data, indices, indptr), shape=shape,
                              dtype=self.dtype, copy=False)
def _set_intXint(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
    def _set_arrayXarray_sparse(self, row, col, x):
        """Assign a sparse block x (exposes .row/.col/.data, COO-style)
        at the positions given by the index arrays (row, col)."""
        # clear entries that will be overwritten
        self._zero_many(*self._swap((row, col)))
        M, N = row.shape  # matches col.shape
        # x may be a single row/column to broadcast across the target.
        broadcast_row = M != 1 and x.shape[0] == 1
        broadcast_col = N != 1 and x.shape[1] == 1
        r, c = x.row, x.col
        x = np.asarray(x.data, dtype=self.dtype)
        if broadcast_row:
            r = np.repeat(np.arange(M), len(r))
            c = np.tile(c, M)
            x = np.tile(x, M)
        if broadcast_col:
            r = np.repeat(r, N)
            c = np.tile(np.arange(N), len(c))
            x = np.repeat(x, N)
        # only assign entries in the new sparsity structure
        i, j = self._swap((row[r, c], col[r, c]))
        self._set_many(i, j, x)
    def _setdiag(self, values, k):
        """Set diagonal k to `values` (a 0-d value broadcasts along it)."""
        if 0 in self.shape:
            return
        M, N = self.shape
        broadcast = (values.ndim == 0)
        if k < 0:
            if broadcast:
                max_index = min(M + k, N)
            else:
                max_index = min(M + k, N, len(values))
            i = np.arange(max_index, dtype=self.indices.dtype)
            j = np.arange(max_index, dtype=self.indices.dtype)
            # Sub-diagonal: shift row indices down by |k|.
            i -= k
        else:
            if broadcast:
                max_index = min(M, N - k)
            else:
                max_index = min(M, N - k, len(values))
            i = np.arange(max_index, dtype=self.indices.dtype)
            j = np.arange(max_index, dtype=self.indices.dtype)
            # Super-diagonal: shift column indices right by k.
            j += k
        if not broadcast:
            # Discard trailing values that fall outside the matrix.
            values = values[:len(i)]
        self[i, j] = values
    def _prepare_indices(self, i, j):
        """Ravel i and j into 1-D index arrays and bounds-check them.

        Returns (i, j, M, N) where M, N are the major/minor dimensions.
        Raises IndexError when any index falls outside [-bound, bound).
        """
        M, N = self._swap(self.shape)
        def check_bounds(indices, bound):
            idx = indices.max()
            if idx >= bound:
                raise IndexError('index (%d) out of range (>= %d)' %
                                 (idx, bound))
            idx = indices.min()
            if idx < -bound:
                raise IndexError('index (%d) out of range (< -%d)' %
                                 (idx, bound))
        i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
        j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
        check_bounds(i, M)
        check_bounds(j, N)
        return i, j, M, N
    def _set_many(self, i, j, x):
        """Sets value at each (i, j) to x

        Here (i,j) index major and minor respectively, and must not contain
        duplicate entries.
        """
        i, j, M, N = self._prepare_indices(i, j)
        x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()
        n_samples = x.size
        # offsets[k] is the position of (i[k], j[k]) in self.data, or -1
        # when that cell is not currently stored.
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                                 i, j, offsets)
        if ret == 1:
            # rinse and repeat
            self.sum_duplicates()
            csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                               i, j, offsets)
        if -1 not in offsets:
            # only affects existing non-zero cells
            self.data[offsets] = x
            return
        else:
            warn("Changing the sparsity structure of a {}_matrix is expensive."
                 " lil_matrix is more efficient.".format(self.format),
                 SparseEfficiencyWarning, stacklevel=3)
            # replace where possible
            mask = offsets > -1
            self.data[offsets[mask]] = x[mask]
            # only insertions remain
            mask = ~mask
            i = i[mask]
            i[i < 0] += M
            j = j[mask]
            j[j < 0] += N
            self._insert_many(i, j, x[mask])
    def _zero_many(self, i, j):
        """Sets value at each (i, j) to zero, preserving sparsity structure.

        Here (i,j) index major and minor respectively.
        """
        i, j, M, N = self._prepare_indices(i, j)
        n_samples = len(i)
        # offsets[k] is the storage position of (i[k], j[k]), or -1.
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                                 i, j, offsets)
        if ret == 1:
            # rinse and repeat
            self.sum_duplicates()
            csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                               i, j, offsets)
        # only assign zeros to the existing sparsity structure
        self.data[offsets[offsets > -1]] = 0
    def _insert_many(self, i, j, x):
        """Inserts new nonzero at each (i, j) with value x

        Here (i,j) index major and minor respectively.
        i, j and x must be non-empty, 1d arrays.
        Inserts each major group (e.g. all entries per row) at a time.
        Maintains has_sorted_indices property.
        Modifies i, j, x in place.
        """
        order = np.argsort(i, kind='mergesort')  # stable for duplicates
        i = i.take(order, mode='clip')
        j = j.take(order, mode='clip')
        x = x.take(order, mode='clip')
        do_sort = self.has_sorted_indices
        # Update index data type
        idx_dtype = get_index_dtype((self.indices, self.indptr),
                                    maxval=(self.indptr[-1] + x.size))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        i = np.asarray(i, dtype=idx_dtype)
        j = np.asarray(j, dtype=idx_dtype)
        # Collate old and new in chunks by major index
        indices_parts = []
        data_parts = []
        # ui: unique major indices; ui_indptr: group boundaries within i/j/x.
        ui, ui_indptr = np.unique(i, return_index=True)
        ui_indptr = np.append(ui_indptr, len(j))
        new_nnzs = np.diff(ui_indptr)
        prev = 0
        for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
            # old entries
            start = self.indptr[prev]
            stop = self.indptr[ii]
            indices_parts.append(self.indices[start:stop])
            data_parts.append(self.data[start:stop])
            # handle duplicate j: keep last setting
            uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
            if len(uj) == je - js:
                indices_parts.append(j[js:je])
                data_parts.append(x[js:je])
            else:
                indices_parts.append(j[js:je][::-1][uj_indptr])
                data_parts.append(x[js:je][::-1][uj_indptr])
                new_nnzs[c] = len(uj)
            prev = ii
        # remaining old entries
        start = self.indptr[ii]
        indices_parts.append(self.indices[start:])
        data_parts.append(self.data[start:])
        # update attributes
        self.indices = np.concatenate(indices_parts)
        self.data = np.concatenate(data_parts)
        # Rebuild indptr: old per-line counts plus the inserted counts.
        nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
        nnzs[0] = idx_dtype(0)
        indptr_diff = np.diff(self.indptr)
        indptr_diff[ui] += new_nnzs
        nnzs[1:] = indptr_diff
        self.indptr = np.cumsum(nnzs, out=nnzs)
        if do_sort:
            # TODO: only sort where necessary
            self.has_sorted_indices = False
            self.sort_indices()
        self.check_format(full_check=False)
######################
# Conversion methods #
######################
    def tocoo(self, copy=True):
        # Expand the compressed indptr into explicit per-entry major indices,
        # then pair them with the stored minor indices.
        major_dim, minor_dim = self._swap(self.shape)
        minor_indices = self.indices
        major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
        row, col = self._swap((major_indices, minor_indices))
        from .coo import coo_matrix
        return coo_matrix((self.data, (row, col)), self.shape, copy=copy,
                          dtype=self.dtype)
    tocoo.__doc__ = spmatrix.tocoo.__doc__
    def toarray(self, order=None, out=None):
        if out is None and order is None:
            # Default to this format's natural memory order
            # ('c' for CSR, 'f' for CSC) so the fill below is one pass.
            order = self._swap('cf')[0]
        out = self._process_toarray_args(order, out)
        if not (out.flags.c_contiguous or out.flags.f_contiguous):
            raise ValueError('Output array must be C or F contiguous')
        # align ideal order with output array order
        if out.flags.c_contiguous:
            x = self.tocsr()
            y = out
        else:
            x = self.tocsc()
            y = out.T
        M, N = x._swap(x.shape)
        csr_todense(M, N, x.indptr, x.indices, x.data, y)
        return out
    toarray.__doc__ = spmatrix.toarray.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
    def eliminate_zeros(self):
        """Remove zero entries from the matrix

        This is an *in place* operation
        """
        M, N = self._swap(self.shape)
        _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
                                         self.data)
        self.prune()  # nnz may have changed
    def __get_has_canonical_format(self):
        """Determine whether the matrix has sorted indices and no duplicates

        Returns
            - True: if the above applies
            - False: otherwise

        has_canonical_format implies has_sorted_indices, so if the latter flag
        is False, so will the former be; if the former is found True, the
        latter flag is also set.
        """
        # first check to see if result was cached
        if not getattr(self, '_has_sorted_indices', True):
            # not sorted => not canonical
            self._has_canonical_format = False
        elif not hasattr(self, '_has_canonical_format'):
            # Compute once and cache via the property setter.
            self.has_canonical_format = _sparsetools.csr_has_canonical_format(
                len(self.indptr) - 1, self.indptr, self.indices)
        return self._has_canonical_format
    def __set_has_canonical_format(self, val):
        # Canonical format implies sorted indices; propagate the flag.
        self._has_canonical_format = bool(val)
        if val:
            self.has_sorted_indices = True
    has_canonical_format = property(fget=__get_has_canonical_format,
                                    fset=__set_has_canonical_format)
    def sum_duplicates(self):
        """Eliminate duplicate matrix entries by adding them together

        This is an *in place* operation.
        """
        if self.has_canonical_format:
            return
        # Duplicates must be adjacent before the C routine can merge them.
        self.sort_indices()
        M, N = self._swap(self.shape)
        _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
                                        self.data)
        self.prune()  # nnz may have changed
        self.has_canonical_format = True
    def __get_sorted(self):
        """Determine whether the matrix has sorted indices

        Returns
            - True: if the indices of the matrix are in sorted order
            - False: otherwise
        """
        # first check to see if result was cached
        if not hasattr(self, '_has_sorted_indices'):
            self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
                len(self.indptr) - 1, self.indptr, self.indices)
        return self._has_sorted_indices
    def __set_sorted(self, val):
        # Cache the caller's assertion about sortedness.
        self._has_sorted_indices = bool(val)
    has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
def sorted_indices(self):
"""Return a copy of this matrix with sorted indices
"""
A = self.copy()
A.sort_indices()
return A
# an alternative that has linear complexity is the following
# although the previous option is typically faster
# return self.toother().toother()
    def sort_indices(self):
        """Sort the indices of this matrix *in place*
        """
        # No-op when a previous call (or construction) already sorted them.
        if not self.has_sorted_indices:
            _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
                                          self.indices, self.data)
            self.has_sorted_indices = True
    def prune(self):
        """Remove empty space after all non-zero elements.
        """
        major_dim = self._swap(self.shape)[0]
        if len(self.indptr) != major_dim + 1:
            raise ValueError('index pointer has invalid length')
        if len(self.indices) < self.nnz:
            raise ValueError('indices array has fewer than nnz elements')
        if len(self.data) < self.nnz:
            raise ValueError('data array has fewer than nnz elements')
        # Truncate index/data arrays to nnz and release excess capacity.
        self.indices = _prune_array(self.indices[:self.nnz])
        self.data = _prune_array(self.data[:self.nnz])
    def resize(self, *shape):
        shape = check_shape(shape)
        if hasattr(self, 'blocksize'):
            # BSR subclass: the new shape must align with the block size.
            bm, bn = self.blocksize
            new_M, rm = divmod(shape[0], bm)
            new_N, rn = divmod(shape[1], bn)
            if rm or rn:
                raise ValueError("shape must be divisible into %s blocks. "
                                 "Got %s" % (self.blocksize, shape))
            M, N = self.shape[0] // bm, self.shape[1] // bn
        else:
            new_M, new_N = self._swap(shape)
            M, N = self._swap(self.shape)
        if new_M < M:
            # Shrinking the major axis: drop entries past the new last line.
            self.indices = self.indices[:self.indptr[new_M]]
            self.data = self.data[:self.indptr[new_M]]
            self.indptr = self.indptr[:new_M + 1]
        elif new_M > M:
            # Growing the major axis: the added lines are empty.
            self.indptr = np.resize(self.indptr, new_M + 1)
            self.indptr[M + 1:].fill(self.indptr[M])
        if new_N < N:
            # Shrinking the minor axis: mask out entries whose minor index
            # no longer fits, then rebuild indptr from surviving counts.
            mask = self.indices < new_N
            if not np.all(mask):
                self.indices = self.indices[mask]
                self.data = self.data[mask]
                major_index, val = self._minor_reduce(np.add, mask)
                self.indptr.fill(0)
                self.indptr[1:][major_index] = val
                np.cumsum(self.indptr, out=self.indptr)
        self._shape = shape
    resize.__doc__ = spmatrix.resize.__doc__
###################
# utility methods #
###################
# needed by _data_matrix
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data, self.indices.copy(),
self.indptr.copy()),
shape=self.shape,
dtype=data.dtype)
else:
return self.__class__((data, self.indices, self.indptr),
shape=self.shape, dtype=data.dtype)
    def _binopt(self, other, op):
        """apply the binary operation fn to two sparse matrices."""
        other = self.__class__(other)
        # e.g. csr_plus_csr, csr_minus_csr, etc.
        fn = getattr(_sparsetools, self.format + op + self.format)
        # Worst case: the two sparsity patterns do not overlap at all.
        maxnnz = self.nnz + other.nnz
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=maxnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(maxnnz, dtype=idx_dtype)
        # Comparison ops produce boolean data; everything else upcasts.
        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
        if op in bool_ops:
            data = np.empty(maxnnz, dtype=np.bool_)
        else:
            data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
        fn(self.shape[0], self.shape[1],
           np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)
        A = self.__class__((data, indices, indptr), shape=self.shape)
        A.prune()  # trim the overallocated arrays to the actual nnz
        return A
    def _divide_sparse(self, other):
        """
        Divide this matrix by a second sparse matrix.
        """
        if other.shape != self.shape:
            raise ValueError('inconsistent shapes')
        r = self._binopt(other, '_eldiv_')
        if np.issubdtype(r.dtype, np.inexact):
            # Eldiv leaves entries outside the combined sparsity
            # pattern empty, so they must be filled manually.
            # Everything outside of other's sparsity is NaN, and everything
            # inside it is either zero or defined by eldiv.
            out = np.empty(self.shape, dtype=self.dtype)
            out.fill(np.nan)
            row, col = other.nonzero()
            out[row, col] = 0
            r = r.tocoo()
            out[r.row, r.col] = r.data
            out = matrix(out)
        else:
            # integer types go with nan <-> 0
            out = r
        return out
def _process_slice(sl, num):
    """Normalize *sl* (None, slice with step 1, or int) into a half-open
    range (i0, i1) within a dimension of size *num*."""
    if sl is None:
        return 0, num
    if isinstance(sl, slice):
        start, stop, stride = sl.indices(num)
        if stride != 1:
            raise ValueError('slicing with step != 1 not supported')
        # Clamp so that start > stop yields an empty range.
        return min(start, stop), stop
    if isintlike(sl):
        idx = sl + num if sl < 0 else sl
        if idx < 0 or idx + 1 > num:
            raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
                             (idx, idx + 1, num))
        return idx, idx + 1
    raise TypeError('expected slice or scalar')
| 39.039443 | 79 | 0.54398 | from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib._util import _prune_array
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
csr_sample_values, csr_row_index, csr_row_slice,
csr_column_index1, csr_column_index2)
from ._index import IndexMixin
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, isintlike, get_index_dtype,
downcast_intp_index, get_sum_dtype, check_shape,
matrix, asmatrix, is_pydata_spmatrix)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a compressed (CSR/CSC-style) matrix from: another sparse
        matrix, a shape tuple, a (data, ij) pair, a
        (data, indices, indptr) triple, or a dense array-like.
        """
        _data_matrix.__init__(self)
        if isspmatrix(arg1):
            if arg1.format == self.format and copy:
                arg1 = arg1.copy()
            else:
                arg1 = arg1.asformat(self.format)
            self._set_self(arg1)
        elif isinstance(arg1, tuple):
            if isshape(arg1):
                # create empty matrix
                self._shape = check_shape(arg1)
                M, N = self.shape
                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                idx_dtype = get_index_dtype(maxval=max(M, N))
                self.data = np.zeros(0, getdtype(dtype, default=float))
                self.indices = np.zeros(0, idx_dtype)
                self.indptr = np.zeros(self._swap((M, N))[0] + 1,
                                       dtype=idx_dtype)
            else:
                if len(arg1) == 2:
                    # (data, ij) format
                    from .coo import coo_matrix
                    other = self.__class__(coo_matrix(arg1, shape=shape))
                    self._set_self(other)
                elif len(arg1) == 3:
                    # (data, indices, indptr) format
                    (data, indices, indptr) = arg1
                    # Select index dtype large enough to pass array and
                    # scalar parameters to sparsetools
                    maxval = None
                    if shape is not None:
                        maxval = max(shape)
                    idx_dtype = get_index_dtype((indices, indptr),
                                                maxval=maxval,
                                                check_contents=True)
                    self.indices = np.array(indices, copy=copy,
                                            dtype=idx_dtype)
                    self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
                    self.data = np.array(data, copy=copy, dtype=dtype)
                else:
                    raise ValueError("unrecognized {}_matrix "
                                     "constructor usage".format(self.format))
        else:
            # must be dense
            try:
                arg1 = np.asarray(arg1)
            except Exception:
                raise ValueError("unrecognized {}_matrix constructor usage"
                                 "".format(self.format))
            from .coo import coo_matrix
            self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
        # Read matrix dimensions given, if any
        if shape is not None:
            self._shape = check_shape(shape)
        else:
            if self.shape is None:
                # shape not already set, try to infer dimensions
                try:
                    major_dim = len(self.indptr) - 1
                    minor_dim = self.indices.max() + 1
                except Exception:
                    raise ValueError('unable to infer matrix dimensions')
                else:
                    self._shape = check_shape(self._swap((major_dim,
                                                          minor_dim)))
        if dtype is not None:
            self.data = self.data.astype(dtype, copy=False)
        self.check_format(full_check=False)
    def getnnz(self, axis=None):
        if axis is None:
            # Total stored entries: the final value of the index pointer.
            return int(self.indptr[-1])
        else:
            if axis < 0:
                axis += 2
            # Translate the user-facing axis into major/minor for this format.
            axis, _ = self._swap((axis, 1 - axis))
            _, N = self._swap(self.shape)
            if axis == 0:
                # Count stored entries per minor-axis line.
                return np.bincount(downcast_intp_index(self.indices),
                                   minlength=N)
            elif axis == 1:
                # Count stored entries per major-axis line.
                return np.diff(self.indptr)
            raise ValueError('axis out of bounds')
    getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self._shape = check_shape(other.shape)
    def check_format(self, full_check=True):
        """Check that the matrix respects the compressed-sparse format.

        Parameters
        ----------
        full_check : bool, optional
            If True (default), also run the more expensive checks on
            index values (bounds, non-decreasing indptr).
        """
        # use _swap to determine proper bounds
        major_name, minor_name = self._swap(('row', 'column'))
        major_dim, minor_dim = self._swap(self.shape)
        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype ({})"
                 "".format(self.indptr.dtype.name), stacklevel=3)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype ({})"
                 "".format(self.indices.dtype.name), stacklevel=3)
        idx_dtype = get_index_dtype((self.indptr, self.indices))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)
        # check array shapes
        for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
            if x != 1:
                raise ValueError('data, indices, and indptr should be 1-D')
        # check index pointer
        if (len(self.indptr) != major_dim + 1):
            raise ValueError("index pointer size ({}) should be ({})"
                             "".format(len(self.indptr), major_dim + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")
        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                             "the size of index and data arrays")
        self.prune()
        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= minor_dim:
                    raise ValueError("{} index values must be < {}"
                                     "".format(minor_name, minor_dim))
                if self.indices.min() < 0:
                    raise ValueError("{} index values must be >= 0"
                                     "".format(minor_name))
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                     "non-decreasing sequence")
        # if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order. Sorting indices.')
        #    self.sort_indices()
        #    assert(self.has_sorted_indices())
        # TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
    def __eq__(self, other):
        """Elementwise equality comparison (sparse result where possible)."""
        # Scalar other.
        if isscalarlike(other):
            if np.isnan(other):
                # NaN compares unequal to everything: all-False result.
                return self.__class__(self.shape, dtype=np.bool_)
            if other == 0:
                warn("Comparing a sparse matrix with 0 using == is inefficient"
                     ", try using != instead.", SparseEfficiencyWarning,
                     stacklevel=3)
                # Compute the complement of (self != 0).
                all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
                inv = self._scalar_binopt(other, operator.ne)
                return all_true - inv
            else:
                return self._scalar_binopt(other, operator.eq)
        # Dense other.
        elif isdense(other):
            return self.todense() == other
        # Pydata sparse other.
        elif is_pydata_spmatrix(other):
            return NotImplemented
        # Sparse other.
        elif isspmatrix(other):
            warn("Comparing sparse matrices using == is inefficient, try using"
                 " != instead.", SparseEfficiencyWarning, stacklevel=3)
            # TODO sparse broadcasting
            if self.shape != other.shape:
                return False
            elif self.format != other.format:
                other = other.asformat(self.format)
            # a == b computed as the complement of a != b.
            res = self._binopt(other, '_ne_')
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            return all_true - res
        else:
            return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is"
" inefficient", SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.",
SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
# TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other, '_ne_')
else:
return True
    def _inequality(self, other, op, op_name, bad_scalar_msg):
        """Shared implementation behind the <, >, <=, >= operators."""
        # Scalar other.
        if isscalarlike(other):
            if 0 == other and op_name in ('_le_', '_ge_'):
                raise NotImplementedError(" >= and <= don't work with 0.")
            elif op(0, other):
                # The comparison is True at implicit zeros, so the result
                # would be dense: materialize other and warn.
                warn(bad_scalar_msg, SparseEfficiencyWarning)
                other_arr = np.empty(self.shape, dtype=np.result_type(other))
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                return self._scalar_binopt(other, op)
        elif isdense(other):
            return op(self.todense(), other)
        elif isspmatrix(other):
            if self.shape != other.shape:
                raise ValueError("inconsistent shapes")
            elif self.format != other.format:
                other = other.asformat(self.format)
            if op_name not in ('_ge_', '_le_'):
                return self._binopt(other, op_name)
            warn("Comparing sparse matrices using >= and <= is inefficient, "
                 "using <, >, or !=, instead.", SparseEfficiencyWarning)
            # a <= b computed as the complement of a > b (and vice versa).
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
            return all_true - res
        else:
            raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self, other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
_scalar(other.toarray()[0, 0])
elif self.shape == (1, 1):
return other._mul_scalar(self.toarray()[0, 0])
elif self.shape[1] == 1 and other.shape[0] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == 1 and other.shape[1] == 1:
return other._mul_sparse_matrix(self.tocsc())
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
other = np.atleast_2d(other)
if other.ndim != 2:
return np.multiply(self.toarray(), other)
# Single element / wrapped object.
if other.size == 1:
return self._mul_scalar(other.flat[0])
# Fast case for trivial sparse matrix.
elif self.shape == (1, 1):
return np.multiply(self.toarray()[0, 0], other)
from .coo import coo_matrix
ret = self.tocoo()
# Matching shapes.
if self.shape == other.shape:
data = np.multiply(ret.data, other[ret.row, ret.col])
# Sparse row vector times...
elif self.shape[0] == 1:
if other.shape[1] == 1: # Dense column vector.
data = np.multiply(ret.data, other)
elif other.shape[1] == self.shape[1]: # Dense matrix.
data = np.multiply(ret.data, other[:, ret.col])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(np.arange(other.shape[0]), len(ret.row))
col = np.tile(ret.col, other.shape[0])
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(other.shape[0], self.shape[1]),
copy=False)
# Sparse column vector times...
elif self.shape[1] == 1:
if other.shape[0] == 1: # Dense row vector.
data = np.multiply(ret.data[:, None], other)
elif other.shape[0] == self.shape[0]: # Dense matrix.
data = np.multiply(ret.data[:, None], other[ret.row])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(ret.row, other.shape[1])
col = np.tile(np.arange(other.shape[1]), len(ret.col))
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(self.shape[0], other.shape[1]),
copy=False)
# Sparse matrix times dense row vector.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
data = np.multiply(ret.data, other[:, ret.col].ravel())
# Sparse matrix times dense column vector.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
data = np.multiply(ret.data, other[ret.row].ravel())
else:
raise ValueError("inconsistent shapes")
ret.data = data.view(np.ndarray).ravel()
return ret
###########################
# Multiplication handlers #
###########################
    def _mul_vector(self, other):
        """Sparse matrix times dense 1-D vector."""
        M, N = self.shape
        # output array
        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
                                               other.dtype.char))
        # csr_matvec or csc_matvec
        fn = getattr(_sparsetools, self.format + '_matvec')
        fn(M, N, self.indptr, self.indices, self.data, other, result)
        return result
    def _mul_multivector(self, other):
        """Sparse matrix times dense 2-D array (several column vectors)."""
        M, N = self.shape
        n_vecs = other.shape[1]  # number of column vectors
        result = np.zeros((M, n_vecs),
                          dtype=upcast_char(self.dtype.char, other.dtype.char))
        # csr_matvecs or csc_matvecs
        fn = getattr(_sparsetools, self.format + '_matvecs')
        fn(M, N, n_vecs, self.indptr, self.indices, self.data,
           other.ravel(), result.ravel())
        return result
    def _mul_sparse_matrix(self, other):
        """Sparse-sparse matrix product via a two-pass sparsetools call."""
        M, K1 = self.shape
        K2, N = other.shape
        major_axis = self._swap((M, N))[0]
        other = self.__class__(other)  # convert to this format
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices))
        # Pass 1: compute the result's nnz so output arrays (and the
        # index dtype) can be sized exactly.
        fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
        nnz = fn(M, N,
                 np.asarray(self.indptr, dtype=idx_dtype),
                 np.asarray(self.indices, dtype=idx_dtype),
                 np.asarray(other.indptr, dtype=idx_dtype),
                 np.asarray(other.indices, dtype=idx_dtype))
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=nnz)
        indptr = np.empty(major_axis + 1, dtype=idx_dtype)
        indices = np.empty(nnz, dtype=idx_dtype)
        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
        # Pass 2: the actual multiplication.
        fn = getattr(_sparsetools, self.format + '_matmat')
        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)
        return self.__class__((data, indices, indptr), shape=(M, N))
    def diagonal(self, k=0):
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            raise ValueError("k exceeds matrix dimensions")
        fn = getattr(_sparsetools, self.format + "_diagonal")
        # Length of the k-th diagonal of a rows x cols matrix.
        y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
                     dtype=upcast(self.dtype))
        fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
           self.data, y)
        return y
    diagonal.__doc__ = spmatrix.diagonal.__doc__
#####################
# Other binary ops #
#####################
    def _maximum_minimum(self, other, npop, op_name, dense_check):
        """Shared implementation of maximum() and minimum().

        npop is the dense ufunc (np.maximum / np.minimum); op_name is the
        sparsetools operation suffix; dense_check(scalar) says whether the
        scalar comparison would produce a dense result.
        """
        if isscalarlike(other):
            if dense_check(other):
                warn("Taking maximum (minimum) with > 0 (< 0) number results"
                     " to a dense matrix.", SparseEfficiencyWarning,
                     stacklevel=3)
                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                self.sum_duplicates()
                # Result keeps the sparsity pattern: apply to stored data only.
                new_data = npop(self.data, np.asarray(other))
                mat = self.__class__((new_data, self.indices, self.indptr),
                                     dtype=new_data.dtype, shape=self.shape)
                return mat
        elif isdense(other):
            return npop(self.todense(), other)
        elif isspmatrix(other):
            return self._binopt(other, op_name)
        else:
            raise ValueError("Operands not compatible.")
    def maximum(self, other):
        # Densifies (with a warning) when compared against a positive scalar.
        return self._maximum_minimum(other, np.maximum,
                                     '_maximum_', lambda x: np.asarray(x) > 0)
    maximum.__doc__ = spmatrix.maximum.__doc__
    def minimum(self, other):
        # Densifies (with a warning) when compared against a negative scalar.
        return self._maximum_minimum(other, np.minimum,
                                     '_minimum_', lambda x: np.asarray(x) < 0)
    minimum.__doc__ = spmatrix.minimum.__doc__
#####################
# Reduce operations #
#####################
    def sum(self, axis=None, dtype=None, out=None):
        # The spmatrix base class already does axis=0 and axis=1 efficiently
        # so we only do the case axis=None here
        if (not hasattr(self, 'blocksize') and
                axis in self._swap(((1, -1), (0, 2)))[0]):
            # faster than multiplication for large minor axis in CSC/CSR
            res_dtype = get_sum_dtype(self.dtype)
            ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
            # Sum stored entries along the minor axis per major line.
            major_index, value = self._minor_reduce(np.add)
            ret[major_index] = value
            ret = asmatrix(ret)
            if axis % 2 == 1:
                ret = ret.T
            if out is not None and out.shape != ret.shape:
                raise ValueError('dimensions do not match')
            # axis=() is a no-op reduction that handles dtype/out uniformly.
            return ret.sum(axis=(), dtype=dtype, out=out)
        # spmatrix will handle the remaining situations when axis
        # is in {None, -1, 0, 1}
        else:
            return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
    sum.__doc__ = spmatrix.sum.__doc__
def _minor_reduce(self, ufunc, data=None):
if data is None:
data = self.data
major_index = np.flatnonzero(np.diff(self.indptr))
value = ufunc.reduceat(data,
downcast_intp_index(self.indptr[major_index]))
return major_index, value
#######################
# Getting and Setting #
#######################
def _get_intXint(self, row, col):
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data,
major, major + 1, minor, minor + 1)
return data.sum(dtype=self.dtype)
def _get_sliceXslice(self, row, col):
major, minor = self._swap((row, col))
if major.step in (1, None) and minor.step in (1, None):
return self._get_submatrix(major, minor, copy=True)
return self._major_slice(major)._minor_slice(minor)
def _get_arrayXarray(self, row, col):
# inner indexing
idx_dtype = self.indices.dtype
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
major = np.asarray(major, dtype=idx_dtype)
minor = np.asarray(minor, dtype=idx_dtype)
val = np.empty(major.size, dtype=self.dtype)
csr_sample_values(M, N, self.indptr, self.indices, self.data,
major.size, major.ravel(), minor.ravel(), val)
if major.ndim == 1:
return asmatrix(val)
return self.__class__(val.reshape(major.shape))
def _get_columnXarray(self, row, col):
# outer indexing
major, minor = self._swap((row, col))
return self._major_index_fancy(major)._minor_index_fancy(minor)
def _major_index_fancy(self, idx):
idx_dtype = self.indices.dtype
indices = np.asarray(idx, dtype=idx_dtype).ravel()
_, N = self._swap(self.shape)
M = len(indices)
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape)
row_nnz = np.diff(self.indptr)
idx_dtype = self.indices.dtype
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz[idx], out=res_indptr[1:])
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_index(M, indices, self.indptr, self.indices, self.data,
res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _major_slice(self, idx, copy=False):
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(M)
M = len(range(start, stop, step))
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape)
row_nnz = np.diff(self.indptr)
idx_dtype = self.indices.dtype
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz[idx], out=res_indptr[1:])
if step == 1:
all_idx = slice(self.indptr[start], self.indptr[stop])
res_indices = np.array(self.indices[all_idx], copy=copy)
res_data = np.array(self.data[all_idx], copy=copy)
else:
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_slice(start, stop, step, self.indptr, self.indices,
self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _minor_index_fancy(self, idx):
idx_dtype = self.indices.dtype
idx = np.asarray(idx, dtype=idx_dtype).ravel()
M, N = self._swap(self.shape)
k = len(idx)
new_shape = self._swap((M, k))
if k == 0:
return self.__class__(new_shape)
# pass 1: count idx entries and compute new indptr
col_offsets = np.zeros(N, dtype=idx_dtype)
res_indptr = np.empty_like(self.indptr)
csr_column_index1(k, idx, M, N, self.indptr, self.indices,
col_offsets, res_indptr)
# pass 2: copy indices/data for selected idxs
col_order = np.argsort(idx).astype(idx_dtype, copy=False)
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_column_index2(col_order, col_offsets, len(self.indices),
self.indices, self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _minor_slice(self, idx, copy=False):
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(N)
N = len(range(start, stop, step))
if N == 0:
return self.__class__(self._swap((M, N)))
if step == 1:
return self._get_submatrix(minor=idx, copy=copy)
# TODO: don't fall back to fancy indexing here
return self._minor_index_fancy(np.arange(start, stop, step))
def _get_submatrix(self, major=None, minor=None, copy=False):
M, N = self._swap(self.shape)
i0, i1 = _process_slice(major, M)
j0, j1 = _process_slice(minor, N)
if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
return self.copy() if copy else self
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
shape = self._swap((i1 - i0, j1 - j0))
return self.__class__((data, indices, indptr), shape=shape,
dtype=self.dtype, copy=False)
def _set_intXint(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray_sparse(self, row, col, x):
self._zero_many(*self._swap((row, col)))
M, N = row.shape
broadcast_row = M != 1 and x.shape[0] == 1
broadcast_col = N != 1 and x.shape[1] == 1
r, c = x.row, x.col
x = np.asarray(x.data, dtype=self.dtype)
if broadcast_row:
r = np.repeat(np.arange(M), len(r))
c = np.tile(c, M)
x = np.tile(x, M)
if broadcast_col:
r = np.repeat(r, N)
c = np.tile(np.arange(N), len(c))
x = np.repeat(x, N)
i, j = self._swap((row[r, c], col[r, c]))
self._set_many(i, j, x)
def _setdiag(self, values, k):
if 0 in self.shape:
return
M, N = self.shape
broadcast = (values.ndim == 0)
if k < 0:
if broadcast:
max_index = min(M + k, N)
else:
max_index = min(M + k, N, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
i -= k
else:
if broadcast:
max_index = min(M, N - k)
else:
max_index = min(M, N - k, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
j += k
if not broadcast:
values = values[:len(i)]
self[i, j] = values
def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
check_bounds(i, M)
check_bounds(j, N)
return i, j, M, N
def _set_many(self, i, j, x):
i, j, M, N = self._prepare_indices(i, j)
x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()
n_samples = x.size
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if -1 not in offsets:
self.data[offsets] = x
return
else:
warn("Changing the sparsity structure of a {}_matrix is expensive."
" lil_matrix is more efficient.".format(self.format),
SparseEfficiencyWarning, stacklevel=3)
mask = offsets > -1
self.data[offsets[mask]] = x[mask]
mask = ~mask
i = i[mask]
i[i < 0] += M
j = j[mask]
j[j < 0] += N
self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(i)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
self.data[offsets[offsets > -1]] = 0
def _insert_many(self, i, j, x):
order = np.argsort(i, kind='mergesort')
i = i.take(order, mode='clip')
j = j.take(order, mode='clip')
x = x.take(order, mode='clip')
do_sort = self.has_sorted_indices
idx_dtype = get_index_dtype((self.indices, self.indptr),
maxval=(self.indptr[-1] + x.size))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
i = np.asarray(i, dtype=idx_dtype)
j = np.asarray(j, dtype=idx_dtype)
indices_parts = []
data_parts = []
ui, ui_indptr = np.unique(i, return_index=True)
ui_indptr = np.append(ui_indptr, len(j))
new_nnzs = np.diff(ui_indptr)
prev = 0
for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
start = self.indptr[prev]
stop = self.indptr[ii]
indices_parts.append(self.indices[start:stop])
data_parts.append(self.data[start:stop])
uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
if len(uj) == je - js:
indices_parts.append(j[js:je])
data_parts.append(x[js:je])
else:
indices_parts.append(j[js:je][::-1][uj_indptr])
data_parts.append(x[js:je][::-1][uj_indptr])
new_nnzs[c] = len(uj)
prev = ii
start = self.indptr[ii]
indices_parts.append(self.indices[start:])
data_parts.append(self.data[start:])
self.indices = np.concatenate(indices_parts)
self.data = np.concatenate(data_parts)
nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
nnzs[0] = idx_dtype(0)
indptr_diff = np.diff(self.indptr)
indptr_diff[ui] += new_nnzs
nnzs[1:] = indptr_diff
self.indptr = np.cumsum(nnzs, out=nnzs)
if do_sort:
self.has_sorted_indices = False
self.sort_indices()
self.check_format(full_check=False)
dtype=self.dtype)
tocoo.__doc__ = spmatrix.tocoo.__doc__
def toarray(self, order=None, out=None):
if out is None and order is None:
order = self._swap('cf')[0]
out = self._process_toarray_args(order, out)
if not (out.flags.c_contiguous or out.flags.f_contiguous):
raise ValueError('Output array must be C or F contiguous')
if out.flags.c_contiguous:
x = self.tocsr()
y = out
else:
x = self.tocsc()
y = out.T
M, N = x._swap(x.shape)
csr_todense(M, N, x.indptr, x.indices, x.data, y)
return out
toarray.__doc__ = spmatrix.toarray.__doc__
.fill(0)
self.indptr[1:][major_index] = val
np.cumsum(self.indptr, out=self.indptr)
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
s__((data, self.indices, self.indptr),
shape=self.shape, dtype=data.dtype)
def _binopt(self, other, op):
other = self.__class__(other)
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
A = self.__class__((data, indices, indptr), shape=self.shape)
A.prune()
return A
def _divide_sparse(self, other):
if other.shape != self.shape:
raise ValueError('inconsistent shapes')
r = self._binopt(other, '_eldiv_')
if np.issubdtype(r.dtype, np.inexact):
# inside it is either zero or defined by eldiv.
out = np.empty(self.shape, dtype=self.dtype)
out.fill(np.nan)
row, col = other.nonzero()
out[row, col] = 0
r = r.tocoo()
out[r.row, r.col] = r.data
out = matrix(out)
else:
# integers types go with nan <-> 0
out = r
return out
def _process_slice(sl, num):
if sl is None:
i0, i1 = 0, num
elif isinstance(sl, slice):
i0, i1, stride = sl.indices(num)
if stride != 1:
raise ValueError('slicing with step != 1 not supported')
i0 = min(i0, i1) # give an empty slice when i0 > i1
elif isintlike(sl):
if sl < 0:
sl += num
i0, i1 = sl, sl + 1
if i0 < 0 or i1 > num:
raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
(i0, i1, num))
else:
raise TypeError('expected slice or scalar')
return i0, i1
| true | true |
f72ef0d45883357b7389e4970e9395830f94792a | 80 | py | Python | run.py | 5x/ds-ants-geopy-extended | 6017b5da444cc33bde47f0c7cf2cf06a640a354c | [
"MIT"
] | null | null | null | run.py | 5x/ds-ants-geopy-extended | 6017b5da444cc33bde47f0c7cf2cf06a640a354c | [
"MIT"
] | null | null | null | run.py | 5x/ds-ants-geopy-extended | 6017b5da444cc33bde47f0c7cf2cf06a640a354c | [
"MIT"
] | 1 | 2019-12-06T10:20:08.000Z | 2019-12-06T10:20:08.000Z | from ants.ants import demonstrate
if __name__ == '__main__':
demonstrate()
| 16 | 33 | 0.725 | from ants.ants import demonstrate
if __name__ == '__main__':
demonstrate()
| true | true |
f72ef1faaa7d4dac60adb50472f713f106ff3158 | 758 | py | Python | venv/Lib/site-packages/pandas/tests/tseries/offsets/common.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | 1 | 2021-02-06T21:00:00.000Z | 2021-02-06T21:00:00.000Z | venv/Lib/site-packages/pandas/tests/tseries/offsets/common.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/tseries/offsets/common.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | """
Assertion helpers for offsets tests
"""
def assert_offset_equal(offset, base, expected):
actual = offset + base
actual_swapped = base + offset
actual_apply = offset.apply(base)
try:
assert actual == expected
assert actual_swapped == expected
assert actual_apply == expected
except AssertionError:
raise AssertionError(
f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
f"\nAt Date: {base}"
)
def assert_is_on_offset(offset, date, expected):
actual = offset.is_on_offset(date)
assert actual == expected, (
f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
f"\nAt Date: {date}"
)
| 28.074074 | 79 | 0.60686 |
def assert_offset_equal(offset, base, expected):
actual = offset + base
actual_swapped = base + offset
actual_apply = offset.apply(base)
try:
assert actual == expected
assert actual_swapped == expected
assert actual_apply == expected
except AssertionError:
raise AssertionError(
f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
f"\nAt Date: {base}"
)
def assert_is_on_offset(offset, date, expected):
actual = offset.is_on_offset(date)
assert actual == expected, (
f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
f"\nAt Date: {date}"
)
| true | true |
f72ef247c55be69d301aa864fe18c01087abc773 | 587 | py | Python | conftest.py | hackoregon/neighborhoods-2018 | 2e1bf837365182a212527e1afbedddc759d0ee78 | [
"MIT"
] | 6 | 2018-03-16T00:06:31.000Z | 2019-06-02T21:27:03.000Z | conftest.py | hackoregon/neighborhoods-2018 | 2e1bf837365182a212527e1afbedddc759d0ee78 | [
"MIT"
] | 53 | 2018-03-16T03:29:04.000Z | 2021-06-10T20:17:11.000Z | conftest.py | hackoregon/neighborhoods-2018 | 2e1bf837365182a212527e1afbedddc759d0ee78 | [
"MIT"
] | 1 | 2018-06-20T16:03:40.000Z | 2018-06-20T16:03:40.000Z | import pytest
import os
import neighborhoods_backend
@pytest.fixture(scope='session')
def django_db_setup():
neighborhoods_backend.settings.DATABASES['default'] = {
'ENGINE': 'django_db_geventpool.backends.postgresql_psycopg2',
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'NAME': os.environ.get('POSTGRES_NAME'),
'USER': os.environ.get('POSTGRES_USER'),
'HOST': os.environ.get('POSTGRES_HOST'),
'PORT': os.environ.get('POSTGRES_PORT'),
'CONN_MAX_AGE': 0,
'OPTIONS': {
'MAX_CONNS': 20
}
}
| 29.35 | 70 | 0.633731 | import pytest
import os
import neighborhoods_backend
@pytest.fixture(scope='session')
def django_db_setup():
neighborhoods_backend.settings.DATABASES['default'] = {
'ENGINE': 'django_db_geventpool.backends.postgresql_psycopg2',
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'NAME': os.environ.get('POSTGRES_NAME'),
'USER': os.environ.get('POSTGRES_USER'),
'HOST': os.environ.get('POSTGRES_HOST'),
'PORT': os.environ.get('POSTGRES_PORT'),
'CONN_MAX_AGE': 0,
'OPTIONS': {
'MAX_CONNS': 20
}
}
| true | true |
f72ef2fc7fec4b7efa2f516ad4626de6d8822b3b | 1,605 | py | Python | filter_from_list.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 13 | 2019-12-09T07:56:13.000Z | 2021-08-03T01:45:53.000Z | filter_from_list.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 1 | 2020-04-29T00:00:14.000Z | 2021-07-09T14:24:19.000Z | filter_from_list.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 3 | 2020-04-27T15:36:36.000Z | 2021-03-29T17:52:35.000Z | # -*- coding: utf-8 -*-
import argparse
import inspect
import math
import os
from pprint import pprint
import sys
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="tmp/samples.csv", help="Input file")
parser.add_argument('-blist', dest="BLACK_LIST", default="", help="CSV file for blacklist of entries")
parser.add_argument('-wlist', dest="WHITE_LIST", default="", help="CSV file for whitelist of entries")
parser.add_argument('-key', dest="KEY", default="id", help="Key to match on")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just show details?")
parser.add_argument('-out', dest="OUTPUT_FILE", default="", help="File to output results; leave empty to update input file")
a = parser.parse_args()
OUTPUT_FILE = a.OUTPUT_FILE if len(a.OUTPUT_FILE) > 0 else a.INPUT_FILE
# Read files
fieldNames, rows = readCsv(a.INPUT_FILE)
rowCount = len(rows)
if len(a.BLACK_LIST) > 0:
_, blist = readCsv(a.BLACK_LIST)
bids = set([item[a.KEY] for item in blist])
rows = [item for item in rows if item[a.KEY] not in bids]
rowCount = len(rows)
print("%s rows after blacklist filtering" % rowCount)
if len(a.WHITE_LIST) > 0:
_, wlist = readCsv(a.WHITE_LIST)
wids = set([item[a.KEY] for item in wlist])
rows = [item for item in rows if item[a.KEY] in wids]
rowCount = len(rows)
print("%s rows after whitelist filtering" % rowCount)
if a.PROBE:
sys.exit()
writeCsv(OUTPUT_FILE, rows, headings=fieldNames)
| 33.4375 | 124 | 0.709034 |
import argparse
import inspect
import math
import os
from pprint import pprint
import sys
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="tmp/samples.csv", help="Input file")
parser.add_argument('-blist', dest="BLACK_LIST", default="", help="CSV file for blacklist of entries")
parser.add_argument('-wlist', dest="WHITE_LIST", default="", help="CSV file for whitelist of entries")
parser.add_argument('-key', dest="KEY", default="id", help="Key to match on")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just show details?")
parser.add_argument('-out', dest="OUTPUT_FILE", default="", help="File to output results; leave empty to update input file")
a = parser.parse_args()
OUTPUT_FILE = a.OUTPUT_FILE if len(a.OUTPUT_FILE) > 0 else a.INPUT_FILE
fieldNames, rows = readCsv(a.INPUT_FILE)
rowCount = len(rows)
if len(a.BLACK_LIST) > 0:
_, blist = readCsv(a.BLACK_LIST)
bids = set([item[a.KEY] for item in blist])
rows = [item for item in rows if item[a.KEY] not in bids]
rowCount = len(rows)
print("%s rows after blacklist filtering" % rowCount)
if len(a.WHITE_LIST) > 0:
_, wlist = readCsv(a.WHITE_LIST)
wids = set([item[a.KEY] for item in wlist])
rows = [item for item in rows if item[a.KEY] in wids]
rowCount = len(rows)
print("%s rows after whitelist filtering" % rowCount)
if a.PROBE:
sys.exit()
writeCsv(OUTPUT_FILE, rows, headings=fieldNames)
| true | true |
f72ef3946956775383152170dd5a17cfd84646d4 | 5,126 | py | Python | src/models.py | athatheo/House-GANs-Reproduction | 00cc807f1e74f88eef5ed81615bfd87a39c52f94 | [
"MIT"
] | null | null | null | src/models.py | athatheo/House-GANs-Reproduction | 00cc807f1e74f88eef5ed81615bfd87a39c52f94 | [
"MIT"
] | null | null | null | src/models.py | athatheo/House-GANs-Reproduction | 00cc807f1e74f88eef5ed81615bfd87a39c52f94 | [
"MIT"
] | null | null | null | import torch
from torch import cat
from torch.nn import Conv2d
from torch.nn import Linear
from torch.nn import Module
from torch.nn import ConvTranspose2d
from torch.nn import LeakyReLU
from torch.nn import Tanh
from torch.nn import MaxPool2d
from torch import zeros_like
class ConvMPN(Module):
def __init__(self):
super().__init__()
self.conv1 = Conv2d(in_channels=3*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv2 = Conv2d(in_channels=2*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv3 = Conv2d(in_channels=2*16, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.leaky_relu = LeakyReLU(0.1)
def get_nodes(self, feature_vectors, edges, include_neighbours=True):
device = feature_vectors.device
nodes = zeros_like(feature_vectors, device=device)
if include_neighbours:
index = torch.where(edges[:, 1] > 0)
else:
index = torch.where(edges[:, 1] < 0)
src = torch.cat([edges[index[0], 0], edges[index[0], 2]]).long()
dst = torch.cat([edges[index[0], 2], edges[index[0], 0]]).long()
src = feature_vectors[src.contiguous()]
dst = dst.view(-1, 1, 1, 1).expand_as(src).to(device)
return nodes.scatter_add(0, dst, src)
def cat_nodes(self, feature_vectors, edges):
neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=True, )
non_neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=False)
encoding = torch.cat([feature_vectors, neighbouring_nodes, non_neighbouring_nodes], 1)
return encoding
def forward(self, x, edges):
x = self.cat_nodes(x, edges)
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
return x
class Generator(Module):
def __init__(self):
super().__init__()
self.linear_reshape_1 = Linear(138, 1024)
self.conv_mpn_1 = ConvMPN()
self.upsample_1 = ConvTranspose2d(16, 16, 4, 2, 1)
self.conv_mpn_2 = ConvMPN()
self.upsample_2 = ConvTranspose2d(16, 16, 4, 2, 1)
self.conv_1 = Conv2d(16, 256, 3, 1, 1)
self.leaky_relu = LeakyReLU(0.1)
self.conv_2 = Conv2d(256, 128, 3, 1, 1)
self.conv_3 = Conv2d(128, 1, 3, 1, 1)
self.tanh = Tanh()
def forward(self, z, t, edges):
z = z.view(-1, 128)#
t = t.view(-1, 10) #
x = cat([z, t], 1)
x = self.linear_reshape_1(x)
x = x.view(-1, 16, 8, 8)
x = self.conv_mpn_1(x, edges).view(-1, *x.shape[1:])
x = self.upsample_1(x)
x = self.leaky_relu(x)
x = self.conv_mpn_2(x, edges).view(-1, *x.shape[1:])
x = self.upsample_2(x)
x = self.leaky_relu(x)
x = self.conv_1(x.view(-1, x.shape[1], *x.shape[2:]))
x = self.leaky_relu(x)
x = self.conv_2(x)
x = self.leaky_relu(x)
x = self.conv_3(x)
x = self.tanh(x)
x = x.view(-1, *x.shape[2:])
return x
class Discriminator(Module):
def __init__(self):
super().__init__()
self.linear_reshape_1 = Linear(10, 8192)
self.leaky_relu = LeakyReLU(0.1)
self.conv_1 = Conv2d(9, 16, 3, 1, 1, bias=True)
self.conv_2 = Conv2d(16, 16, 3, 1, 1)
self.conv_3 = Conv2d(16, 16, 3, 1, 1)
self.conv_mpn_1 = ConvMPN()
self.downsample_1 = Conv2d(16, 16, 3, 2, 1)
self.conv_mpn_2 = ConvMPN()
self.downsample_2 = Conv2d(16, 16, 3, 2, 1)
self.dec_conv_1 = Conv2d(16, 256, 3, 2, 1)
self.dec_conv_2 = Conv2d(256, 128, 3, 2, 1)
self.dec_conv_3 = Conv2d(128, 128, 3, 2, 1)
self.pool_reshape_linear = Linear(128, 1)
def add_pool(self, x, nd_to_sample):
dtype, device = x.dtype, x.device
batch_size = torch.max(nd_to_sample) + 1
pooled_x = torch.zeros(batch_size, x.shape[-1], device=device).float()
pool_to = nd_to_sample.view(-1, 1).expand_as(x).to(device)
pooled_x = pooled_x.scatter_add(0, pool_to, x)
return pooled_x
def forward(self, x, t, edges, nd_to_sample):
x = x.view(-1, 1, 32, 32)
t = self.linear_reshape_1(t)
t = t.view(-1, 8, 32, 32)
x = cat([x, t], 1)
x = self.conv_1(x)
x = self.leaky_relu(x)
x = self.conv_2(x)
x = self.leaky_relu(x)
x = self.conv_3(x)
x = self.leaky_relu(x)
x = self.conv_mpn_1(x, edges)
x = self.downsample_1(x)
x = self.leaky_relu(x)
x = self.conv_mpn_2(x, edges)
x = self.downsample_2(x)
x = self.leaky_relu(x)
x = self.dec_conv_1(x)
x = self.leaky_relu(x)
x = self.dec_conv_2(x)
x = self.leaky_relu(x)
x = self.dec_conv_3(x)
x = self.leaky_relu(x)
x = x.view(-1, x.shape[1])
x = self.add_pool(x, nd_to_sample)
x = self.pool_reshape_linear(x)
return x
| 36.35461 | 110 | 0.584666 | import torch
from torch import cat
from torch.nn import Conv2d
from torch.nn import Linear
from torch.nn import Module
from torch.nn import ConvTranspose2d
from torch.nn import LeakyReLU
from torch.nn import Tanh
from torch.nn import MaxPool2d
from torch import zeros_like
class ConvMPN(Module):
def __init__(self):
super().__init__()
self.conv1 = Conv2d(in_channels=3*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv2 = Conv2d(in_channels=2*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv3 = Conv2d(in_channels=2*16, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.leaky_relu = LeakyReLU(0.1)
def get_nodes(self, feature_vectors, edges, include_neighbours=True):
device = feature_vectors.device
nodes = zeros_like(feature_vectors, device=device)
if include_neighbours:
index = torch.where(edges[:, 1] > 0)
else:
index = torch.where(edges[:, 1] < 0)
src = torch.cat([edges[index[0], 0], edges[index[0], 2]]).long()
dst = torch.cat([edges[index[0], 2], edges[index[0], 0]]).long()
src = feature_vectors[src.contiguous()]
dst = dst.view(-1, 1, 1, 1).expand_as(src).to(device)
return nodes.scatter_add(0, dst, src)
def cat_nodes(self, feature_vectors, edges):
neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=True, )
non_neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=False)
encoding = torch.cat([feature_vectors, neighbouring_nodes, non_neighbouring_nodes], 1)
return encoding
def forward(self, x, edges):
x = self.cat_nodes(x, edges)
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
return x
class Generator(Module):
def __init__(self):
super().__init__()
self.linear_reshape_1 = Linear(138, 1024)
self.conv_mpn_1 = ConvMPN()
self.upsample_1 = ConvTranspose2d(16, 16, 4, 2, 1)
self.conv_mpn_2 = ConvMPN()
self.upsample_2 = ConvTranspose2d(16, 16, 4, 2, 1)
self.conv_1 = Conv2d(16, 256, 3, 1, 1)
self.leaky_relu = LeakyReLU(0.1)
self.conv_2 = Conv2d(256, 128, 3, 1, 1)
self.conv_3 = Conv2d(128, 1, 3, 1, 1)
self.tanh = Tanh()
def forward(self, z, t, edges):
z = z.view(-1, 128)
t = t.view(-1, 10)
x = cat([z, t], 1)
x = self.linear_reshape_1(x)
x = x.view(-1, 16, 8, 8)
x = self.conv_mpn_1(x, edges).view(-1, *x.shape[1:])
x = self.upsample_1(x)
x = self.leaky_relu(x)
x = self.conv_mpn_2(x, edges).view(-1, *x.shape[1:])
x = self.upsample_2(x)
x = self.leaky_relu(x)
x = self.conv_1(x.view(-1, x.shape[1], *x.shape[2:]))
x = self.leaky_relu(x)
x = self.conv_2(x)
x = self.leaky_relu(x)
x = self.conv_3(x)
x = self.tanh(x)
x = x.view(-1, *x.shape[2:])
return x
class Discriminator(Module):
def __init__(self):
super().__init__()
self.linear_reshape_1 = Linear(10, 8192)
self.leaky_relu = LeakyReLU(0.1)
self.conv_1 = Conv2d(9, 16, 3, 1, 1, bias=True)
self.conv_2 = Conv2d(16, 16, 3, 1, 1)
self.conv_3 = Conv2d(16, 16, 3, 1, 1)
self.conv_mpn_1 = ConvMPN()
self.downsample_1 = Conv2d(16, 16, 3, 2, 1)
self.conv_mpn_2 = ConvMPN()
self.downsample_2 = Conv2d(16, 16, 3, 2, 1)
self.dec_conv_1 = Conv2d(16, 256, 3, 2, 1)
self.dec_conv_2 = Conv2d(256, 128, 3, 2, 1)
self.dec_conv_3 = Conv2d(128, 128, 3, 2, 1)
self.pool_reshape_linear = Linear(128, 1)
def add_pool(self, x, nd_to_sample):
dtype, device = x.dtype, x.device
batch_size = torch.max(nd_to_sample) + 1
pooled_x = torch.zeros(batch_size, x.shape[-1], device=device).float()
pool_to = nd_to_sample.view(-1, 1).expand_as(x).to(device)
pooled_x = pooled_x.scatter_add(0, pool_to, x)
return pooled_x
def forward(self, x, t, edges, nd_to_sample):
x = x.view(-1, 1, 32, 32)
t = self.linear_reshape_1(t)
t = t.view(-1, 8, 32, 32)
x = cat([x, t], 1)
x = self.conv_1(x)
x = self.leaky_relu(x)
x = self.conv_2(x)
x = self.leaky_relu(x)
x = self.conv_3(x)
x = self.leaky_relu(x)
x = self.conv_mpn_1(x, edges)
x = self.downsample_1(x)
x = self.leaky_relu(x)
x = self.conv_mpn_2(x, edges)
x = self.downsample_2(x)
x = self.leaky_relu(x)
x = self.dec_conv_1(x)
x = self.leaky_relu(x)
x = self.dec_conv_2(x)
x = self.leaky_relu(x)
x = self.dec_conv_3(x)
x = self.leaky_relu(x)
x = x.view(-1, x.shape[1])
x = self.add_pool(x, nd_to_sample)
x = self.pool_reshape_linear(x)
return x
| true | true |
f72ef61fc1dcc54611562b5350866141b7262a8c | 507 | py | Python | build/summit_xl_common/summit_xl_localization/catkin_generated/pkg.develspace.context.pc.py | Jam-cpu/Masters-Project---Final | 0b266b1f117a579b96507249f0a128d0e3cc082a | [
"BSD-3-Clause-Clear"
] | null | null | null | build/summit_xl_common/summit_xl_localization/catkin_generated/pkg.develspace.context.pc.py | Jam-cpu/Masters-Project---Final | 0b266b1f117a579b96507249f0a128d0e3cc082a | [
"BSD-3-Clause-Clear"
] | null | null | null | build/summit_xl_common/summit_xl_localization/catkin_generated/pkg.develspace.context.pc.py | Jam-cpu/Masters-Project---Final | 0b266b1f117a579b96507249f0a128d0e3cc082a | [
"BSD-3-Clause-Clear"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "robot_localization;roscpp;tf;tf2;tf2_ros;message_filters;std_msgs;std_srvs;geometry_msgs;nav_msgs;sensor_msgs;robotnik_msgs;mavros_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "summit_xl_localization"
PROJECT_SPACE_DIR = "/workspace/devel"
PROJECT_VERSION = "1.1.3"
| 56.333333 | 180 | 0.763314 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "robot_localization;roscpp;tf;tf2;tf2_ros;message_filters;std_msgs;std_srvs;geometry_msgs;nav_msgs;sensor_msgs;robotnik_msgs;mavros_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "summit_xl_localization"
PROJECT_SPACE_DIR = "/workspace/devel"
PROJECT_VERSION = "1.1.3"
| true | true |
f72ef6e9a81af96f8effa83530a460f9a5ebed9d | 38,041 | py | Python | pybind/slxos/v16r_1_00b/mpls_state/lsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/mpls_state/lsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/mpls_state/lsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import basic
import forwarding
import frr
import backup
import instances
import secondary_path
# NOTE(review): machine-generated pyangbind binding (Python 2 era: relies on
# `unicode` and `__builtin__`). Do not hand-edit the YANGDynClass argument
# strings — each one is duplicated verbatim in the setter's error message and
# must stay in sync with the YANG model; regenerate from the YANG instead.
class lsp(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-mpls-operational - based on the path /mpls-state/lsp. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: MPLS LSP operational information
  """
  # Fixed attribute set: pyangbind plumbing plus one name-mangled slot per
  # YANG child node of /mpls-state/lsp.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name','__lsp_type','__bypass_lsp_type','__interface','__history','__basic','__forwarding','__frr','__backup','__instances','__secondary_path',)
  _yang_name = 'lsp'
  _rest_name = 'lsp'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: explicit kwarg wins, else inherit from the
    # parent binding, else disable path registration.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Resolve extension methods the same way (kwarg > parent > disabled).
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Instantiate every YANG child with its generated type metadata
    # (is_config=False throughout: this is operational-state data).
    self.__instances = YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
    self.__bypass_lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
    self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
    self.__lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
    self.__frr = YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    self.__basic = YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    self.__interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
    self.__forwarding = YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    self.__backup = YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    self.__secondary_path = YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
    self.__history = YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
    # Copy-constructor path: a single positional argument must expose every
    # pyangbind element; changed values are copied in via the private setters.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  # YANG data path of this node, built by walking up the parent chain.
  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'mpls-state', u'lsp']
  # REST path; a falsy _rest_name means this node is transparent in REST.
  def _rest_path(self):
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'mpls-state', u'lsp']
  # Generated accessor/mutator/unset triples follow, one per YANG child.
  # Setters are private (leading underscore) because every node here is
  # config false; they rebuild the YANGDynClass wrapper and re-raise type
  # failures as a structured ValueError.
  def _get_lsp_name(self):
    """
    Getter method for lsp_name, mapped from YANG variable /mpls_state/lsp/lsp_name (string)
    YANG Description: LSP Name
    """
    return self.__lsp_name
      
  def _set_lsp_name(self, v, load=False):
    """
    Setter method for lsp_name, mapped from YANG variable /mpls_state/lsp/lsp_name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_name() directly.
    YANG Description: LSP Name
    """
    # lsp-name is the list key: it may only be set while loading, never on an
    # element already inside an instantiated list.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lsp_name must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
        })
    self.__lsp_name = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_lsp_name(self):
    self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
  def _get_lsp_type(self):
    """
    Getter method for lsp_type, mapped from YANG variable /mpls_state/lsp/lsp_type (lsp-type)
    YANG Description: LSP Type
    """
    return self.__lsp_type
      
  def _set_lsp_type(self, v, load=False):
    """
    Setter method for lsp_type, mapped from YANG variable /mpls_state/lsp/lsp_type (lsp-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_type() directly.
    YANG Description: LSP Type
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lsp_type must be of a type compatible with lsp-type""",
          'defined-type': "brocade-mpls-operational:lsp-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)""",
        })
    self.__lsp_type = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_lsp_type(self):
    self.__lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
  def _get_bypass_lsp_type(self):
    """
    Getter method for bypass_lsp_type, mapped from YANG variable /mpls_state/lsp/bypass_lsp_type (bypass-lsp-type)
    YANG Description: Bypass LSP Type
    """
    return self.__bypass_lsp_type
      
  def _set_bypass_lsp_type(self, v, load=False):
    """
    Setter method for bypass_lsp_type, mapped from YANG variable /mpls_state/lsp/bypass_lsp_type (bypass-lsp-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bypass_lsp_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bypass_lsp_type() directly.
    YANG Description: Bypass LSP Type
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """bypass_lsp_type must be of a type compatible with bypass-lsp-type""",
          'defined-type': "brocade-mpls-operational:bypass-lsp-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)""",
        })
    self.__bypass_lsp_type = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_bypass_lsp_type(self):
    self.__bypass_lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
  def _get_interface(self):
    """
    Getter method for interface, mapped from YANG variable /mpls_state/lsp/interface (string)
    YANG Description: Bypass LSP interface
    """
    return self.__interface
      
  def _set_interface(self, v, load=False):
    """
    Setter method for interface, mapped from YANG variable /mpls_state/lsp/interface (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_interface is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interface() directly.
    YANG Description: Bypass LSP interface
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """interface must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
        })
    self.__interface = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_interface(self):
    self.__interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
  def _get_history(self):
    """
    Getter method for history, mapped from YANG variable /mpls_state/lsp/history (string)
    YANG Description: MPLS LSP history Information
    """
    return self.__history
      
  def _set_history(self, v, load=False):
    """
    Setter method for history, mapped from YANG variable /mpls_state/lsp/history (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_history is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_history() directly.
    YANG Description: MPLS LSP history Information
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """history must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
        })
    self.__history = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_history(self):
    self.__history = YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
  def _get_basic(self):
    """
    Getter method for basic, mapped from YANG variable /mpls_state/lsp/basic (container)
    YANG Description: MPLS basic LSP operational information
    """
    return self.__basic
      
  def _set_basic(self, v, load=False):
    """
    Setter method for basic, mapped from YANG variable /mpls_state/lsp/basic (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_basic is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_basic() directly.
    YANG Description: MPLS basic LSP operational information
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """basic must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__basic = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_basic(self):
    self.__basic = YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
  def _get_forwarding(self):
    """
    Getter method for forwarding, mapped from YANG variable /mpls_state/lsp/forwarding (container)
    YANG Description: MPLS LSP forwarding information
    """
    return self.__forwarding
      
  def _set_forwarding(self, v, load=False):
    """
    Setter method for forwarding, mapped from YANG variable /mpls_state/lsp/forwarding (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_forwarding is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_forwarding() directly.
    YANG Description: MPLS LSP forwarding information
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """forwarding must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__forwarding = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_forwarding(self):
    self.__forwarding = YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
  def _get_frr(self):
    """
    Getter method for frr, mapped from YANG variable /mpls_state/lsp/frr (container)
    YANG Description: MPLS LSP FRR information
    """
    return self.__frr
      
  def _set_frr(self, v, load=False):
    """
    Setter method for frr, mapped from YANG variable /mpls_state/lsp/frr (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_frr is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_frr() directly.
    YANG Description: MPLS LSP FRR information
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """frr must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__frr = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_frr(self):
    self.__frr = YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
  def _get_backup(self):
    """
    Getter method for backup, mapped from YANG variable /mpls_state/lsp/backup (container)
    YANG Description: MPLS LSP detail backup information
    """
    return self.__backup
      
  def _set_backup(self, v, load=False):
    """
    Setter method for backup, mapped from YANG variable /mpls_state/lsp/backup (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_backup is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_backup() directly.
    YANG Description: MPLS LSP detail backup information
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """backup must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__backup = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_backup(self):
    self.__backup = YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
  def _get_instances(self):
    """
    Getter method for instances, mapped from YANG variable /mpls_state/lsp/instances (list)
    YANG Description: MPLS LSP instancses information
    """
    return self.__instances
      
  def _set_instances(self, v, load=False):
    """
    Setter method for instances, mapped from YANG variable /mpls_state/lsp/instances (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_instances is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_instances() directly.
    YANG Description: MPLS LSP instancses information
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """instances must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
        })
    self.__instances = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_instances(self):
    self.__instances = YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
  def _get_secondary_path(self):
    """
    Getter method for secondary_path, mapped from YANG variable /mpls_state/lsp/secondary_path (list)
    YANG Description: MPLS LSP secondary path information
    """
    return self.__secondary_path
      
  def _set_secondary_path(self, v, load=False):
    """
    Setter method for secondary_path, mapped from YANG variable /mpls_state/lsp/secondary_path (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_secondary_path is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_secondary_path() directly.
    YANG Description: MPLS LSP secondary path information
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """secondary_path must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
        })
    self.__secondary_path = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_secondary_path(self):
    self.__secondary_path = YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)

  # Public read-only view of each child: getters only are exposed because
  # every node is operational state (config false), so there are no public
  # setters. __builtin__.property is used to dodge any local shadowing of
  # the `property` builtin.
  lsp_name = __builtin__.property(_get_lsp_name)
  lsp_type = __builtin__.property(_get_lsp_type)
  bypass_lsp_type = __builtin__.property(_get_bypass_lsp_type)
  interface = __builtin__.property(_get_interface)
  history = __builtin__.property(_get_history)
  basic = __builtin__.property(_get_basic)
  forwarding = __builtin__.property(_get_forwarding)
  frr = __builtin__.property(_get_frr)
  backup = __builtin__.property(_get_backup)
  instances = __builtin__.property(_get_instances)
  secondary_path = __builtin__.property(_get_secondary_path)

  # Registry used by PybindBase and the copy-constructor loop above.
  _pyangbind_elements = {'lsp_name': lsp_name, 'lsp_type': lsp_type, 'bypass_lsp_type': bypass_lsp_type, 'interface': interface, 'history': history, 'basic': basic, 'forwarding': forwarding, 'frr': frr, 'backup': backup, 'instances': instances, 'secondary_path': secondary_path, }
| 71.775472 | 784 | 0.72761 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import basic
import forwarding
import frr
import backup
import instances
import secondary_path
class lsp(PybindBase):
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name','__lsp_type','__bypass_lsp_type','__interface','__history','__basic','__forwarding','__frr','__backup','__instances','__secondary_path',)
_yang_name = 'lsp'
_rest_name = 'lsp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__instances = YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__bypass_lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
self.__frr = YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__basic = YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__forwarding = YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__backup = YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__secondary_path = YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__history = YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'lsp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'lsp']
def _get_lsp_name(self):
return self.__lsp_name
def _set_lsp_name(self, v, load=False):
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__lsp_name = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_name(self):
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_lsp_type(self):
return self.__lsp_type
def _set_lsp_type(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_type must be of a type compatible with lsp-type""",
'defined-type': "brocade-mpls-operational:lsp-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)""",
})
self.__lsp_type = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_type(self):
self.__lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
def _get_bypass_lsp_type(self):
return self.__bypass_lsp_type
def _set_bypass_lsp_type(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bypass_lsp_type must be of a type compatible with bypass-lsp-type""",
'defined-type': "brocade-mpls-operational:bypass-lsp-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)""",
})
self.__bypass_lsp_type = t
if hasattr(self, '_set'):
self._set()
def _unset_bypass_lsp_type(self):
self.__bypass_lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
def _get_interface(self):
return self.__interface
def _set_interface(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__interface = t
if hasattr(self, '_set'):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_history(self):
return self.__history
def _set_history(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """history must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__history = t
if hasattr(self, '_set'):
self._set()
def _unset_history(self):
self.__history = YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_basic(self):
return self.__basic
def _set_basic(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """basic must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__basic = t
if hasattr(self, '_set'):
self._set()
def _unset_basic(self):
self.__basic = YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_forwarding(self):
return self.__forwarding
def _set_forwarding(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """forwarding must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__forwarding = t
if hasattr(self, '_set'):
self._set()
def _unset_forwarding(self):
self.__forwarding = YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_frr(self):
return self.__frr
def _set_frr(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """frr must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__frr = t
if hasattr(self, '_set'):
self._set()
def _unset_frr(self):
self.__frr = YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_backup(self):
return self.__backup
def _set_backup(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """backup must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__backup = t
if hasattr(self, '_set'):
self._set()
def _unset_backup(self):
self.__backup = YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_instances(self):
return self.__instances
def _set_instances(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """instances must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__instances = t
if hasattr(self, '_set'):
self._set()
def _unset_instances(self):
self.__instances = YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
def _get_secondary_path(self):
return self.__secondary_path
def _set_secondary_path(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """secondary_path must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__secondary_path = t
if hasattr(self, '_set'):
self._set()
def _unset_secondary_path(self):
self.__secondary_path = YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
lsp_name = __builtin__.property(_get_lsp_name)
lsp_type = __builtin__.property(_get_lsp_type)
bypass_lsp_type = __builtin__.property(_get_bypass_lsp_type)
interface = __builtin__.property(_get_interface)
history = __builtin__.property(_get_history)
basic = __builtin__.property(_get_basic)
forwarding = __builtin__.property(_get_forwarding)
frr = __builtin__.property(_get_frr)
backup = __builtin__.property(_get_backup)
instances = __builtin__.property(_get_instances)
secondary_path = __builtin__.property(_get_secondary_path)
_pyangbind_elements = {'lsp_name': lsp_name, 'lsp_type': lsp_type, 'bypass_lsp_type': bypass_lsp_type, 'interface': interface, 'history': history, 'basic': basic, 'forwarding': forwarding, 'frr': frr, 'backup': backup, 'instances': instances, 'secondary_path': secondary_path, }
| true | true |
f72ef7eb0ddb67009fc21687759d869de7d0b79e | 456 | py | Python | stubs.min/System/Windows/Forms/__init___parts/HScrollProperties.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Forms/__init___parts/HScrollProperties.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Forms/__init___parts/HScrollProperties.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class HScrollProperties(ScrollProperties):
"""
Provides basic properties for the System.Windows.Forms.HScrollBar
HScrollProperties(container: ScrollableControl)
"""
@staticmethod
def __new__(self,container):
""" __new__(cls: type,container: ScrollableControl) """
pass
ParentControl=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the control to which this scroll information applies.
"""
| 26.823529 | 85 | 0.72807 | class HScrollProperties(ScrollProperties):
""" __new__(cls: type,container: ScrollableControl) """
pass
"""Gets the control to which this scroll information applies.
| true | true |
f72ef7f4af6964877bdcb43c3b93166aefe758c0 | 1,273 | py | Python | tests/test_bayesian.py | eric-erki/autokeras | d365a04af7f41641c4b0634fc076f6dbe2364d53 | [
"MIT"
] | 1 | 2018-08-06T03:57:51.000Z | 2018-08-06T03:57:51.000Z | tests/test_bayesian.py | eric-erki/autokeras | d365a04af7f41641c4b0634fc076f6dbe2364d53 | [
"MIT"
] | null | null | null | tests/test_bayesian.py | eric-erki/autokeras | d365a04af7f41641c4b0634fc076f6dbe2364d53 | [
"MIT"
] | 1 | 2018-10-11T03:43:41.000Z | 2018-10-11T03:43:41.000Z | from autokeras.bayesian import *
from tests.common import get_add_skip_model, get_concat_skip_model, get_conv_dense_model
def test_edit_distance():
    # Descriptors of two different skip-connection graphs (add-skip vs.
    # concat-skip) should be exactly 2.0 apart under edit_distance.
    descriptor1 = get_add_skip_model().extract_descriptor()
    descriptor2 = get_concat_skip_model().extract_descriptor()
    assert edit_distance(descriptor1, descriptor2, 1.0) == 2.0
def test_edit_distance2():
    # Start from two identical conv-dense graphs, then apply one deepening
    # and two widening morphs to one copy: the accumulated edit distance
    # between the resulting descriptors should be 1.5.
    descriptor1 = get_conv_dense_model().extract_descriptor()
    graph = get_conv_dense_model()
    graph.to_conv_deeper_model(1, 3)
    graph.to_wider_model(5, 6)
    graph.to_wider_model(17, 3)
    descriptor2 = graph.extract_descriptor()
    assert edit_distance(descriptor1, descriptor2, 1.0) == 1.5
def test_bourgain_embedding():
    # The Bourgain embedding of an n x n distance matrix keeps one row per
    # input point (shapes asserted for n = 1 and n = 2).
    assert bourgain_embedding_matrix([[0]]).shape == (1, 1)
    assert bourgain_embedding_matrix([[1, 0], [0, 1]]).shape == (2, 2)
def test_gpr():
    # After a first fit followed by an incremental fit, the Gaussian process
    # should reproduce both training targets to within 1e-4.
    gpr = IncrementalGaussianProcess(1.0)
    gpr.first_fit([get_add_skip_model().extract_descriptor()], [0.5])
    assert gpr.first_fitted
    gpr.incremental_fit([get_concat_skip_model().extract_descriptor()], [0.6])
    assert abs(gpr.predict(np.array([get_add_skip_model().extract_descriptor()]))[0] - 0.5) < 1e-4
    assert abs(gpr.predict(np.array([get_concat_skip_model().extract_descriptor()]))[0] - 0.6) < 1e-4
| 37.441176 | 101 | 0.732914 | from autokeras.bayesian import *
from tests.common import get_add_skip_model, get_concat_skip_model, get_conv_dense_model
def test_edit_distance():
    # Add-skip vs. concat-skip graph descriptors should be 2.0 apart.
    descriptor1 = get_add_skip_model().extract_descriptor()
    descriptor2 = get_concat_skip_model().extract_descriptor()
    assert edit_distance(descriptor1, descriptor2, 1.0) == 2.0
def test_edit_distance2():
    # One deepening plus two widening morphs on an identical starting graph
    # should yield an edit distance of 1.5.
    descriptor1 = get_conv_dense_model().extract_descriptor()
    graph = get_conv_dense_model()
    graph.to_conv_deeper_model(1, 3)
    graph.to_wider_model(5, 6)
    graph.to_wider_model(17, 3)
    descriptor2 = graph.extract_descriptor()
    assert edit_distance(descriptor1, descriptor2, 1.0) == 1.5
def test_bourgain_embedding():
    # Embedding of an n x n distance matrix keeps one row per point.
    assert bourgain_embedding_matrix([[0]]).shape == (1, 1)
    assert bourgain_embedding_matrix([[1, 0], [0, 1]]).shape == (2, 2)
def test_gpr():
    # After first_fit + incremental_fit the GP should reproduce both
    # training targets to within 1e-4.
    gpr = IncrementalGaussianProcess(1.0)
    gpr.first_fit([get_add_skip_model().extract_descriptor()], [0.5])
    assert gpr.first_fitted
    gpr.incremental_fit([get_concat_skip_model().extract_descriptor()], [0.6])
    assert abs(gpr.predict(np.array([get_add_skip_model().extract_descriptor()]))[0] - 0.5) < 1e-4
    assert abs(gpr.predict(np.array([get_concat_skip_model().extract_descriptor()]))[0] - 0.6) < 1e-4
| true | true |
f72ef82aa0bb7f97b0195399046eb262d8c67ee3 | 17,244 | py | Python | pymongo/operations.py | james00209/mongodb-mongo-python-driver | bf6af9fd77bc13c0668018500071992e9e5ba05b | [
"Apache-2.0"
] | 1 | 2020-08-04T08:30:25.000Z | 2020-08-04T08:30:25.000Z | pymongo/operations.py | james00209/mongodb-mongo-python-driver | bf6af9fd77bc13c0668018500071992e9e5ba05b | [
"Apache-2.0"
] | null | null | null | pymongo/operations.py | james00209/mongodb-mongo-python-driver | bf6af9fd77bc13c0668018500071992e9e5ba05b | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operation class definitions."""
from bson.py3compat import string_type
from pymongo import helpers
from pymongo.common import validate_boolean, validate_is_mapping, validate_list
from pymongo.collation import validate_collation_or_none
from pymongo.helpers import _gen_index_name, _index_document, _index_list
class InsertOne(object):
    """Represents a single insert_one operation for use with
    :meth:`~pymongo.collection.Collection.bulk_write`.
    """

    __slots__ = ("_doc",)

    def __init__(self, document):
        """Create an InsertOne instance.

        :Parameters:
          - `document`: The document to insert. If the document is missing an
            _id field one will be added.
        """
        self._doc = document

    def _add_to_bulk(self, bulkobj):
        """Register this insert with the _Bulk instance `bulkobj`."""
        bulkobj.add_insert(self._doc)

    def __repr__(self):
        return "InsertOne(%r)" % (self._doc,)

    def __eq__(self, other):
        # Only instances of exactly the same class are comparable.
        if type(self) is not type(other):
            return NotImplemented
        return self._doc == other._doc

    def __ne__(self, other):
        return not self == other
class DeleteOne(object):
    """Represents a delete_one operation."""

    __slots__ = ("_filter", "_collation", "_hint")

    def __init__(self, filter, collation=None, hint=None):
        """Create a DeleteOne instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the document to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.4 and above.

        .. versionchanged:: 3.11
           Added the ``hint`` option.
        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if hint is not None:
            # A string hint is an index name and passes through unchanged;
            # anything else is normalized to an index document.
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)
        self._filter = filter
        self._collation = collation
        self._hint = hint

    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`."""
        # limit=1: delete at most one matching document (vs. DeleteMany's 0).
        bulkobj.add_delete(self._filter, 1, collation=self._collation,
                           hint=self._hint)

    def __repr__(self):
        # Include the hint so the repr reflects the whole operation, matching
        # ReplaceOne/_UpdateOp.
        return "DeleteOne(%r, %r, %r)" % (
            self._filter, self._collation, self._hint)

    def __eq__(self, other):
        if type(other) == type(self):
            # Compare the hint too: operations that differ only in their
            # hint are not interchangeable.
            return ((other._filter, other._collation, other._hint) ==
                    (self._filter, self._collation, self._hint))
        return NotImplemented

    def __ne__(self, other):
        return not self == other
class DeleteMany(object):
    """Represents a delete_many operation."""

    __slots__ = ("_filter", "_collation", "_hint")

    def __init__(self, filter, collation=None, hint=None):
        """Create a DeleteMany instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the documents to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.4 and above.

        .. versionchanged:: 3.11
           Added the ``hint`` option.
        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if hint is not None:
            # A string hint is an index name and passes through unchanged;
            # anything else is normalized to an index document.
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)
        self._filter = filter
        self._collation = collation
        self._hint = hint

    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`."""
        # limit=0: delete every matching document (vs. DeleteOne's 1).
        bulkobj.add_delete(self._filter, 0, collation=self._collation,
                           hint=self._hint)

    def __repr__(self):
        # Include the hint so the repr reflects the whole operation, matching
        # ReplaceOne/_UpdateOp.
        return "DeleteMany(%r, %r, %r)" % (
            self._filter, self._collation, self._hint)

    def __eq__(self, other):
        if type(other) == type(self):
            # Compare the hint too: operations that differ only in their
            # hint are not interchangeable.
            return ((other._filter, other._collation, other._hint) ==
                    (self._filter, self._collation, self._hint))
        return NotImplemented

    def __ne__(self, other):
        return not self == other
class ReplaceOne(object):
    """Represents a replace_one operation."""

    __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint")

    def __init__(self, filter, replacement, upsert=False, collation=None,
                 hint=None):
        """Create a ReplaceOne instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the document to replace.
          - `replacement`: The new document.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.2 and above.

        .. versionchanged:: 3.11
           Added the ``hint`` option.
        .. versionchanged:: 3.5
           Added the ``collation`` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if hint is not None:
            # A string hint is an index name and passes through unchanged;
            # anything else is normalized to an index document.
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)
        self._filter = filter
        self._doc = replacement
        self._upsert = upsert
        self._collation = collation
        self._hint = hint

    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`."""
        bulkobj.add_replace(self._filter, self._doc, self._upsert,
                            collation=self._collation, hint=self._hint)

    def __eq__(self, other):
        if type(other) == type(self):
            # BUG FIX: the right-hand tuple previously ended with
            # ``other._hint``, so two operations that differed only in their
            # hint always compared equal.  Compare self's hint instead.
            return (
                (other._filter, other._doc, other._upsert, other._collation,
                 other._hint) == (self._filter, self._doc, self._upsert,
                                  self._collation, self._hint))
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "%s(%r, %r, %r, %r, %r)" % (
            self.__class__.__name__, self._filter, self._doc, self._upsert,
            self._collation, self._hint)
class _UpdateOp(object):
    """Private base class shared by the update operations; validates and
    stores the fields common to UpdateOne and UpdateMany."""

    __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters",
                 "_hint")

    def __init__(self, filter, doc, upsert, collation, array_filters, hint):
        # Each option is validated only when it was actually supplied.
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if array_filters is not None:
            validate_list("array_filters", array_filters)
        if hint is not None and not isinstance(hint, string_type):
            # Non-string hints are normalized to an index document.
            hint = helpers._index_document(hint)
        self._filter = filter
        self._doc = doc
        self._upsert = upsert
        self._collation = collation
        self._array_filters = array_filters
        self._hint = hint

    def _fields(self):
        # Tuple of everything that defines this operation's identity.
        return (self._filter, self._doc, self._upsert, self._collation,
                self._array_filters, self._hint)

    def __eq__(self, other):
        if type(other) is not type(self):
            return NotImplemented
        return self._fields() == other._fields()

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "%s(%r, %r, %r, %r, %r, %r)" % (
            (self.__class__.__name__,) + self._fields())
class UpdateOne(_UpdateOp):
    """Represents an update_one operation."""

    __slots__ = ()

    def __init__(self, filter, update, upsert=False, collation=None,
                 array_filters=None, hint=None):
        """Create an UpdateOne instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the document to update.
          - `update`: The modifications to apply.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.
          - `array_filters` (optional): A list of filters specifying which
            array elements an update should apply. Requires MongoDB 3.6+.
          - `hint` (optional): An index to use to support the query predicate,
            given by name or in the same format as passed to
            :meth:`~pymongo.collection.Collection.create_index`. This option
            is only supported on MongoDB 4.2 and above.

        .. versionchanged:: 3.11
           Added the `hint` option.
        .. versionchanged:: 3.9
           Added the ability to accept a pipeline as the `update`.
        .. versionchanged:: 3.6
           Added the `array_filters` option.
        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        super(UpdateOne, self).__init__(filter, update, upsert, collation,
                                        array_filters, hint)

    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`."""
        # multi=False: update at most one matching document.
        multi = False
        bulkobj.add_update(self._filter, self._doc, multi, self._upsert,
                           collation=self._collation,
                           array_filters=self._array_filters,
                           hint=self._hint)
class UpdateMany(_UpdateOp):
    """Represents an update_many operation."""

    __slots__ = ()

    def __init__(self, filter, update, upsert=False, collation=None,
                 array_filters=None, hint=None):
        """Create an UpdateMany instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the documents to update.
          - `update`: The modifications to apply.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.
          - `array_filters` (optional): A list of filters specifying which
            array elements an update should apply. Requires MongoDB 3.6+.
          - `hint` (optional): An index to use to support the query predicate,
            given by name or in the same format as passed to
            :meth:`~pymongo.collection.Collection.create_index`. This option
            is only supported on MongoDB 4.2 and above.

        .. versionchanged:: 3.11
           Added the `hint` option.
        .. versionchanged:: 3.9
           Added the ability to accept a pipeline as the `update`.
        .. versionchanged:: 3.6
           Added the `array_filters` option.
        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        super(UpdateMany, self).__init__(filter, update, upsert, collation,
                                         array_filters, hint)

    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`."""
        # multi=True: update every matching document.
        multi = True
        bulkobj.add_update(self._filter, self._doc, multi, self._upsert,
                           collation=self._collation,
                           array_filters=self._array_filters,
                           hint=self._hint)
class IndexModel(object):
    """Represents an index to create.

    For use with :meth:`~pymongo.collection.Collection.create_indexes`.
    """

    __slots__ = ("__document",)

    def __init__(self, keys, **kwargs):
        """Create an Index instance.

        Takes either a single key or a list of (key, direction) pairs.  The
        key(s) must be strings and the direction(s) one of
        :data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
        :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
        :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED` or
        :data:`~pymongo.TEXT`.

        Valid options include, but are not limited to: `name`, `unique`,
        `background`, `sparse`, `bucketSize`, `min`, `max`,
        `expireAfterSeconds`, `partialFilterExpression`, `collation` and
        `wildcardProjection`.  See the MongoDB documentation for the full
        list of options supported by each server version.

        :Parameters:
          - `keys`: a single key or a list of (key, direction) pairs
            specifying the index to create
          - `**kwargs` (optional): any additional index creation options
            should be passed as keyword arguments

        .. versionchanged:: 3.2
           Added partialFilterExpression to support partial indexes.
        """
        index_keys = _index_list(keys)
        if "name" not in kwargs:
            # No explicit name supplied: derive one from the key pattern.
            kwargs["name"] = _gen_index_name(index_keys)
        kwargs["key"] = _index_document(index_keys)
        # ``collation`` is validated separately and re-attached afterwards.
        collation = validate_collation_or_none(kwargs.pop("collation", None))
        if collation is not None:
            kwargs["collation"] = collation
        self.__document = kwargs

    @property
    def document(self):
        """An index document suitable for passing to the createIndexes
        command.
        """
        return self.__document
| 38.577181 | 100 | 0.603978 |
from bson.py3compat import string_type
from pymongo import helpers
from pymongo.common import validate_boolean, validate_is_mapping, validate_list
from pymongo.collation import validate_collation_or_none
from pymongo.helpers import _gen_index_name, _index_document, _index_list
class InsertOne(object):
    """Represents an insert_one operation for use with
    :meth:`~pymongo.collection.Collection.bulk_write`."""
    __slots__ = ("_doc",)
    def __init__(self, document):
        """Create an InsertOne instance.

        :Parameters:
          - `document`: The document to insert. If the document is missing
            an _id field one will be added.
        """
        self._doc = document
    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`."""
        bulkobj.add_insert(self._doc)
    def __repr__(self):
        return "InsertOne(%r)" % (self._doc,)
    def __eq__(self, other):
        # Equal iff both operations insert equal documents.
        if type(other) == type(self):
            return other._doc == self._doc
        return NotImplemented
    def __ne__(self, other):
        return not self == other
class DeleteOne(object):
    """Represents a delete_one operation (deletes at most one matching
    document) for use with bulk_write."""
    __slots__ = ("_filter", "_collation", "_hint")
    def __init__(self, filter, collation=None, hint=None):
        """Create a DeleteOne instance.

        :Parameters:
          - `filter`: A query that matches the document to delete.
          - `collation` (optional): A
            :class:`~pymongo.collation.Collation` instance.
          - `hint` (optional): An index name, or an index specification in
            ``create_index`` form.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if hint is not None:
            # String hints (index names) pass through; anything else is
            # normalized to an index document.
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)
        self._filter = filter
        self._collation = collation
        self._hint = hint
    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`; the second
        argument (1) distinguishes it from DeleteMany, which passes 0."""
        bulkobj.add_delete(self._filter, 1, collation=self._collation,
                           hint=self._hint)
    def __repr__(self):
        # NOTE(review): _hint is not reflected here or in __eq__, so two
        # operations differing only in their hint look identical — confirm
        # whether that is intended.
        return "DeleteOne(%r, %r)" % (self._filter, self._collation)
    def __eq__(self, other):
        if type(other) == type(self):
            return ((other._filter, other._collation) ==
                    (self._filter, self._collation))
        return NotImplemented
    def __ne__(self, other):
        return not self == other
class DeleteMany(object):
    """Represents a delete_many operation (deletes every matching document)
    for use with bulk_write."""
    __slots__ = ("_filter", "_collation", "_hint")
    def __init__(self, filter, collation=None, hint=None):
        """Create a DeleteMany instance.

        :Parameters:
          - `filter`: A query that matches the documents to delete.
          - `collation` (optional): A
            :class:`~pymongo.collation.Collation` instance.
          - `hint` (optional): An index name, or an index specification in
            ``create_index`` form.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if hint is not None:
            # String hints (index names) pass through; anything else is
            # normalized to an index document.
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)
        self._filter = filter
        self._collation = collation
        self._hint = hint
    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`; the second
        argument (0) distinguishes it from DeleteOne, which passes 1."""
        bulkobj.add_delete(self._filter, 0, collation=self._collation,
                           hint=self._hint)
    def __repr__(self):
        # NOTE(review): _hint is not reflected here or in __eq__ — confirm
        # whether that is intended.
        return "DeleteMany(%r, %r)" % (self._filter, self._collation)
    def __eq__(self, other):
        if type(other) == type(self):
            return ((other._filter, other._collation) ==
                    (self._filter, self._collation))
        return NotImplemented
    def __ne__(self, other):
        return not self == other
class ReplaceOne(object):
    """Represents a replace_one operation for use with bulk_write."""

    __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint")

    def __init__(self, filter, replacement, upsert=False, collation=None,
                 hint=None):
        """Create a ReplaceOne instance.

        :Parameters:
          - `filter`: A query that matches the document to replace.
          - `replacement`: The new document.
          - `upsert` (optional): If ``True``, insert when nothing matches.
          - `collation` (optional): A
            :class:`~pymongo.collation.Collation` instance.
          - `hint` (optional): An index name, or an index specification in
            ``create_index`` form.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if hint is not None:
            # String hints (index names) pass through; anything else is
            # normalized to an index document.
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)
        self._filter = filter
        self._doc = replacement
        self._upsert = upsert
        self._collation = collation
        self._hint = hint

    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`."""
        bulkobj.add_replace(self._filter, self._doc, self._upsert,
                            collation=self._collation, hint=self._hint)

    def __eq__(self, other):
        if type(other) == type(self):
            # BUG FIX: the right-hand tuple previously ended with
            # ``other._hint``, so operations differing only in their hint
            # always compared equal.  Compare self's hint instead.
            return (
                (other._filter, other._doc, other._upsert, other._collation,
                 other._hint) == (self._filter, self._doc, self._upsert,
                                  self._collation, self._hint))
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "%s(%r, %r, %r, %r, %r)" % (
            self.__class__.__name__, self._filter, self._doc, self._upsert,
            self._collation, self._hint)
class _UpdateOp(object):
    """Private base class for the update operations (UpdateOne/UpdateMany):
    validates and stores their shared fields."""
    __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters",
                 "_hint")
    def __init__(self, filter, doc, upsert, collation, array_filters, hint):
        # Each option is validated only when it was actually supplied.
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if array_filters is not None:
            validate_list("array_filters", array_filters)
        if hint is not None:
            # String hints (index names) pass through; anything else is
            # normalized to an index document.
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)
        self._filter = filter
        self._doc = doc
        self._upsert = upsert
        self._collation = collation
        self._array_filters = array_filters
        self._hint = hint
    def __eq__(self, other):
        # Equal iff every stored field matches on an instance of the same
        # concrete class.
        if type(other) == type(self):
            return (
                (other._filter, other._doc, other._upsert, other._collation,
                 other._array_filters, other._hint) ==
                (self._filter, self._doc, self._upsert, self._collation,
                 self._array_filters, self._hint))
        return NotImplemented
    def __ne__(self, other):
        return not self == other
    def __repr__(self):
        return "%s(%r, %r, %r, %r, %r, %r)" % (
            self.__class__.__name__, self._filter, self._doc, self._upsert,
            self._collation, self._array_filters, self._hint)
class UpdateOne(_UpdateOp):
    """Represents an update_one operation for use with bulk_write."""
    __slots__ = ()
    def __init__(self, filter, update, upsert=False, collation=None,
                 array_filters=None, hint=None):
        """Create an UpdateOne instance; all validation and storage happens
        in :class:`_UpdateOp`."""
        super(UpdateOne, self).__init__(filter, update, upsert, collation,
                                        array_filters, hint)
    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`.  The third
        argument (False) distinguishes it from UpdateMany, which passes
        True."""
        bulkobj.add_update(self._filter, self._doc, False, self._upsert,
                           collation=self._collation,
                           array_filters=self._array_filters,
                           hint=self._hint)
class UpdateMany(_UpdateOp):
    """Represents an update_many operation for use with bulk_write."""
    __slots__ = ()
    def __init__(self, filter, update, upsert=False, collation=None,
                 array_filters=None, hint=None):
        """Create an UpdateMany instance; all validation and storage happens
        in :class:`_UpdateOp`."""
        super(UpdateMany, self).__init__(filter, update, upsert, collation,
                                         array_filters, hint)
    def _add_to_bulk(self, bulkobj):
        """Add this operation to the _Bulk instance `bulkobj`.  The third
        argument (True) distinguishes it from UpdateOne, which passes
        False."""
        bulkobj.add_update(self._filter, self._doc, True, self._upsert,
                           collation=self._collation,
                           array_filters=self._array_filters,
                           hint=self._hint)
class IndexModel(object):
    """Represents an index to create, for use with
    :meth:`~pymongo.collection.Collection.create_indexes`."""
    __slots__ = ("__document",)
    def __init__(self, keys, **kwargs):
        """Create an Index instance from a single key or a list of
        (key, direction) pairs; any extra keyword arguments become index
        options.  A ``name`` is generated when not supplied, and any
        ``collation`` option is validated before being stored."""
        keys = _index_list(keys)
        if "name" not in kwargs:
            kwargs["name"] = _gen_index_name(keys)
        kwargs["key"] = _index_document(keys)
        collation = validate_collation_or_none(kwargs.pop('collation', None))
        self.__document = kwargs
        if collation is not None:
            self.__document['collation'] = collation
    @property
    def document(self):
        """An index document suitable for passing to the createIndexes
        command."""
        return self.__document
| true | true |
f72ef8d492f94e15b347ad4d22bde114fd4df114 | 9,789 | py | Python | cartography/classification/glue_utils.py | dyahadila/ood_cartography | ff65bf2b1a170e2913f0019a15af3398a1808f0f | [
"Apache-2.0"
] | null | null | null | cartography/classification/glue_utils.py | dyahadila/ood_cartography | ff65bf2b1a170e2913f0019a15af3398a1808f0f | [
"Apache-2.0"
] | null | null | null | cartography/classification/glue_utils.py | dyahadila/ood_cartography | ff65bf2b1a170e2913f0019a15af3398a1808f0f | [
"Apache-2.0"
] | null | null | null | import logging
import os
from transformers import glue_compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes
from transformers import glue_processors
from transformers.data.processors.glue import MnliMismatchedProcessor
from transformers.data.processors.utils import InputFeatures
from transformers.file_utils import is_tf_available
if is_tf_available():
import tensorflow as tf
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
from cartography.data_utils_glue import convert_string_to_unique_number
from cartography.classification.mnli_utils import AdaptedMnliMismatchedProcessor, AdaptedMnliProcessor
from cartography.classification.qnli_utils import AdaptedQnliProcessor
from cartography.classification.snli_utils import SNLIProcessor
from cartography.classification.winogrande_utils import WinograndeProcessor
from cartography.classification.wnli_utils import AdaptedWnliProcessor
from cartography.classification.rte_utils import AdaptedRteProcessor
import pandas as pd
# Register the adapted processors with the stock `transformers` GLUE
# machinery so they can be looked up by task name (adding entries for tasks
# such as SNLI and WinoGrande).
glue_processors["snli"] = SNLIProcessor
glue_processors["mnli"] = AdaptedMnliProcessor
glue_processors["mnli-mm"] = AdaptedMnliMismatchedProcessor
glue_processors["qnli"] = AdaptedQnliProcessor
glue_processors["winogrande"] = WinograndeProcessor
glue_processors["wnli"] = AdaptedWnliProcessor
glue_processors["rte"] = AdaptedRteProcessor
# Both added tasks are classification (not regression) problems.
glue_output_modes["snli"] = "classification"
glue_output_modes["winogrande"] = "classification"
class AdaptedInputFeatures(InputFeatures):
    """InputFeatures extended with a unique integer ``example_id``, optional
    per-example heuristic annotations (``lex``/``const``/``subs``) and the
    example's ``original_idx`` within its dataset split."""

    def __init__(self, input_ids, attention_mask=None, token_type_ids=None,
                 label=None, example_id=None, lex=None, const=None, subs=None,
                 original_idx=None):
        # Standard transformer inputs.
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label
        # Stable integer id derived from the example's guid.
        self.example_id = example_id
        # Heuristic annotations; None when heuristics were not loaded.
        self.lex = lex
        self.const = const
        self.subs = subs
        # Position of the example in the original split.
        self.original_idx = original_idx
def get_instance_heuristics(task, data_split, data_dir="/home/jusun/adila001"):
    """Load per-example heuristic annotations for a task/split.

    Reads ``<data_dir>/<TASK>/<mode>_heuristic.tsv`` where ``mode`` is the
    split name, collapsed to ``dev`` (or ``dev_matched`` for MNLI) for any
    dev-style split.

    :Parameters:
      - `task`: task name (case-insensitive), e.g. ``"mnli"``.
      - `data_split`: split name, e.g. ``"train"`` or ``"dev"``.
      - `data_dir` (optional): root directory holding the heuristic files.
        Defaults to the previously hard-coded location so existing callers
        are unaffected.

    Returns a tuple ``(lexical, constituent, subsequence)`` of per-example
    lists.  When the file has no ``constituent`` column, a list of zeros of
    the same length is returned in its place.
    """
    mode = data_split
    if 'dev' in data_split:
        # All dev-style splits share a single heuristic file; MNLI's is the
        # matched dev set.
        mode = 'dev_matched' if task.upper() == 'MNLI' else 'dev'
    path = os.path.join(data_dir, task.upper(), "{}_heuristic.tsv".format(mode))
    df = pd.read_csv(path, delimiter="\t|\n")
    lexical = df["lexical"].tolist()
    if "constituent" in df.columns:
        constituent = df["constituent"].tolist()
    else:
        constituent = [0] * df.shape[0]
    subsequence = df["subsequence"].tolist()
    return lexical, constituent, subsequence
def adapted_glue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=512,
    task=None,
    label_list=None,
    output_mode=None,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
    heuristics=True,
    data_split='train',
):
    """
    Adapted from `transformers`. New functionality: also return an integer ID
    for each example, attach heuristic annotations and the original index.

    Loads a data file into a list of ``InputFeatures``

    Args:
        examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples
        max_length: Maximum example length
        task: GLUE task
        label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
        output_mode: String indicating the output mode. Either ``regression`` or ``classification``
        pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
        pad_token: Padding token
        pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
        mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
            and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
            actual values)
        heuristics: If ``True``, load per-example heuristic annotations via
            ``get_instance_heuristics`` and attach them to each feature.
        data_split: Name of the split being featurized (used to locate the
            heuristic file).

    Returns:
        If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
        containing the task-specific features. If the input is a list of ``InputExamples``, will return
        a list of task-specific ``InputFeatures`` which can be fed to the model.

    """
    is_tf_dataset = False
    if is_tf_available() and isinstance(examples, tf.data.Dataset):
        is_tf_dataset = True

    if task is not None:
        processor = glue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = glue_output_modes[task]
            logger.info("Using output mode %s for task %s" % (output_mode, task))

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    # Per-example heuristic annotations; left empty when heuristics are off.
    lex = []
    const = []
    subs = []
    if heuristics:
        lex, const, subs = get_instance_heuristics(task, data_split)
    for (ex_index, example) in enumerate(examples):
        len_examples = 0
        if is_tf_dataset:
            example = processor.get_example_from_tensor_dict(example)
            example = processor.tfds_map(example)
            len_examples = tf.data.experimental.cardinality(examples)
        else:
            len_examples = len(examples)
        if ex_index % 10000 == 0:
            logger.info("Writing example %d/%d" % (ex_index, len_examples))

        inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)
        input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
        assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
            len(attention_mask), max_length
        )
        assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
            len(token_type_ids), max_length
        )

        if output_mode == "classification":
            label = label_map[example.label]
        elif output_mode == "regression":
            label = float(example.label)
        else:
            raise KeyError(output_mode)

        example_int_id = convert_string_to_unique_number(example.guid)
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info(f"guid: {example_int_id}")
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label))

        # BUG FIX: the ``subs`` guard previously tested ``len(const)`` (and
        # was spelled ``0else``, which is a syntax error on Python 3.12+);
        # gate each annotation on its own list.
        features.append(
            AdaptedInputFeatures(input_ids=input_ids,
                                 attention_mask=attention_mask,
                                 token_type_ids=token_type_ids,
                                 label=label,
                                 example_id=example_int_id,
                                 lex=None if len(lex) == 0 else lex[ex_index],
                                 const=None if len(const) == 0 else const[ex_index],
                                 subs=None if len(subs) == 0 else subs[ex_index],
                                 original_idx=ex_index))

    if is_tf_available() and is_tf_dataset:

        def gen():
            for ex in features:
                yield (
                    {
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        return tf.data.Dataset.from_generator(
            gen,
            ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
            (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "token_type_ids": tf.TensorShape([None]),
                },
                tf.TensorShape([]),
            ),
        )

    return features
def adapted_glue_compute_metrics(task_name, preds, labels):
    """Compute GLUE metrics, extending `glue_compute_metrics` to tasks it
    does not know about (snli, winogrande, toxic), which are scored with
    MNLI's metric."""
    try:
        return glue_compute_metrics(task_name, preds, labels)
    except KeyError:
        # Unknown task: only the three extra tasks get the MNLI fallback
        # (MNLI also uses accuracy); anything else is re-raised.
        if task_name not in ("snli", "winogrande", "toxic"):
            raise KeyError(task_name)
        return glue_compute_metrics("mnli", preds, labels)
| 41.478814 | 130 | 0.647053 | import logging
import os
from transformers import glue_compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes
from transformers import glue_processors
from transformers.data.processors.glue import MnliMismatchedProcessor
from transformers.data.processors.utils import InputFeatures
from transformers.file_utils import is_tf_available
if is_tf_available():
import tensorflow as tf
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
from cartography.data_utils_glue import convert_string_to_unique_number
from cartography.classification.mnli_utils import AdaptedMnliMismatchedProcessor, AdaptedMnliProcessor
from cartography.classification.qnli_utils import AdaptedQnliProcessor
from cartography.classification.snli_utils import SNLIProcessor
from cartography.classification.winogrande_utils import WinograndeProcessor
from cartography.classification.wnli_utils import AdaptedWnliProcessor
from cartography.classification.rte_utils import AdaptedRteProcessor
import pandas as pd
glue_processors["snli"] = SNLIProcessor
glue_processors["mnli"] = AdaptedMnliProcessor
glue_processors["mnli-mm"] = AdaptedMnliMismatchedProcessor
glue_processors["qnli"] = AdaptedQnliProcessor
glue_processors["winogrande"] = WinograndeProcessor
glue_processors["wnli"] = AdaptedWnliProcessor
glue_processors["rte"] = AdaptedRteProcessor
glue_output_modes["snli"] = "classification"
glue_output_modes["winogrande"] = "classification"
class AdaptedInputFeatures(InputFeatures):
def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None, example_id=None, lex = None,
const=None, subs=None, original_idx=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
self.example_id = example_id
self.lex = lex
self.const = const
self.subs = subs
self.original_idx = original_idx
def get_instance_heuristics(task, data_split):
mode = data_split
= 'dev'
if task.upper() == 'MNLI':
mode = 'dev_matched'
df = pd.read_csv("/home/jusun/adila001/{}/{}_heuristic.tsv".format(task.upper(), mode), delimiter="\t|\n")
lexical = df["lexical"].tolist()
if 'constituent' in set(df.columns):
constituent = df["constituent"].tolist()
else:
constituent = [0 for i in range(df.shape[0])]
subsequence = df["subsequence"].tolist()
return lexical, constituent, subsequence
def adapted_glue_convert_examples_to_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
heuristics=True,
data_split='train',
):
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
lex = []
const= []
subs = []
if heuristics==True:
lex, const, subs = get_instance_heuristics(task, data_split)
for (ex_index, example) in enumerate(examples):
len_examples = 0
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
len_examples = tf.data.experimental.cardinality(examples)
else:
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length
)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length
)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
example_int_id = convert_string_to_unique_number(example.guid)
if ex_index < 5:
logger.info("*** Example ***")
logger.info(f"guid: {example_int_id}")
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
AdaptedInputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label,
example_id=example_int_id,
lex=None if len(lex)==0 else lex[ex_index],
const=None if len(const)==0 else const[ex_index],
subs=None if len(const)==0else subs[ex_index],
original_idx=ex_index))
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
return features
def adapted_glue_compute_metrics(task_name, preds, labels):
try:
return glue_compute_metrics(task_name, preds, labels)
except KeyError:
if task_name in ["snli", "winogrande", "toxic"]:
return glue_compute_metrics("mnli", preds, labels)
raise KeyError(task_name)
| true | true |
f72ef9d32075ad3081247fcdbd108c4a87b8024a | 446 | py | Python | invenio_logging/version.py | kprzerwa/invenio-logging | 1eab0074eaf732f8bd303817e931fdf0c0a53b71 | [
"MIT"
] | null | null | null | invenio_logging/version.py | kprzerwa/invenio-logging | 1eab0074eaf732f8bd303817e931fdf0c0a53b71 | [
"MIT"
] | null | null | null | invenio_logging/version.py | kprzerwa/invenio-logging | 1eab0074eaf732f8bd303817e931fdf0c0a53b71 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for Invenio-Logging.
This file is imported by ``invenio_logging.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = '1.2.0'
| 24.777778 | 72 | 0.726457 |
from __future__ import absolute_import, print_function
__version__ = '1.2.0'
| true | true |
f72ef9eff5cffb76416d59a9c3337dac4842e086 | 2,066 | py | Python | flaskblog/posts/routes.py | amiinegal/Blog | b88b29603832048a1322cfd79b2cef0684282f4b | [
"Unlicense",
"MIT"
] | null | null | null | flaskblog/posts/routes.py | amiinegal/Blog | b88b29603832048a1322cfd79b2cef0684282f4b | [
"Unlicense",
"MIT"
] | null | null | null | flaskblog/posts/routes.py | amiinegal/Blog | b88b29603832048a1322cfd79b2cef0684282f4b | [
"Unlicense",
"MIT"
] | null | null | null | from flask import (render_template, url_for, flash,
redirect, request, abort, Blueprint)
from flask_login import current_user, login_required
from flaskblog import db
from flaskblog.models import Post
from flaskblog.posts.forms import PostForm
posts = Blueprint('posts', __name__)
@posts.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
    """Create a new blog post authored by the logged-in user."""
    form = PostForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re-)render the form.
        return render_template('create_post.html', title='New Post',
                               form=form, legend='New Post')
    created = Post(title=form.title.data, content=form.content.data,
                   author=current_user)
    db.session.add(created)
    db.session.commit()
    flash('Your post has been created!', 'success')
    return redirect(url_for('main.home_page'))
@posts.route("/post/<int:post_id>")
def post(post_id):
    """Display a single post, or 404 when the id is unknown."""
    found = Post.query.get_or_404(post_id)
    return render_template('post.html', title=found.title, post=found)
@posts.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
    """Edit an existing post; only its author may update it."""
    entry = Post.query.get_or_404(post_id)
    if entry.author != current_user:
        abort(403)  # only the author may edit
    form = PostForm()
    if form.validate_on_submit():
        # Persist the submitted changes and return to the post view.
        entry.title = form.title.data
        entry.content = form.content.data
        db.session.commit()
        flash('Your post has been updated!', 'success')
        return redirect(url_for('posts.post', post_id=entry.id))
    if request.method == 'GET':
        # Pre-fill the form with the current contents.
        form.title.data = entry.title
        form.content.data = entry.content
    return render_template('create_post.html', title='Update Post',
                           form=form, legend='Update Post')
@posts.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
    """Delete a post (POST only); only its author may do so."""
    post = Post.query.get_or_404(post_id)
    if post.author != current_user:
        # Forbid deleting someone else's post.
        abort(403)
    db.session.delete(post)
    db.session.commit()
    flash('Your post has been deleted!', 'success')
    # NOTE(review): this redirects to 'main.home' while new_post above uses
    # 'main.home_page' -- confirm which endpoint actually exists.
    return redirect(url_for('main.home'))
| 33.868852 | 90 | 0.664085 | from flask import (render_template, url_for, flash,
redirect, request, abort, Blueprint)
from flask_login import current_user, login_required
from flaskblog import db
from flaskblog.models import Post
from flaskblog.posts.forms import PostForm
posts = Blueprint('posts', __name__)
@posts.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created!', 'success')
return redirect(url_for('main.home_page'))
return render_template('create_post.html', title='New Post',
form=form, legend='New Post')
@posts.route("/post/<int:post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@posts.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('posts.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post',
form=form, legend='Update Post')
@posts.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.home'))
| true | true |
f72ef9fd42c6c5d66574a205a52aee8fc64d8a20 | 690 | py | Python | backend/migrations/versions/ae566f24d973_.py | sartography/star-drive | c0f33378d42913c3e677e07f74eb46d7b2b82a0a | [
"MIT"
] | null | null | null | backend/migrations/versions/ae566f24d973_.py | sartography/star-drive | c0f33378d42913c3e677e07f74eb46d7b2b82a0a | [
"MIT"
] | 368 | 2018-12-18T14:43:20.000Z | 2022-03-02T02:54:18.000Z | backend/migrations/versions/ae566f24d973_.py | sartography/star-drive | c0f33378d42913c3e677e07f74eb46d7b2b82a0a | [
"MIT"
] | 2 | 2019-10-02T03:06:06.000Z | 2020-10-05T16:53:48.000Z | """empty message
Revision ID: ae566f24d973
Revises: 838f47fa598e
Create Date: 2020-02-18 17:02:08.574872
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ae566f24d973'
down_revision = '838f47fa598e'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable, timezone-aware 'last_login' column to stardrive_user."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('stardrive_user', sa.Column('last_login', sa.DateTime(timezone=True), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping the 'last_login' column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('stardrive_user', 'last_login')
    # ### end Alembic commands ###
| 23.793103 | 103 | 0.705797 | from alembic import op
import sqlalchemy as sa
revision = 'ae566f24d973'
down_revision = '838f47fa598e'
branch_labels = None
depends_on = None
def upgrade():
| true | true |
f72efb136a1aa339fcf07bfbcbdf071f1369ec8a | 883 | py | Python | dynadb/migrations/0075_auto_20170124_1439.py | GPCRmd/GPCRmd | 7dc75359ace4a00c1597bdb7a86ebee17d51f09c | [
"Apache-2.0"
] | 3 | 2019-03-06T13:35:38.000Z | 2020-08-05T15:31:29.000Z | dynadb/migrations/0075_auto_20170124_1439.py | GPCRmd/GPCRmd | 7dc75359ace4a00c1597bdb7a86ebee17d51f09c | [
"Apache-2.0"
] | null | null | null | dynadb/migrations/0075_auto_20170124_1439.py | GPCRmd/GPCRmd | 7dc75359ace4a00c1597bdb7a86ebee17d51f09c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-24 13:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make dyndbefficacy.reference_id_compound nullable and constrain
    dyndbefficacy.type to a fixed set of choices (default: Full Agonist)."""

    dependencies = [
        ('dynadb', '0074_auto_20170123_1212'),
    ]

    operations = [
        # Allow efficacy rows without an associated compound reference.
        migrations.AlterField(
            model_name='dyndbefficacy',
            name='reference_id_compound',
            field=models.ForeignKey(db_column='reference_id_compound', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='dynadb.DyndbCompound'),
        ),
        # Restrict efficacy type to the five known pharmacological classes.
        migrations.AlterField(
            model_name='dyndbefficacy',
            name='type',
            field=models.SmallIntegerField(choices=[(0, 'Full Agonist'), (1, 'Partial Agonist'), (2, 'Antagonist'), (3, 'Inverse Agonist'), (4, 'Other')], default=0),
        ),
    ]
| 32.703704 | 166 | 0.642129 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dynadb', '0074_auto_20170123_1212'),
]
operations = [
migrations.AlterField(
model_name='dyndbefficacy',
name='reference_id_compound',
field=models.ForeignKey(db_column='reference_id_compound', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='dynadb.DyndbCompound'),
),
migrations.AlterField(
model_name='dyndbefficacy',
name='type',
field=models.SmallIntegerField(choices=[(0, 'Full Agonist'), (1, 'Partial Agonist'), (2, 'Antagonist'), (3, 'Inverse Agonist'), (4, 'Other')], default=0),
),
]
| true | true |
f72efbf64861c201c9fde808e528c73dae484dc4 | 1,157 | py | Python | genomepy/plugins/minimap2.py | tilschaef/genomepy | 4c10e69b6886cf52381caf6498395391834a675b | [
"MIT"
] | 146 | 2019-11-19T16:07:46.000Z | 2022-03-15T16:10:31.000Z | genomepy/plugins/minimap2.py | tilschaef/genomepy | 4c10e69b6886cf52381caf6498395391834a675b | [
"MIT"
] | 125 | 2019-11-19T18:08:23.000Z | 2022-03-30T09:16:46.000Z | genomepy/plugins/minimap2.py | tilschaef/genomepy | 4c10e69b6886cf52381caf6498395391834a675b | [
"MIT"
] | 18 | 2019-12-02T15:54:34.000Z | 2022-03-04T19:16:31.000Z | import os
from genomepy.plugins import Plugin
from genomepy.utils import cmd_ok, mkdir_p, rm_rf, run_index_cmd
class Minimap2Plugin(Plugin):
    """Genomepy plugin that builds a minimap2 (.mmi) index for a genome."""

    def after_genome_download(self, genome, threads=1, force=False):
        """Build the minimap2 index once the genome has been downloaded.

        Does nothing when the minimap2 binary is unavailable, and skips
        indexing when a .mmi file already exists (unless force=True).
        """
        if not cmd_ok("minimap2"):
            return
        plugin_props = genome.plugin["minimap2"]
        target_dir = plugin_props["index_dir"]
        target_index = plugin_props["index_name"]
        if force:
            # Drop any previous index so it is rebuilt from scratch.
            rm_rf(target_dir)
        mkdir_p(target_dir)
        existing = [f for f in os.listdir(target_dir) if f.endswith(".mmi")]
        if not existing:
            run_index_cmd(
                "minimap2",
                f"minimap2 -t {threads} -d {target_index} {genome.filename}",
            )

    def get_properties(self, genome):
        """Return the index directory and index file path for *genome*."""
        base = os.path.join(os.path.dirname(genome.filename), "index", "minimap2")
        return {
            "index_dir": base,
            "index_name": os.path.join(base, f"{genome.name}.mmi"),
        }
| 30.447368 | 78 | 0.560069 | import os
from genomepy.plugins import Plugin
from genomepy.utils import cmd_ok, mkdir_p, rm_rf, run_index_cmd
class Minimap2Plugin(Plugin):
def after_genome_download(self, genome, threads=1, force=False):
if not cmd_ok("minimap2"):
return
index_dir = genome.plugin["minimap2"]["index_dir"]
index_name = genome.plugin["minimap2"]["index_name"]
if force:
rm_rf(index_dir)
mkdir_p(index_dir)
if not any(fname.endswith(".mmi") for fname in os.listdir(index_dir)):
cmd = f"minimap2 -t {threads} -d {index_name} {genome.filename}"
run_index_cmd("minimap2", cmd)
def get_properties(self, genome):
props = {
"index_dir": os.path.join(
os.path.dirname(genome.filename), "index", "minimap2"
),
"index_name": os.path.join(
os.path.dirname(genome.filename),
"index",
"minimap2",
f"{genome.name}.mmi",
),
}
return props
| true | true |
f72efca428a828d722447e9aced971774beef150 | 4,183 | py | Python | python/nano/src/bigdl/nano/automl/tf/objective.py | Forest216/BigDL | 840da9a2eaf395978dd83730b02aa5e5dfbd7989 | [
"Apache-2.0"
] | null | null | null | python/nano/src/bigdl/nano/automl/tf/objective.py | Forest216/BigDL | 840da9a2eaf395978dd83730b02aa5e5dfbd7989 | [
"Apache-2.0"
] | null | null | null | python/nano/src/bigdl/nano/automl/tf/objective.py | Forest216/BigDL | 840da9a2eaf395978dd83730b02aa5e5dfbd7989 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from selectors import EpollSelector
from tensorflow.keras.backend import clear_session
from tensorflow.keras.models import clone_model
import tensorflow as tf
import inspect
import copy
from bigdl.nano.automl.hpo.backend import create_tfkeras_pruning_callback
from bigdl.nano.utils.log4Error import invalidInputError
def _is_creator(model):
return inspect.ismethod(model) or inspect.isfunction(model)
class Objective(object):
    """The tuning objective invoked by the HPO engine once per trial."""
    def __init__(self,
                 model=None,
                 target_metric=None,
                 pruning=False,
                 backend=None,
                 **kwargs
                 ):
        """
        Init the objective.
        :param model: a Keras model instance, or a creator function that
            takes a trial and returns a model. Defaults to None.
        :param target_metric: str (optional): target metric to optimize.
            Defaults to None.
        :param pruning: bool (optional): whether to enable trial pruning.
            Defaults to False.
        :param backend: HPO backend used to turn sampled search spaces
            into concrete values per trial.
        :param kwargs: extra keyword arguments forwarded to ``model.fit``.
        :raise: an invalid-input error when ``model`` is neither a Keras
            model nor a creator callable.
        """
        if not _is_creator(model) and not isinstance(model, tf.keras.Model):
            invalidInputError(False,
                              "You should either pass a Tensorflo Keras model, or "
                              "a model_creator to the Tuning objective.")
        self.model_ = model
        self.target_metric_ = target_metric
        self.pruning = pruning
        self.backend = backend
        self.kwargs = kwargs
    @property
    def target_metric(self):
        """Get the target metric."""
        return self.target_metric_
    @target_metric.setter
    def target_metric(self, value):
        """Set the target metric."""
        # TODO add more validity check here
        self.target_metric_ = value
    def _prepare_fit_args(self, trial):
        """Build the keyword arguments for ``model.fit`` for this *trial*."""
        # only do shallow copy and process/duplicate
        # specific args TODO: may need to handle more cases
        new_kwargs = copy.copy(self.kwargs)
        new_kwargs['verbose'] = 2
        # process batch size: the backend replaces a sampled search space
        # with a concrete value for this trial
        new_kwargs = self.backend.instantiate_param(trial, new_kwargs, 'batch_size')
        # process callbacks: a callable yields fresh callbacks per trial
        callbacks = new_kwargs.get('callbacks', None)
        callbacks = callbacks() if inspect.isfunction(callbacks) else callbacks
        if self.pruning:
            # add a pruning callback so unpromising trials stop early
            callbacks = callbacks or []
            prune_callback = create_tfkeras_pruning_callback(trial, self.target_metric)
            callbacks.append(prune_callback)
        new_kwargs['callbacks'] = callbacks
        return new_kwargs
    def __call__(self, trial):
        """
        Execute training and return the target metric for one trial.
        :param trial: the trial object which provides the hyperparameter
            combination.
        :return: the best observed value of the target metric, or None
            (implicitly) when the metric is absent from the fit history.
        """
        # Clear clutter from previous Keras session graphs.
        clear_session()
        # TODO may add data creator here, e.g. refresh data, reset generators, etc.
        # create model
        if _is_creator(self.model_):
            model = self.model_(trial)
        else:
            # copy model so that the original model is not changed
            # Need tests to check this path
            model = clone_model(self.model_)
        # fit
        new_kwargs = self._prepare_fit_args(trial)
        hist = model.fit(**new_kwargs)
        score = hist.history.get(self.target_metric, None)
        if score is not None:
            if isinstance(score, list):
                # take the best epoch; assumes a higher metric is better
                # -- TODO confirm for loss-style target metrics
                # score = score[-1]
                score = max(score)
            return score
| 33.464 | 87 | 0.638059 |
from selectors import EpollSelector
from tensorflow.keras.backend import clear_session
from tensorflow.keras.models import clone_model
import tensorflow as tf
import inspect
import copy
from bigdl.nano.automl.hpo.backend import create_tfkeras_pruning_callback
from bigdl.nano.utils.log4Error import invalidInputError
def _is_creator(model):
return inspect.ismethod(model) or inspect.isfunction(model)
class Objective(object):
def __init__(self,
model=None,
target_metric=None,
pruning=False,
backend=None,
**kwargs
):
if not _is_creator(model) and not isinstance(model, tf.keras.Model):
invalidInputError(False,
"You should either pass a Tensorflo Keras model, or "
"a model_creator to the Tuning objective.")
self.model_ = model
self.target_metric_ = target_metric
self.pruning = pruning
self.backend = backend
self.kwargs = kwargs
@property
def target_metric(self):
return self.target_metric_
@target_metric.setter
def target_metric(self, value):
self.target_metric_ = value
def _prepare_fit_args(self, trial):
new_kwargs = copy.copy(self.kwargs)
new_kwargs['verbose'] = 2
new_kwargs = self.backend.instantiate_param(trial, new_kwargs, 'batch_size')
callbacks = new_kwargs.get('callbacks', None)
callbacks = callbacks() if inspect.isfunction(callbacks) else callbacks
if self.pruning:
callbacks = callbacks or []
prune_callback = create_tfkeras_pruning_callback(trial, self.target_metric)
callbacks.append(prune_callback)
new_kwargs['callbacks'] = callbacks
return new_kwargs
def __call__(self, trial):
clear_session()
if _is_creator(self.model_):
model = self.model_(trial)
else:
model = clone_model(self.model_)
new_kwargs = self._prepare_fit_args(trial)
hist = model.fit(**new_kwargs)
score = hist.history.get(self.target_metric, None)
if score is not None:
if isinstance(score, list):
score = max(score)
return score
| true | true |
f72efd44eb8409c927c2b099abb4c95c6681f60d | 2,428 | py | Python | examples/python/onnx/mnist_mlp.py | jiazhihao/FlexFlow-1 | b9bf0b615d8cf6d22bc38de4755b76ee3f8c4c22 | [
"Apache-2.0"
] | 1 | 2021-03-09T05:43:58.000Z | 2021-03-09T05:43:58.000Z | examples/python/onnx/mnist_mlp.py | jiazhihao/FlexFlow-1 | b9bf0b615d8cf6d22bc38de4755b76ee3f8c4c22 | [
"Apache-2.0"
] | null | null | null | examples/python/onnx/mnist_mlp.py | jiazhihao/FlexFlow-1 | b9bf0b615d8cf6d22bc38de4755b76ee3f8c4c22 | [
"Apache-2.0"
] | null | null | null | from flexflow.core import *
import numpy as np
from flexflow.keras.datasets import mnist
from flexflow.onnx.model import ONNXModel
from accuracy import ModelAccuracy
def top_level_task():
  """Train the MNIST MLP (imported from ONNX) with FlexFlow and check accuracy."""
  # --- FlexFlow configuration and model setup ---
  ffconfig = FFConfig()
  ffconfig.parse_args()
  print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.get_batch_size(), ffconfig.get_workers_per_node(), ffconfig.get_num_nodes()))
  ffmodel = FFModel(ffconfig)
  # Flattened 28x28 MNIST images as the network input tensor.
  dims1 = [ffconfig.get_batch_size(), 784]
  input1 = ffmodel.create_tensor(dims1, DataType.DT_FLOAT);
  num_samples = 60000
  # Build the FlexFlow compute graph from the exported ONNX model.
  onnx_model = ONNXModel("mnist_mlp.onnx")
  t = onnx_model.apply(ffmodel, {"input.1": input1})
  ffoptimizer = SGDOptimizer(ffmodel, 0.01)
  ffmodel.set_sgd_optimizer(ffoptimizer)
  ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
  label = ffmodel.get_label_tensor()
  # --- Data preparation: scale pixels to [0, 1], labels as int32 column ---
  (x_train, y_train), (x_test, y_test) = mnist.load_data()
  x_train = x_train.reshape(60000, 784)
  x_train = x_train.astype('float32')
  x_train /= 255
  y_train = y_train.astype('int32')
  y_train = np.reshape(y_train, (len(y_train), 1))
  # Stage the full training set in FlexFlow tensors and build data loaders.
  dims_full_input = [num_samples, 784]
  full_input = ffmodel.create_tensor(dims_full_input, DataType.DT_FLOAT)
  dims_full_label = [num_samples, 1]
  full_label = ffmodel.create_tensor(dims_full_label, DataType.DT_INT32)
  full_input.attach_numpy_array(ffconfig, x_train)
  full_label.attach_numpy_array(ffconfig, y_train)
  dataloader_input = SingleDataLoader(ffmodel, input1, full_input, num_samples, DataType.DT_FLOAT)
  dataloader_label = SingleDataLoader(ffmodel, label, full_label, num_samples, DataType.DT_INT32)
  full_input.detach_numpy_array(ffconfig)
  full_label.detach_numpy_array(ffconfig)
  ffmodel.init_layers()
  # --- Training, timed to report throughput ---
  epochs = ffconfig.get_epochs()
  ts_start = ffconfig.get_current_time()
  ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)
  ts_end = ffconfig.get_current_time()
  run_time = 1e-6 * (ts_end - ts_start);
  print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, num_samples * epochs / run_time));
  perf_metrics = ffmodel.get_perf_metrics()
  accuracy = perf_metrics.get_accuracy()
  # Fail loudly if training did not reach the reference accuracy.
  if accuracy < ModelAccuracy.MNIST_MLP.value:
    assert 0, 'Check Accuracy'
if __name__ == "__main__":
print("mnist mlp onnx")
top_level_task()
| 34.685714 | 167 | 0.76112 | from flexflow.core import *
import numpy as np
from flexflow.keras.datasets import mnist
from flexflow.onnx.model import ONNXModel
from accuracy import ModelAccuracy
def top_level_task():
ffconfig = FFConfig()
ffconfig.parse_args()
print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.get_batch_size(), ffconfig.get_workers_per_node(), ffconfig.get_num_nodes()))
ffmodel = FFModel(ffconfig)
dims1 = [ffconfig.get_batch_size(), 784]
input1 = ffmodel.create_tensor(dims1, DataType.DT_FLOAT);
num_samples = 60000
onnx_model = ONNXModel("mnist_mlp.onnx")
t = onnx_model.apply(ffmodel, {"input.1": input1})
ffoptimizer = SGDOptimizer(ffmodel, 0.01)
ffmodel.set_sgd_optimizer(ffoptimizer)
ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
label = ffmodel.get_label_tensor()
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_train = x_train.astype('float32')
x_train /= 255
y_train = y_train.astype('int32')
y_train = np.reshape(y_train, (len(y_train), 1))
dims_full_input = [num_samples, 784]
full_input = ffmodel.create_tensor(dims_full_input, DataType.DT_FLOAT)
dims_full_label = [num_samples, 1]
full_label = ffmodel.create_tensor(dims_full_label, DataType.DT_INT32)
full_input.attach_numpy_array(ffconfig, x_train)
full_label.attach_numpy_array(ffconfig, y_train)
dataloader_input = SingleDataLoader(ffmodel, input1, full_input, num_samples, DataType.DT_FLOAT)
dataloader_label = SingleDataLoader(ffmodel, label, full_label, num_samples, DataType.DT_INT32)
full_input.detach_numpy_array(ffconfig)
full_label.detach_numpy_array(ffconfig)
ffmodel.init_layers()
epochs = ffconfig.get_epochs()
ts_start = ffconfig.get_current_time()
ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)
ts_end = ffconfig.get_current_time()
run_time = 1e-6 * (ts_end - ts_start);
print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, num_samples * epochs / run_time));
perf_metrics = ffmodel.get_perf_metrics()
accuracy = perf_metrics.get_accuracy()
if accuracy < ModelAccuracy.MNIST_MLP.value:
assert 0, 'Check Accuracy'
if __name__ == "__main__":
print("mnist mlp onnx")
top_level_task()
| true | true |
f72efd6fc94e91466ebd1578b756f630faaefffc | 1,461 | py | Python | spiral.py | vimithamanohar/practice | 3e5372aeb29b9db3467c97ef8c4f879fff1ac7b7 | [
"MIT"
] | null | null | null | spiral.py | vimithamanohar/practice | 3e5372aeb29b9db3467c97ef8c4f879fff1ac7b7 | [
"MIT"
] | null | null | null | spiral.py | vimithamanohar/practice | 3e5372aeb29b9db3467c97ef8c4f879fff1ac7b7 | [
"MIT"
] | null | null | null | import math
import unittest
def get_line(arr, x, y, ln, dx, dy):
    """Collect *ln* elements from 2-D list *arr*, starting at (x, y) and
    stepping by (dx, dy) after each element."""
    row, col = x, y
    values = []
    for _ in range(ln):
        values.append(arr[row][col])
        row += dx
        col += dy
    return values
def get_square(arr, x, y, ln):
    """Return the border of the ln-by-ln square whose top-left corner is
    (x, y), walked clockwise starting from that corner."""
    if ln == 0:
        return []
    if ln == 1:
        return [arr[x][y]]
    last = ln - 1
    edges = (
        get_line(arr, x, y, last, 0, 1),                 # top, left -> right
        get_line(arr, x, y + last, last, 1, 0),          # right, downwards
        get_line(arr, x + last, y + last, last, 0, -1),  # bottom, right -> left
        get_line(arr, x + last, y, last, -1, 0),         # left, upwards
    )
    border = []
    for edge in edges:
        border.extend(edge)
    return border
def get_spiral(arr):
    """Return the elements of square matrix *arr* in clockwise spiral order,
    from the outermost ring inwards."""
    size = len(arr)
    if size == 0:
        return []
    spiral = []
    for ring in range(math.ceil(size / 2)):
        spiral.extend(get_square(arr, ring, ring, size - 2 * ring))
    return spiral
class TestSpiral(unittest.TestCase):
    """Unit tests for get_spiral: odd, even, trivial, and empty matrices."""
    def test_len_3(self):
        """Odd size: one outer ring plus a single centre element."""
        a = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        self.assertEqual(get_spiral(a), [1, 2, 3, 6, 9, 8, 7, 4, 5])
    def test_len_4(self):
        """Even size: two complete concentric rings."""
        a = [[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12],
             [13, 14, 15, 16]]
        self.assertEqual(get_spiral(a), [1, 2, 3, 4, 8, 12, 16, 15, 14, 13, 9, 5, 6, 7, 11, 10])
    def test_len_1(self):
        """A 1x1 matrix spirals to its single element."""
        a = [[1]]
        self.assertEqual(get_spiral(a), [1])
    def test_len_0(self):
        """An empty matrix yields an empty spiral."""
        a = []
        self.assertEqual(get_spiral(a), [])
if __name__ == '__main__':
unittest.main()
| 22.476923 | 96 | 0.473648 | import math
import unittest
def get_line(arr, x, y, ln, dx, dy):
ret = []
for i in range(ln):
ret.append(arr[x][y])
x = x + dx
y = y + dy
return ret
def get_square(arr, x, y, ln):
if ln == 0:
return []
if ln == 1:
return [arr[x][y]]
ret = []
ret.extend(get_line(arr, x, y, ln - 1, 0, 1))
ret.extend(get_line(arr, x, y + ln - 1, ln - 1, 1, 0))
ret.extend(get_line(arr, x + ln - 1, y + ln - 1, ln - 1, 0, -1))
ret.extend(get_line(arr, x + ln - 1, y, ln - 1, -1, 0))
return ret
def get_spiral(arr):
arr_len = len(arr)
if arr_len == 0:
return []
ret = []
for i in range(math.ceil(arr_len / 2)):
ret.extend(get_square(arr, i, i, arr_len - i * 2))
return ret
class TestSpiral(unittest.TestCase):
def test_len_3(self):
a = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
self.assertEqual(get_spiral(a), [1, 2, 3, 6, 9, 8, 7, 4, 5])
def test_len_4(self):
a = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]
self.assertEqual(get_spiral(a), [1, 2, 3, 4, 8, 12, 16, 15, 14, 13, 9, 5, 6, 7, 11, 10])
def test_len_1(self):
a = [[1]]
self.assertEqual(get_spiral(a), [1])
def test_len_0(self):
a = []
self.assertEqual(get_spiral(a), [])
if __name__ == '__main__':
unittest.main()
| true | true |
f72efd91fc83e7a45d20c311f82e965626069a2c | 13,324 | py | Python | i3pystatus/network.py | MaicoTimmerman/i3pystatus | dbfc94575b287420159434df2bb00fedeebeb2ed | [
"MIT"
] | null | null | null | i3pystatus/network.py | MaicoTimmerman/i3pystatus | dbfc94575b287420159434df2bb00fedeebeb2ed | [
"MIT"
] | null | null | null | i3pystatus/network.py | MaicoTimmerman/i3pystatus | dbfc94575b287420159434df2bb00fedeebeb2ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import netifaces
from i3pystatus import IntervalModule
from i3pystatus.core.color import ColorRangeModule
from i3pystatus.core.util import make_graph, round_dict, make_bar
def count_bits(integer):
    """Count the set bits in *integer* using Kernighan's bit-clearing trick."""
    set_bits = 0
    while integer:
        # Clear the lowest set bit; the loop runs once per 1-bit.
        integer &= integer - 1
        set_bits += 1
    return set_bits
def v6_to_int(v6):
    """Interpret an IPv6 address/netmask string as one big hexadecimal number."""
    hex_digits = v6.replace(":", "")
    return int(hex_digits, 16)
def prefix6(mask):
    """Return the CIDR prefix length (number of set bits) of an IPv6 netmask string."""
    return count_bits(v6_to_int(mask))
def cidr6(addr, mask):
    """Join an IPv6 address and netmask into CIDR notation (``addr/prefixlen``)."""
    return "{}/{}".format(addr, prefix6(mask))
def v4_to_int(v4):
    """Convert a dotted-quad IPv4 string (e.g. '255.255.255.0') to its 32-bit integer value."""
    # Horner's scheme: each octet shifts the accumulated value left by 8
    # bits.  Also avoids shadowing the builtin ``sum`` as the original did.
    value = 0
    for octet in v4.split("."):
        value = (value << 8) + int(octet)
    return value
def prefix4(mask):
    """Return the CIDR prefix length (number of set bits) of an IPv4 netmask string."""
    return count_bits(v4_to_int(mask))
def cidr4(addr, mask):
    """Join an IPv4 address and netmask into CIDR notation (``addr/prefixlen``)."""
    return "{}/{}".format(addr, prefix4(mask))
def get_bonded_slaves():
    """Map each bonding slave interface name to its master, read from /sys/class/net.

    Returns an empty dict on systems without the bonding module loaded
    (the sysfs file does not exist there).
    """
    try:
        with open("/sys/class/net/bonding_masters") as handle:
            masters = handle.read().split()
    except FileNotFoundError:
        return {}
    slaves = {}
    for master in masters:
        path = "/sys/class/net/{}/bonding/slaves".format(master)
        with open(path) as handle:
            slaves.update((slave, master) for slave in handle.read().split())
    return slaves
def sysfs_interface_up(interface, unknown_up=False):
    """Report whether *interface* is up according to its sysfs operstate.

    A missing interface counts as down; an 'unknown' operstate counts as
    up only when *unknown_up* is truthy.
    """
    try:
        with open("/sys/class/net/{}/operstate".format(interface)) as state_file:
            state = state_file.read().strip()
    except FileNotFoundError:
        # No sysfs entry: the interface does not exist.
        return False
    if state == "up":
        return True
    return unknown_up and state == "unknown"
class NetworkInfo():
    """
    Retrieve network information.
    Collects address details (IPv4/IPv6/MAC) and optional wireless info
    for a single interface via ``netifaces`` (and ``basiciw`` on demand).
    """
    def __init__(self, interface, ignore_interfaces, detached_down, unknown_up, get_wifi_info=False):
        # Fail fast on unknown interfaces unless the caller explicitly
        # wants missing interfaces reported as "down" (detached_down).
        if interface not in netifaces.interfaces() and not detached_down:
            raise RuntimeError(
                "Unknown interface {iface}!".format(iface=interface))
        self.ignore_interfaces = ignore_interfaces
        self.detached_down = detached_down
        self.unknown_up = unknown_up
        self.get_wifi_info = get_wifi_info
    def get_info(self, interface):
        """Return a dict of formatter values for *interface*; addresses stay empty when it is down."""
        format_dict = dict(v4="", v4mask="", v4cidr="", v6="", v6mask="", v6cidr="")
        iface_up = sysfs_interface_up(interface, self.unknown_up)
        if not iface_up:
            return format_dict
        network_info = netifaces.ifaddresses(interface)
        # If this interface is a bonding slave, report the bond master's
        # IPv4/IPv6 addresses instead of the slave's own.
        slaves = get_bonded_slaves()
        try:
            master = slaves[interface]
        except KeyError:
            pass
        else:
            if sysfs_interface_up(interface, self.unknown_up):
                master_info = netifaces.ifaddresses(master)
                for af in (netifaces.AF_INET, netifaces.AF_INET6):
                    try:
                        network_info[af] = master_info[af]
                    except KeyError:
                        pass
        # Link-layer address; AF_PACKET may be absent for virtual interfaces.
        try:
            mac = network_info[netifaces.AF_PACKET][0]["addr"]
        except KeyError:
            mac = "NONE"
        format_dict['mac'] = mac
        # NOTE(review): iface_up is always True here because of the early
        # return above, so this guard is effectively dead.
        if iface_up:
            format_dict.update(self.extract_network_info(network_info))
            format_dict.update(self.extract_wireless_info(interface))
        return format_dict
    @staticmethod
    def extract_network_info(network_info):
        """Extract v4/v6 address, mask and CIDR strings from a netifaces address mapping."""
        info = dict()
        if netifaces.AF_INET in network_info:
            v4 = network_info[netifaces.AF_INET][0]
            info["v4"] = v4["addr"]
            info["v4mask"] = v4["netmask"]
            info["v4cidr"] = cidr4(v4["addr"], v4["netmask"])
        if netifaces.AF_INET6 in network_info:
            for v6 in network_info[netifaces.AF_INET6]:
                info["v6"] = v6["addr"]
                info["v6mask"] = v6["netmask"]
                info["v6cidr"] = cidr6(v6["addr"], v6["netmask"])
                if not v6["addr"].startswith("fe80::"):  # prefer non link-local addresses
                    break
        return info
    def extract_wireless_info(self, interface):
        """Return ESSID, frequency and link quality via basiciw; empty values when wifi info is disabled."""
        info = dict(essid="", freq="", quality=0.0, quality_bar="")
        # Just return empty values if we're not using any Wifi functionality
        if not self.get_wifi_info:
            return info
        import basiciw
        try:
            iwi = basiciw.iwinfo(interface)
        except Exception:
            # Not a wireless interface
            return info
        info["essid"] = iwi["essid"]
        info["freq"] = iwi["freq"]
        # Normalise quality to a percentage; some drivers report no maximum.
        quality = iwi["quality"]
        if quality["quality_max"] > 0:
            info["quality"] = quality["quality"] / quality["quality_max"]
        else:
            info["quality"] = quality["quality"]
        info["quality"] *= 100
        info["quality_bar"] = make_bar(info["quality"])
        info["quality"] = round(info["quality"])
        return info
class NetworkTraffic():
    """Compute per-interval network traffic deltas from psutil counters."""
    # Current and previous per-NIC counter snapshots.
    pnic = None
    pnic_before = None
    def __init__(self, unknown_up, divisor, round_size):
        self.unknown_up = unknown_up
        self.divisor = divisor
        self.round_size = round_size
    def update_counters(self, interface):
        """Shift the current snapshot into history and take a fresh one for *interface*."""
        import psutil
        self.pnic_before = self.pnic
        self.pnic = psutil.net_io_counters(pernic=True).get(interface)
    def clear_counters(self):
        """Drop both snapshots, e.g. after switching interfaces."""
        self.pnic = None
        self.pnic_before = None
    def get_bytes_sent(self):
        delta = self.pnic.bytes_sent - self.pnic_before.bytes_sent
        return delta / self.divisor
    def get_bytes_received(self):
        delta = self.pnic.bytes_recv - self.pnic_before.bytes_recv
        return delta / self.divisor
    def get_packets_sent(self):
        return self.pnic.packets_sent - self.pnic_before.packets_sent
    def get_packets_received(self):
        return self.pnic.packets_recv - self.pnic_before.packets_recv
    def get_usage(self, interface):
        """Return sent/received byte and packet deltas since the previous call (zeros when down or on the first call)."""
        self.update_counters(interface)
        usage = dict(bytes_sent=0, bytes_recv=0, packets_sent=0, packets_recv=0)
        if self.pnic_before and sysfs_interface_up(interface, self.unknown_up):
            usage["bytes_sent"] = self.get_bytes_sent()
            usage["bytes_recv"] = self.get_bytes_received()
            usage["packets_sent"] = self.get_packets_sent()
            usage["packets_recv"] = self.get_packets_received()
            round_dict(usage, self.round_size)
        return usage
class Network(IntervalModule, ColorRangeModule):
    """
    Displays network information for an interface.
    Requires the PyPI packages `psutil`, `colour`, `netifaces` and `basiciw`
    .. rubric:: Available formatters
    Network Traffic Formatters:
    * `{interface}` — the configured network interface
    * `{kbs}` – Float representing kb/s
    * `{network_graph}` – Unicode graph representing network usage
    * `{bytes_sent}` — bytes sent per second (divided by divisor)
    * `{bytes_recv}` — bytes received per second (divided by divisor)
    * `{packets_sent}` — packets sent per second (divided by divisor)
    * `{packets_recv}` — packets received per second (divided by divisor)
    Network Information Formatters:
    * `{interface}` — same as setting
    * `{v4}` — IPv4 address
    * `{v4mask}` — subnet mask
    * `{v4cidr}` — IPv4 address in cidr notation (i.e. 192.168.2.204/24)
    * `{v6}` — IPv6 address
    * `{v6mask}` — subnet mask
    * `{v6cidr}` — IPv6 address in cidr notation
    * `{mac}` — MAC of interface
    Wireless Information Formatters:
    * `{essid}` — ESSID of currently connected wifi
    * `{freq}` — Current frequency
    * `{quality}` — Link quality in percent
    * `{quality_bar}` — Bar graphically representing link quality
    """
    settings = (
        ("format_up", "format string"),
        ("format_down", "format string"),
        "color_up",
        "color_down",
        ("interface", "Interface to watch, eg 'eth0'"),
        ("dynamic_color", "Set color dynamically based on network traffic. Note: this overrides color_up"),
        ("start_color", "Hex or English name for start of color range, eg '#00FF00' or 'green'"),
        ("end_color", "Hex or English name for end of color range, eg '#FF0000' or 'red'"),
        ("graph_width", "Width of the network traffic graph"),
        ("graph_style", "Graph style ('blocks', 'braille-fill', 'braille-peak', or 'braille-snake')"),
        ("upper_limit",
         "Expected max kb/s. This value controls how the network traffic graph is drawn and in what color"),
        ("graph_type", "Whether to draw the network traffic graph for input or output. "
                       "Allowed values 'input' or 'output'"),
        ("divisor", "divide all byte values by this value"),
        ("ignore_interfaces", "Array of interfaces to ignore when cycling through "
                              "on click, eg, ['lo']"),
        ("round_size", "defines number of digits in round"),
        ("detached_down", "If the interface doesn't exist, display it as if it were down"),
        ("unknown_up", "If the interface is in unknown state, display it as if it were up"),
    )
    interval = 1
    interface = 'eth0'
    format_up = "{interface} {network_graph}{kbs}KB/s"
    format_down = "{interface}: DOWN"
    color_up = "#00FF00"
    color_down = "#FF0000"
    dynamic_color = True
    graph_type = 'input'
    graph_width = 15
    graph_style = 'blocks'
    upper_limit = 150.0
    # Network traffic settings
    divisor = 1024
    round_size = None
    # Network info settings
    detached_down = True
    unknown_up = False
    ignore_interfaces = ["lo"]
    on_leftclick = "nm-connection-editor"
    on_rightclick = "cycle_interface"
    on_upscroll = ['cycle_interface', 1]
    on_downscroll = ['cycle_interface', -1]
    def init(self):
        # Don't require importing basiciw unless using the functionality it offers.
        # BUGFIX: the original tested format_up twice, which silently ignored
        # wifi formatters that appear only in format_down.
        if any(s in self.format_up or s in self.format_down for s in
               ['essid', 'freq', 'quality', 'quality_bar']):
            get_wifi_info = True
        else:
            get_wifi_info = False
        self.network_info = NetworkInfo(self.interface, self.ignore_interfaces, self.detached_down, self.unknown_up,
                                        get_wifi_info)
        # Don't require importing psutil unless using the functionality it offers.
        if any(s in self.format_up or s in self.format_down for s in
               ['bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'network_graph', 'kbs']):
            self.network_traffic = NetworkTraffic(self.unknown_up, self.divisor, self.round_size)
        else:
            self.network_traffic = None
        if not self.dynamic_color:
            self.end_color = self.start_color
        self.colors = self.get_hex_color_range(self.start_color, self.end_color, int(self.upper_limit))
        self.kbs_arr = [0.0] * self.graph_width
    def cycle_interface(self, increment=1):
        """Cycle through the available interfaces, skipping ignored ones, and reset counters."""
        interfaces = [i for i in netifaces.interfaces() if i not in self.ignore_interfaces]
        if self.interface in interfaces:
            next_index = (interfaces.index(self.interface) + increment) % len(interfaces)
            self.interface = interfaces[next_index]
        elif len(interfaces) > 0:
            self.interface = interfaces[0]
        if self.network_traffic:
            self.network_traffic.clear_counters()
            self.kbs_arr = [0.0] * self.graph_width
    def get_network_graph(self, kbs):
        # Cycle array by inserting at the start and chopping off the last element
        self.kbs_arr.insert(0, kbs)
        self.kbs_arr = self.kbs_arr[:self.graph_width]
        return make_graph(self.kbs_arr, 0.0, self.upper_limit, self.graph_style)
    def run(self):
        format_values = dict(kbs="", network_graph="", bytes_sent="", bytes_recv="", packets_sent="", packets_recv="",
                             interface="", v4="", v4mask="", v4cidr="", v6="", v6mask="", v6cidr="", mac="",
                             essid="", freq="", quality="", quality_bar="")
        if self.network_traffic:
            network_usage = self.network_traffic.get_usage(self.interface)
            format_values.update(network_usage)
            if self.graph_type == 'input':
                kbs = network_usage['bytes_recv']
            elif self.graph_type == 'output':
                kbs = network_usage['bytes_sent']
            else:
                raise Exception("graph_type must be either 'input' or 'output'!")
            format_values['network_graph'] = self.get_network_graph(kbs)
            format_values['kbs'] = "{0:.1f}".format(round(kbs, 2)).rjust(6)
            color = self.get_gradient(kbs, self.colors, self.upper_limit)
        else:
            color = None
        # Traffic-based dynamic color wins when the interface is up; a down
        # interface is always rendered with color_down.
        if sysfs_interface_up(self.interface, self.unknown_up):
            if not color:
                color = self.color_up
            format_str = self.format_up
        else:
            color = self.color_down
            format_str = self.format_down
        network_info = self.network_info.get_info(self.interface)
        format_values.update(network_info)
        format_values['interface'] = self.interface
        self.output = {
            "full_text": format_str.format(**format_values),
            'color': color,
        }
| 35.248677 | 118 | 0.611828 |
import netifaces
from i3pystatus import IntervalModule
from i3pystatus.core.color import ColorRangeModule
from i3pystatus.core.util import make_graph, round_dict, make_bar
def count_bits(integer):
bits = 0
while (integer):
integer &= integer - 1
bits += 1
return bits
def v6_to_int(v6):
return int(v6.replace(":", ""), 16)
def prefix6(mask):
return count_bits(v6_to_int(mask))
def cidr6(addr, mask):
return "{addr}/{bits}".format(addr=addr, bits=prefix6(mask))
def v4_to_int(v4):
sum = 0
mul = 1
for part in reversed(v4.split(".")):
sum += int(part) * mul
mul *= 2 ** 8
return sum
def prefix4(mask):
return count_bits(v4_to_int(mask))
def cidr4(addr, mask):
return "{addr}/{bits}".format(addr=addr, bits=prefix4(mask))
def get_bonded_slaves():
try:
with open("/sys/class/net/bonding_masters") as f:
masters = f.read().split()
except FileNotFoundError:
return {}
slaves = {}
for master in masters:
with open("/sys/class/net/{}/bonding/slaves".format(master)) as f:
for slave in f.read().split():
slaves[slave] = master
return slaves
def sysfs_interface_up(interface, unknown_up=False):
try:
with open("/sys/class/net/{}/operstate".format(interface)) as f:
status = f.read().strip()
except FileNotFoundError:
return False
return status == "up" or unknown_up and status == "unknown"
class NetworkInfo():
def __init__(self, interface, ignore_interfaces, detached_down, unknown_up, get_wifi_info=False):
if interface not in netifaces.interfaces() and not detached_down:
raise RuntimeError(
"Unknown interface {iface}!".format(iface=interface))
self.ignore_interfaces = ignore_interfaces
self.detached_down = detached_down
self.unknown_up = unknown_up
self.get_wifi_info = get_wifi_info
def get_info(self, interface):
format_dict = dict(v4="", v4mask="", v4cidr="", v6="", v6mask="", v6cidr="")
iface_up = sysfs_interface_up(interface, self.unknown_up)
if not iface_up:
return format_dict
network_info = netifaces.ifaddresses(interface)
slaves = get_bonded_slaves()
try:
master = slaves[interface]
except KeyError:
pass
else:
if sysfs_interface_up(interface, self.unknown_up):
master_info = netifaces.ifaddresses(master)
for af in (netifaces.AF_INET, netifaces.AF_INET6):
try:
network_info[af] = master_info[af]
except KeyError:
pass
try:
mac = network_info[netifaces.AF_PACKET][0]["addr"]
except KeyError:
mac = "NONE"
format_dict['mac'] = mac
if iface_up:
format_dict.update(self.extract_network_info(network_info))
format_dict.update(self.extract_wireless_info(interface))
return format_dict
@staticmethod
def extract_network_info(network_info):
info = dict()
if netifaces.AF_INET in network_info:
v4 = network_info[netifaces.AF_INET][0]
info["v4"] = v4["addr"]
info["v4mask"] = v4["netmask"]
info["v4cidr"] = cidr4(v4["addr"], v4["netmask"])
if netifaces.AF_INET6 in network_info:
for v6 in network_info[netifaces.AF_INET6]:
info["v6"] = v6["addr"]
info["v6mask"] = v6["netmask"]
info["v6cidr"] = cidr6(v6["addr"], v6["netmask"])
if not v6["addr"].startswith("fe80::"): # prefer non link-local addresses
break
return info
def extract_wireless_info(self, interface):
info = dict(essid="", freq="", quality=0.0, quality_bar="")
# Just return empty values if we're not using any Wifi functionality
if not self.get_wifi_info:
return info
import basiciw
try:
iwi = basiciw.iwinfo(interface)
except Exception:
return info
info["essid"] = iwi["essid"]
info["freq"] = iwi["freq"]
quality = iwi["quality"]
if quality["quality_max"] > 0:
info["quality"] = quality["quality"] / quality["quality_max"]
else:
info["quality"] = quality["quality"]
info["quality"] *= 100
info["quality_bar"] = make_bar(info["quality"])
info["quality"] = round(info["quality"])
return info
class NetworkTraffic():
pnic = None
pnic_before = None
def __init__(self, unknown_up, divisor, round_size):
self.unknown_up = unknown_up
self.divisor = divisor
self.round_size = round_size
def update_counters(self, interface):
import psutil
self.pnic_before = self.pnic
counters = psutil.net_io_counters(pernic=True)
self.pnic = counters[interface] if interface in counters else None
def clear_counters(self):
self.pnic_before = None
self.pnic = None
def get_bytes_sent(self):
return (self.pnic.bytes_sent - self.pnic_before.bytes_sent) / self.divisor
def get_bytes_received(self):
return (self.pnic.bytes_recv - self.pnic_before.bytes_recv) / self.divisor
def get_packets_sent(self):
return self.pnic.packets_sent - self.pnic_before.packets_sent
def get_packets_received(self):
return self.pnic.packets_recv - self.pnic_before.packets_recv
def get_usage(self, interface):
self.update_counters(interface)
usage = dict(bytes_sent=0, bytes_recv=0, packets_sent=0, packets_recv=0)
if not sysfs_interface_up(interface, self.unknown_up) or not self.pnic_before:
return usage
else:
usage["bytes_sent"] = self.get_bytes_sent()
usage["bytes_recv"] = self.get_bytes_received()
usage["packets_sent"] = self.get_packets_sent()
usage["packets_recv"] = self.get_packets_received()
round_dict(usage, self.round_size)
return usage
class Network(IntervalModule, ColorRangeModule):
settings = (
("format_up", "format string"),
("format_down", "format string"),
"color_up",
"color_down",
("interface", "Interface to watch, eg 'eth0'"),
("dynamic_color", "Set color dynamically based on network traffic. Note: this overrides color_up"),
("start_color", "Hex or English name for start of color range, eg '#00FF00' or 'green'"),
("end_color", "Hex or English name for end of color range, eg '#FF0000' or 'red'"),
("graph_width", "Width of the network traffic graph"),
("graph_style", "Graph style ('blocks', 'braille-fill', 'braille-peak', or 'braille-snake')"),
("upper_limit",
"Expected max kb/s. This value controls how the network traffic graph is drawn and in what color"),
("graph_type", "Whether to draw the network traffic graph for input or output. "
"Allowed values 'input' or 'output'"),
("divisor", "divide all byte values by this value"),
("ignore_interfaces", "Array of interfaces to ignore when cycling through "
"on click, eg, ['lo']"),
("round_size", "defines number of digits in round"),
("detached_down", "If the interface doesn't exist, display it as if it were down"),
("unknown_up", "If the interface is in unknown state, display it as if it were up"),
)
interval = 1
interface = 'eth0'
format_up = "{interface} {network_graph}{kbs}KB/s"
format_down = "{interface}: DOWN"
color_up = "#00FF00"
color_down = "#FF0000"
dynamic_color = True
graph_type = 'input'
graph_width = 15
graph_style = 'blocks'
upper_limit = 150.0
# Network traffic settings
divisor = 1024
round_size = None
# Network info settings
detached_down = True
unknown_up = False
ignore_interfaces = ["lo"]
on_leftclick = "nm-connection-editor"
on_rightclick = "cycle_interface"
on_upscroll = ['cycle_interface', 1]
on_downscroll = ['cycle_interface', -1]
def init(self):
# Don't require importing basiciw unless using the functionality it offers.
if any(s in self.format_up or s in self.format_up for s in
['essid', 'freq', 'quality', 'quality_bar']):
get_wifi_info = True
else:
get_wifi_info = False
self.network_info = NetworkInfo(self.interface, self.ignore_interfaces, self.detached_down, self.unknown_up,
get_wifi_info)
if any(s in self.format_up or s in self.format_down for s in
['bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'network_graph', 'kbs']):
self.network_traffic = NetworkTraffic(self.unknown_up, self.divisor, self.round_size)
else:
self.network_traffic = None
if not self.dynamic_color:
self.end_color = self.start_color
self.colors = self.get_hex_color_range(self.start_color, self.end_color, int(self.upper_limit))
self.kbs_arr = [0.0] * self.graph_width
def cycle_interface(self, increment=1):
interfaces = [i for i in netifaces.interfaces() if i not in self.ignore_interfaces]
if self.interface in interfaces:
next_index = (interfaces.index(self.interface) + increment) % len(interfaces)
self.interface = interfaces[next_index]
elif len(interfaces) > 0:
self.interface = interfaces[0]
if self.network_traffic:
self.network_traffic.clear_counters()
self.kbs_arr = [0.0] * self.graph_width
def get_network_graph(self, kbs):
# Cycle array by inserting at the start and chopping off the last element
self.kbs_arr.insert(0, kbs)
self.kbs_arr = self.kbs_arr[:self.graph_width]
return make_graph(self.kbs_arr, 0.0, self.upper_limit, self.graph_style)
def run(self):
format_values = dict(kbs="", network_graph="", bytes_sent="", bytes_recv="", packets_sent="", packets_recv="",
interface="", v4="", v4mask="", v4cidr="", v6="", v6mask="", v6cidr="", mac="",
essid="", freq="", quality="", quality_bar="")
if self.network_traffic:
network_usage = self.network_traffic.get_usage(self.interface)
format_values.update(network_usage)
if self.graph_type == 'input':
kbs = network_usage['bytes_recv']
elif self.graph_type == 'output':
kbs = network_usage['bytes_sent']
else:
raise Exception("graph_type must be either 'input' or 'output'!")
format_values['network_graph'] = self.get_network_graph(kbs)
format_values['kbs'] = "{0:.1f}".format(round(kbs, 2)).rjust(6)
color = self.get_gradient(kbs, self.colors, self.upper_limit)
else:
color = None
if sysfs_interface_up(self.interface, self.unknown_up):
if not color:
color = self.color_up
format_str = self.format_up
else:
color = self.color_down
format_str = self.format_down
network_info = self.network_info.get_info(self.interface)
format_values.update(network_info)
format_values['interface'] = self.interface
self.output = {
"full_text": format_str.format(**format_values),
'color': color,
}
| true | true |
f72efe28d7c827765fc230987e47746d83b1714e | 1,500 | py | Python | 00Python/day11/basic02.py | HaoZhang95/PythonAndMachineLearning | b897224b8a0e6a5734f408df8c24846a98c553bf | [
"MIT"
] | 937 | 2019-05-08T08:46:25.000Z | 2022-03-31T12:56:07.000Z | 00Python/day11/basic02.py | Sakura-gh/Python24 | b97e18867264a0647d5645c7d757a0040e755577 | [
"MIT"
] | 47 | 2019-09-17T10:06:02.000Z | 2022-03-11T23:46:52.000Z | 00Python/day11/basic02.py | Sakura-gh/Python24 | b97e18867264a0647d5645c7d757a0040e755577 | [
"MIT"
] | 354 | 2019-05-10T02:15:26.000Z | 2022-03-30T05:52:57.000Z | """
__new__()方法, 对象创建的过程,
1- new方法返回一个对象 2- init利用new返回的对象进行属性的添加
"""
class Person(object):
    # Hooks into instance creation; must return an instance, which Python
    # then passes to __init__ (and is bound to ``xiaoming`` below).
    # If __new__ does not return an instance, __init__ is never executed.
    # NOTE(review): object.__new__ is called twice here — the printed
    # instance is a *different* object from the one actually returned.
    def __new__(cls, *args, **kwargs):
        print("new")
        print((object.__new__(cls)))
        return object.__new__(cls)
    # Constructor: by the time __init__ runs the object *already exists*;
    # all that is left is attaching attributes to it.
    def __init__(self, name):
        print("init")
        self.name = name
    # Python's equivalent of a toString method:
    # def __str__(self):
    #     return "我的名字是: %s" % self.name
    # Invoked when the reference count drops to zero.
    def __del__(self):
        print("再见")
# The printed address of ``xiaoming`` matches the object printed inside
# __new__'s return path, showing that __new__'s return value becomes the
# new instance.
xiaoming = Person("小明")
print(xiaoming)
print("=" * 28)
"""
python的单例模式,需要使用到new关键方法
1- 保证返回的对象是同一个,在new中修改
2- 保证对象的属性只能赋值一次,在init方法中修改
3- 一般单例模式中的包含静态方法, 类似于Tools.XX, 不需要创建多个对象来调用同一个静态方法
"""
class Student(object):
    """Singleton: every instantiation yields the same object, initialised only once."""
    # Class-level cache holding the one and only instance.
    __instance = None
    # Guard ensuring __init__ assigns the attributes exactly once.
    __is_first = True
    def __new__(cls, *args, **kwargs):
        if cls.__instance is None:
            cls.__instance = super().__new__(cls)
        return cls.__instance
    def __init__(self, name, age):
        # Later instantiations reach here with the guard already cleared,
        # so the attributes from the first call are preserved.
        if not self.__is_first:
            return
        self.name = name
        self.age = age
        self.__is_first = False
    @staticmethod
    def add_num(a, b):
        """Utility method; callable as Student.add_num without an instance."""
        return a + b
# Both calls return the same singleton instance, so s1 is s2 and both
# prints show the attribute values from the first initialisation.
s1 = Student("小明", 25)
s2 = Student("小红", 28)
print(s1)
print(s2)
print(s1.name)
print(s2.name)
| 20.547945 | 56 | 0.626 |
class Person(object):
def __new__(cls, *args, **kwargs):
print("new")
print((object.__new__(cls)))
return object.__new__(cls)
def __init__(self, name):
print("init")
self.name = name
def __del__(self):
print("再见")
xiaoming = Person("小明")
print(xiaoming)
print("=" * 28)
class Student(object):
__instance = None
__is_first = True
def __new__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = object.__new__(cls)
return cls.__instance
def __init__(self, name, age):
if self.__is_first:
self.name = name
self.age = age
self.__is_first = False
@staticmethod
def add_num(a, b):
return a + b
s1 = Student("小明", 25)
s2 = Student("小红", 28)
print(s1)
print(s2)
print(s1.name)
print(s2.name)
| true | true |
f72efe3d12d989d9d5fbd596ed2881377aad1ab5 | 3,542 | py | Python | test/functional/wallet_encryption.py | Hary2511/xaya | 312ffe32f0bc58d0936f96770c59b57d9862ac35 | [
"MIT"
] | 2 | 2018-08-07T17:27:05.000Z | 2018-08-07T17:28:18.000Z | test/functional/wallet_encryption.py | Hary2511/xaya | 312ffe32f0bc58d0936f96770c59b57d9862ac35 | [
"MIT"
] | null | null | null | test/functional/wallet_encryption.py | Hary2511/xaya | 312ffe32f0bc58d0936f96770c59b57d9862ac35 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(BitcoinTestFramework):
    """Exercise wallet encryption RPCs: encrypt, unlock/lock, passphrase change, timeout bounds.

    The steps below are a single stateful sequence against node 0; their
    order matters (e.g. the wallet must be encrypted before the locked
    assertions, and passphrase2 only becomes valid after
    walletpassphrasechange).
    """
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    def run_test(self):
        passphrase = "WalletPassphrase"
        passphrase2 = "SecondWalletPassphrase"
        # Make sure the wallet isn't encrypted first
        address = self.nodes[0].getnewaddress()
        privkey = self.nodes[0].dumpprivkey(address)
        # presumably "b"/52 chars is this chain's WIF key format — TODO confirm
        assert_equal(privkey[:1], "b")
        assert_equal(len(privkey), 52)
        # Encrypt the wallet
        self.nodes[0].node_encrypt_wallet(passphrase)
        self.start_node(0)
        # Test that the wallet is encrypted
        assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
        # Check that walletpassphrase works
        self.nodes[0].walletpassphrase(passphrase, 2)
        assert_equal(privkey, self.nodes[0].dumpprivkey(address))
        # Check that the timeout is right
        time.sleep(2)
        assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
        # Test wrong passphrase
        assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
        # Test walletlock
        self.nodes[0].walletpassphrase(passphrase, 84600)
        assert_equal(privkey, self.nodes[0].dumpprivkey(address))
        self.nodes[0].walletlock()
        assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
        # Test passphrase changes
        self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
        assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
        self.nodes[0].walletpassphrase(passphrase2, 10)
        assert_equal(privkey, self.nodes[0].dumpprivkey(address))
        self.nodes[0].walletlock()
        # Test timeout bounds
        assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
        # Check the timeout
        # Check a time less than the limit
        MAX_VALUE = 100000000
        expected_time = int(time.time()) + MAX_VALUE - 600
        self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
        actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
        assert_greater_than_or_equal(actual_time, expected_time)
        assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
        # Check a time greater than the limit
        expected_time = int(time.time()) + MAX_VALUE - 1
        self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
        actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
        assert_greater_than_or_equal(actual_time, expected_time)
        assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
# Run the functional test when executed directly.
if __name__ == '__main__':
    WalletEncryptionTest().main()
| 43.195122 | 138 | 0.705816 |
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "b")
assert_equal(len(privkey), 52)
# Encrypt the wallet
self.nodes[0].node_encrypt_wallet(passphrase)
self.start_node(0)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(2)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
# Test timeout bounds
assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
# Check the timeout
# Check a time less than the limit
MAX_VALUE = 100000000
expected_time = int(time.time()) + MAX_VALUE - 600
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
# Check a time greater than the limit
expected_time = int(time.time()) + MAX_VALUE - 1
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
if __name__ == '__main__':
WalletEncryptionTest().main()
| true | true |
f72efe4f9f448f396364ed1b99e237c63630816c | 2,612 | py | Python | tests/providers/amazon/aws/hooks/test_aws_dynamodb_hook.py | wileeam/airflow | f46be8152a4d89c57db4ca46f5b3339e4876b723 | [
"Apache-2.0"
] | 1 | 2020-02-17T17:40:14.000Z | 2020-02-17T17:40:14.000Z | tests/providers/amazon/aws/hooks/test_aws_dynamodb_hook.py | devlocalca/airflow | 58c3542ed25061320ce61dbe0adf451a44c738dd | [
"Apache-2.0"
] | 2 | 2021-05-12T12:41:51.000Z | 2021-09-29T17:47:43.000Z | tests/providers/amazon/aws/hooks/test_aws_dynamodb_hook.py | devlocalca/airflow | 58c3542ed25061320ce61dbe0adf451a44c738dd | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import uuid
from airflow.providers.amazon.aws.hooks.aws_dynamodb_hook import AwsDynamoDBHook
# moto is an optional test dependency; when it is missing the tests below
# are skipped via their skipIf guards instead of failing at import time.
try:
    from moto import mock_dynamodb2
except ImportError:
    mock_dynamodb2 = None
class TestDynamoDBHook(unittest.TestCase):
    """Tests for AwsDynamoDBHook against a moto-mocked DynamoDB backend."""

    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_conn_returns_a_boto3_connection(self):
        hook = AwsDynamoDBHook(aws_conn_id='aws_default')
        self.assertIsNotNone(hook.get_conn())

    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_insert_batch_items_dynamodb_table(self):
        hook = AwsDynamoDBHook(aws_conn_id='aws_default',
                               table_name='test_airflow', table_keys=['id'], region_name='us-east-1')
        # this table needs to be created in production
        # The create_table result was previously bound to ``table`` and then
        # immediately overwritten below; drop the unused binding.
        hook.get_conn().create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'id',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )
        table = hook.get_conn().Table('test_airflow')
        items = [{'id': str(uuid.uuid4()), 'name': 'airflow'}
                 for _ in range(10)]
        hook.write_batch_data(items)
        # Wait for the mocked table to report existence before counting rows.
        table.meta.client.get_waiter(
            'table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 10)
# Allow running this test module directly via unittest.
if __name__ == '__main__':
    unittest.main()
| 32.246914 | 101 | 0.64242 |
import unittest
import uuid
from airflow.providers.amazon.aws.hooks.aws_dynamodb_hook import AwsDynamoDBHook
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class TestDynamoDBHook(unittest.TestCase):
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default')
self.assertIsNotNone(hook.get_conn())
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_insert_batch_items_dynamodb_table(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default',
table_name='test_airflow', table_keys=['id'], region_name='us-east-1')
table = hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'id',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
table = hook.get_conn().Table('test_airflow')
items = [{'id': str(uuid.uuid4()), 'name': 'airflow'}
for _ in range(10)]
hook.write_batch_data(items)
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 10)
if __name__ == '__main__':
unittest.main()
| true | true |
f72efe56b3a6ea76c20c2859404736eb8275a81d | 12,811 | py | Python | install.py | ArgusHomeSecurity/argus_management | d2a6fb0004f23963f1cbc9cd07f40596abaf8b7b | [
"MIT"
] | null | null | null | install.py | ArgusHomeSecurity/argus_management | d2a6fb0004f23963f1cbc9cd07f40596abaf8b7b | [
"MIT"
] | 1 | 2021-02-24T12:04:11.000Z | 2021-02-24T12:04:11.000Z | install.py | ArPIHomeSecurity/arpi_management | d2a6fb0004f23963f1cbc9cd07f40596abaf8b7b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
Script for installing the components of the ArPI home security system to a running Raspberry PI Zero Wifi host.
It uses the configuration file install.yaml!
---
@author: Gábor Kovács
@copyright: 2017 arpi-security.info. All rights reserved.
@contact: gkovacs81@gmail.com
"""
import json
import logging
import subprocess
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from os import system
from os.path import join, exists
from socket import gaierror
from time import sleep
import paramiko
import yaml
from paramiko.ssh_exception import AuthenticationException, NoValidConnectionsError
from scp import SCPClient
from utils import (
deep_copy,
execute_remote,
generate_SSH_key,
list_copy,
print_lines,
show_progress
)
CONFIG = {}
logging.basicConfig(format="%(message)s")
logger = logging.getLogger()
logging.getLogger("paramiko").setLevel(logging.CRITICAL)
__all__ = []
__version__ = 0.1
__date__ = "2017-08-21"
__updated__ = "2019-08-21"
program_shortdesc = __import__("__main__").__doc__.split("---")[0]
program_license = """%s
Created by gkovacs81@gmail.com on %s.
Copyright 2019 arpi-security.info. All rights reserved.
USAGE
""" % (
program_shortdesc,
str(__date__),
)
def get_connection():
    """Open an SSH connection to the ArPI host.

    First tries key-based authentication as the configured ArPI user; on
    failure (bad key, rejected auth, unreachable host) falls back to
    username/password against the factory-default host.

    Returns:
        paramiko.SSHClient: an established SSH connection.

    Raises:
        Exception: if neither connection attempt succeeds.
    """
    # Create the client before the try block so the fallback branch can always
    # use it.  Previously `ssh` was assigned inside `try`, so a failure while
    # loading the private key (e.g. PasswordRequiredException, a subclass of
    # AuthenticationException) made the except branch crash with NameError.
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        logger.info(
            "Connecting with private key in '%s' %s@%s",
            CONFIG["arpi_key_name"],
            CONFIG["arpi_username"],
            CONFIG["arpi_hostname"],
        )
        private_key = None
        if exists(CONFIG["arpi_key_name"]):
            private_key = paramiko.RSAKey.from_private_key_file(
                CONFIG["arpi_key_name"], CONFIG["arpi_password"]
            )

        ssh.connect(
            CONFIG["arpi_hostname"],
            username=CONFIG["arpi_username"],
            password=CONFIG["arpi_password"],
            pkey=private_key,
        )
        logger.info("Connected")
    except (AuthenticationException, NoValidConnectionsError, gaierror):
        try:
            logger.info("Connecting %s@%s", CONFIG["default_username"], CONFIG["default_hostname"])
            ssh.connect(
                CONFIG["default_hostname"],
                username=CONFIG["default_username"],
                password=CONFIG["default_password"],
            )
            logger.info("Connected")
        except (NoValidConnectionsError, gaierror):
            raise Exception("Can't connect to the host!")

    return ssh
def install_environment():
    """
    Install prerequisites to an empty Raspberry PI.

    Locally generates an SSH key pair and DH parameters if missing, uploads
    the bootstrap script plus configuration, runs the script remotely,
    deploys the public key, disables password-based SSH authentication and
    finally reboots the host.
    """
    # Generate a dedicated ArPI SSH key pair only if neither half exists yet.
    if not exists(CONFIG["arpi_key_name"]) and \
            not exists(CONFIG["arpi_key_name"] + ".pub"):
        generate_SSH_key(CONFIG["arpi_key_name"], CONFIG["arpi_password"])

    # Diffie-Hellman parameters for the host's TLS configuration.
    dhparam_file = "arpi_dhparam.pem"
    if not exists(dhparam_file):
        logger.info("dhparam (%s) generating", dhparam_file)
        system(f"openssl dhparam -out {dhparam_file} {CONFIG['dhparam_size']}")
    else:
        logger.info("dhparam (%s) already exists", dhparam_file)
        system(f"openssl dhparam -in {dhparam_file} -text | head -3")

    # create the env variables string because paramiko update_evironment ignores them
    arguments = {
        "ARPI_PASSWORD": CONFIG["arpi_password"],
        "ARGUS_DB_SCHEMA": CONFIG["argus_db_schema"],
        "ARGUS_DB_USERNAME": CONFIG["argus_db_username"],
        "ARGUS_DB_PASSWORD": CONFIG["argus_db_password"],
        "ARPI_HOSTNAME": CONFIG["arpi_hostname"],
        "DHPARAM_FILE": join("/tmp", dhparam_file),
        # progress
        "QUIET": "" if CONFIG["progress"] else "-q",
        "PROGRESS": "on" if CONFIG["progress"] else "off"
    }
    # adding package versions
    arguments.update({p.upper(): f"{v}" for p, v in CONFIG["packages"].items() if v})
    # Render as a single "export A=1; export B=2; ..." prefix for the remote shell.
    arguments = [f"export {key}={value}" for key, value in arguments.items()]
    arguments = "; ".join(arguments)

    ssh = get_connection()
    scp = SCPClient(ssh.get_transport(), progress=show_progress if CONFIG["progress"] else None)
    scp.put("scripts/install_environment.sh", remote_path=".")
    deep_copy(ssh, join(CONFIG["server_path"], "etc"), "/tmp/etc", "**/*", CONFIG["progress"])
    list_copy(
        ssh,
        ((dhparam_file, "/tmp"),),
        CONFIG["progress"]
    )

    # Run the bootstrap script on a PTY so its combined stdout/stderr can be
    # streamed back line by line.
    channel = ssh.get_transport().open_session()
    channel.get_pty()
    channel.set_combine_stderr(True)
    output = channel.makefile("r", -1)
    logger.info("Starting install script...")
    channel.exec_command(f"{arguments}; ./install_environment.sh")
    print_lines(output)
    ssh.close()

    # waiting for user
    # 1. deploy key can timeout
    # 2. ssh accept password only from terminal
    input("Waiting before deploying public key!")
    command = f"ssh-copy-id -i {CONFIG['arpi_key_name']} {CONFIG['arpi_username']}@{CONFIG['default_hostname']}"
    logger.info("Deploy public key: %s", command)
    while subprocess.call(command, shell=True) != 0:
        # retry after 2 seconds
        sleep(2)

    ssh = get_connection()
    execute_remote(
        message="Enabling key based ssh authentication",
        ssh=ssh,
        command="sudo sed -i -E -e 's/.*PasswordAuthentication (yes|no)/PasswordAuthentication no/g' /etc/ssh/sshd_config",
    )
    execute_remote(message="Restarting the host", ssh=ssh, command="sudo reboot")
def install_component(component, update=False, restart=False):
    """
    Install one server-side component ('server' or 'monitoring') to a Raspberry PI.

    Copies the shared project files plus the given component's sources,
    optionally (re)builds the python environments and restarts the services.

    :param component: name of the source subdirectory to deploy
    :param update: rebuild the pipenv environments on the remote host
    :param restart: restart the dependent systemd services after deployment
    """
    ssh = get_connection()

    execute_remote(
        message="Creating server directories...",
        ssh=ssh,
        command="mkdir -p server/etc server/scripts server/src server/webapplication",
    )

    # Files shared by all components: pipenv manifests, environment file and
    # the common source modules.
    logger.info("Copy common files...")
    list_copy(
        ssh,
        (
            (join(CONFIG["server_path"], "Pipfile"), "server"),
            (join(CONFIG["server_path"], "Pipfile.lock"), "server"),
            (join(CONFIG["server_path"], f".env_{CONFIG['environment']}"), "server/.env"),
            (join(CONFIG["server_path"], "src", "data.py"), join("server", "src", "data.py")),
            (join(CONFIG["server_path"], "src", "hash.py"), join("server", "src", "hash.py")),
            (join(CONFIG["server_path"], "src", "models.py"), join("server", "src", "models.py")),
        ), CONFIG["progress"]
    )
    deep_copy(
        ssh, join(CONFIG["server_path"], "src", "tools"), join("server", "src", "tools"), "**/*.py", CONFIG["progress"]
    )

    logger.info("Copy component '%s'...", component)
    deep_copy(
        ssh,
        join(CONFIG["server_path"], "src", component),
        join("server", "src", component),
        "**/*.py",
        CONFIG["progress"]
    )

    if update:
        # The environment is installed three times: system-wide, for the
        # unprivileged argus user and for root (the monitor runs as root).
        execute_remote(
            message="Start installing python packages on sytem...",
            ssh=ssh,
            command="cd server; sudo PIPENV_TIMEOUT=9999 pipenv install --system",
        )
        execute_remote(
            message="Create virtual environment with python3 for argus...",
            ssh=ssh,
            command="cd server; PIPENV_TIMEOUT=9999 CI=1 pipenv install --skip-lock --site-packages",
        )
        execute_remote(
            message="Create virtual environment with python3 for root...",
            ssh=ssh,
            command="cd server; sudo PIPENV_TIMEOUT=9999 CI=1 pipenv install --skip-lock --site-packages",
        )

    if restart:
        execute_remote(
            message="Restarting the service...",
            ssh=ssh,
            command="sudo systemctl restart argus_monitor.service argus_server.service",
        )

    ssh.close()
def install_server(update=False, restart=False):
    """Deploy the 'server' component; thin wrapper around install_component()."""
    install_component("server", restart=restart, update=update)
def install_monitoring(update=False, restart=False):
    """Deploy the 'monitoring' component; thin wrapper around install_component()."""
    install_component("monitoring", restart=restart, update=update)
def install_database():
    """Initialize, migrate, upgrade and populate the database on the Raspberry PI."""
    ssh = get_connection()

    # Each step is a (log message, remote shell command) pair, executed in order.
    steps = (
        ("Initialize database...", "cd server; pipenv run flask db init"),
        ("Migrate database...", "cd server; pipenv run flask db migrate"),
        ("Upgrade database...", "cd server; pipenv run flask db upgrade"),
        (
            "Updating database content...",
            f"cd server; pipenv run src/data.py -d -c {CONFIG['argus_db_content']}",
        ),
    )
    for message, command in steps:
        execute_remote(message=message, ssh=ssh, command=command)

    ssh.close()
def install_webapplication(restart=False):
    """
    Install the web application component to a Raspberry PI.

    Removes any previously deployed web application, copies the freshly
    built one over, and optionally restarts the server service.

    :param restart: restart the dependent systemd service after deployment
    """
    ssh = get_connection()
    execute_remote(
        message="Delete old webapplication on remote site...",
        ssh=ssh,
        command="rm -R server/webapplication || true",
    )

    target = join("server", "webapplication")
    logger.info("Copy web application: %s => %s", CONFIG["webapplication_path"], target)
    deep_copy(ssh, CONFIG["webapplication_path"], target, "**/*", CONFIG["progress"])

    if restart:
        execute_remote(
            message="Restarting the service...",
            ssh=ssh,
            command="sudo systemctl restart argus_server.service",
        )

    # Close the SSH connection explicitly, like the other install_* helpers do
    # (previously it was leaked here).
    ssh.close()
def main(argv=None): # IGNORE:C0111
    """Command line entry point.

    Parses the CLI arguments, loads the YAML configuration
    (install.yaml or install.{env}.yaml) and dispatches to the installer
    of the selected component.

    :param argv: optional extra arguments appended to sys.argv
    :return: process exit code: 0 on success/interrupt, 2 on failure
    """

    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    try:
        # Setup argument parser
        parser = ArgumentParser(
            description=program_license, formatter_class=RawDescriptionHelpFormatter
        )
        parser.add_argument(
            "-v",
            "--verbose",
            dest="verbose",
            action="count",
            help="set verbosity level [default: %(default)s]",
        )
        parser.add_argument(
            "component",
            choices=["environment", "server", "monitoring", "webapplication", "database"],
        )
        parser.add_argument(
            "-e",
            "--env",
            dest="environment",
            default="",
            help="Select a different config (install.{environment}.yaml)",
        )
        parser.add_argument(
            "-r",
            "--restart",
            action="store_true",
            help="Restart depending service(s) after deployment",
        )
        parser.add_argument(
            "-u",
            "--update",
            action="store_true",
            help="Update the python environment for the depending service(s) after deployment",
        )
        parser.add_argument(
            "-p",
            "--progress",
            action="store_true",
            help="Show progress bars",
        )

        # Process arguments
        args = parser.parse_args()

        if args.verbose:
            logger.setLevel(logging.DEBUG)
            logger.info("Verbose mode on")
        else:
            logger.setLevel(logging.INFO)

        # Derive the config file name from this script's name; an -e/--env
        # argument selects install.{environment}.yaml instead.
        config_filename = __file__.replace(".py", ".yaml")
        if args.environment:
            config_filename = config_filename.replace(".yaml", "." + args.environment + ".yaml")

        logger.info("Working with %s", args)
        logger.info("Working from %s", config_filename)
        with open(config_filename, "r") as stream:
            # The loaded configuration is published through the module-level
            # CONFIG dict consumed by all install_* helpers.
            global CONFIG
            CONFIG = yaml.load(stream, Loader=yaml.FullLoader)
            CONFIG["progress"] = args.progress
            logger.info("Working with configuration: \n%s", json.dumps(CONFIG, indent=4, sort_keys=True))

        # Give the operator a chance to abort before anything is changed.
        input("Waiting before starting the installation to verify the configuration!")

        if args.component == "environment":
            install_environment()
        elif args.component == "server":
            install_server(args.update, args.restart)
        elif args.component == "monitoring":
            install_monitoring(args.update, args.restart)
        elif args.component == "webapplication":
            install_webapplication(args.restart)
        elif args.component == "database":
            install_database()
        else:
            logger.error("Unknown component: %s", args.component)

        logger.info("Finished successfully!")
        return 0
    except KeyboardInterrupt:
        # handle keyboard interrupt ###
        logger.info("\n\nCancelled!\n")
        return 0
    except Exception:
        # Top-level boundary: log the full traceback and signal failure.
        logger.exception("Failed to execute!")
        return 2
if __name__ == "__main__":
sys.exit(main())
| 31.246341 | 123 | 0.609788 |
import json
import logging
import subprocess
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from os import system
from os.path import join, exists
from socket import gaierror
from time import sleep
import paramiko
import yaml
from paramiko.ssh_exception import AuthenticationException, NoValidConnectionsError
from scp import SCPClient
from utils import (
deep_copy,
execute_remote,
generate_SSH_key,
list_copy,
print_lines,
show_progress
)
CONFIG = {}
logging.basicConfig(format="%(message)s")
logger = logging.getLogger()
logging.getLogger("paramiko").setLevel(logging.CRITICAL)
__all__ = []
__version__ = 0.1
__date__ = "2017-08-21"
__updated__ = "2019-08-21"
program_shortdesc = __import__("__main__").__doc__.split("---")[0]
program_license = """%s
Created by gkovacs81@gmail.com on %s.
Copyright 2019 arpi-security.info. All rights reserved.
USAGE
""" % (
program_shortdesc,
str(__date__),
)
def get_connection():
    """Open an SSH connection to the ArPI host.

    First tries key-based authentication as the configured ArPI user; on
    failure falls back to username/password against the default host.

    Returns:
        paramiko.SSHClient: an established SSH connection.

    Raises:
        Exception: if neither connection attempt succeeds.
    """
    # Create the client before the try block so the fallback branch can always
    # use it.  Previously `ssh` was assigned inside `try`, so a failure while
    # loading the private key (e.g. PasswordRequiredException, a subclass of
    # AuthenticationException) made the except branch crash with NameError.
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        logger.info(
            "Connecting with private key in '%s' %s@%s",
            CONFIG["arpi_key_name"],
            CONFIG["arpi_username"],
            CONFIG["arpi_hostname"],
        )
        private_key = None
        if exists(CONFIG["arpi_key_name"]):
            private_key = paramiko.RSAKey.from_private_key_file(
                CONFIG["arpi_key_name"], CONFIG["arpi_password"]
            )
        ssh.connect(
            CONFIG["arpi_hostname"],
            username=CONFIG["arpi_username"],
            password=CONFIG["arpi_password"],
            pkey=private_key,
        )
        logger.info("Connected")
    except (AuthenticationException, NoValidConnectionsError, gaierror):
        try:
            logger.info("Connecting %s@%s", CONFIG["default_username"], CONFIG["default_hostname"])
            ssh.connect(
                CONFIG["default_hostname"],
                username=CONFIG["default_username"],
                password=CONFIG["default_password"],
            )
            logger.info("Connected")
        except (NoValidConnectionsError, gaierror):
            raise Exception("Can't connect to the host!")
    return ssh
def install_environment():
if not exists(CONFIG["arpi_key_name"]) and \
not exists(CONFIG["arpi_key_name"] + ".pub"):
generate_SSH_key(CONFIG["arpi_key_name"], CONFIG["arpi_password"])
dhparam_file = "arpi_dhparam.pem"
if not exists(dhparam_file):
logger.info("dhparam (%s) generating", dhparam_file)
system(f"openssl dhparam -out {dhparam_file} {CONFIG['dhparam_size']}")
else:
logger.info("dhparam (%s) already exists", dhparam_file)
system(f"openssl dhparam -in {dhparam_file} -text | head -3")
# create the env variables string because paramiko update_evironment ignores them
arguments = {
"ARPI_PASSWORD": CONFIG["arpi_password"],
"ARGUS_DB_SCHEMA": CONFIG["argus_db_schema"],
"ARGUS_DB_USERNAME": CONFIG["argus_db_username"],
"ARGUS_DB_PASSWORD": CONFIG["argus_db_password"],
"ARPI_HOSTNAME": CONFIG["arpi_hostname"],
"DHPARAM_FILE": join("/tmp", dhparam_file),
# progress
"QUIET": "" if CONFIG["progress"] else "-q",
"PROGRESS": "on" if CONFIG["progress"] else "off"
}
# adding package versions
arguments.update({p.upper(): f"{v}" for p, v in CONFIG["packages"].items() if v})
arguments = [f"export {key}={value}" for key, value in arguments.items()]
arguments = "; ".join(arguments)
ssh = get_connection()
scp = SCPClient(ssh.get_transport(), progress=show_progress if CONFIG["progress"] else None)
scp.put("scripts/install_environment.sh", remote_path=".")
deep_copy(ssh, join(CONFIG["server_path"], "etc"), "/tmp/etc", "**/*", CONFIG["progress"])
list_copy(
ssh,
((dhparam_file, "/tmp"),),
CONFIG["progress"]
)
channel = ssh.get_transport().open_session()
channel.get_pty()
channel.set_combine_stderr(True)
output = channel.makefile("r", -1)
logger.info("Starting install script...")
channel.exec_command(f"{arguments}; ./install_environment.sh")
print_lines(output)
ssh.close()
# waiting for user
# 1. deploy key can timeout
# 2. ssh accept password only from terminal
input("Waiting before deploying public key!")
command = f"ssh-copy-id -i {CONFIG['arpi_key_name']} {CONFIG['arpi_username']}@{CONFIG['default_hostname']}"
logger.info("Deploy public key: %s", command)
while subprocess.call(command, shell=True) != 0:
# retry after 2 seconds
sleep(2)
ssh = get_connection()
execute_remote(
message="Enabling key based ssh authentication",
ssh=ssh,
command="sudo sed -i -E -e 's/.*PasswordAuthentication (yes|no)/PasswordAuthentication no/g' /etc/ssh/sshd_config",
)
execute_remote(message="Restarting the host", ssh=ssh, command="sudo reboot")
def install_component(component, update=False, restart=False):
ssh = get_connection()
execute_remote(
message="Creating server directories...",
ssh=ssh,
command="mkdir -p server/etc server/scripts server/src server/webapplication",
)
logger.info("Copy common files...")
list_copy(
ssh,
(
(join(CONFIG["server_path"], "Pipfile"), "server"),
(join(CONFIG["server_path"], "Pipfile.lock"), "server"),
(join(CONFIG["server_path"], f".env_{CONFIG['environment']}"), "server/.env"),
(join(CONFIG["server_path"], "src", "data.py"), join("server", "src", "data.py")),
(join(CONFIG["server_path"], "src", "hash.py"), join("server", "src", "hash.py")),
(join(CONFIG["server_path"], "src", "models.py"), join("server", "src", "models.py")),
), CONFIG["progress"]
)
deep_copy(
ssh, join(CONFIG["server_path"], "src", "tools"), join("server", "src", "tools"), "**/*.py", CONFIG["progress"]
)
logger.info("Copy component '%s'...", component)
deep_copy(
ssh,
join(CONFIG["server_path"], "src", component),
join("server", "src", component),
"**/*.py",
CONFIG["progress"]
)
if update:
execute_remote(
message="Start installing python packages on sytem...",
ssh=ssh,
command="cd server; sudo PIPENV_TIMEOUT=9999 pipenv install --system",
)
execute_remote(
message="Create virtual environment with python3 for argus...",
ssh=ssh,
command="cd server; PIPENV_TIMEOUT=9999 CI=1 pipenv install --skip-lock --site-packages",
)
execute_remote(
message="Create virtual environment with python3 for root...",
ssh=ssh,
command="cd server; sudo PIPENV_TIMEOUT=9999 CI=1 pipenv install --skip-lock --site-packages",
)
if restart:
execute_remote(
message="Restarting the service...",
ssh=ssh,
command="sudo systemctl restart argus_monitor.service argus_server.service",
)
ssh.close()
def install_server(update=False, restart=False):
    """Deploy the 'server' component; thin wrapper around install_component()."""
    install_component("server", update=update, restart=restart)
def install_monitoring(update=False, restart=False):
    """Deploy the 'monitoring' component; thin wrapper around install_component()."""
    install_component("monitoring", update=update, restart=restart)
def install_database():
ssh = get_connection()
execute_remote(
message="Initialize database...",
ssh=ssh,
command="cd server; pipenv run flask db init",
)
execute_remote(
message="Migrate database...",
ssh=ssh,
command="cd server; pipenv run flask db migrate",
)
execute_remote(
message="Upgrade database...",
ssh=ssh,
command="cd server; pipenv run flask db upgrade",
)
execute_remote(
message="Updating database content...",
ssh=ssh,
command=f"cd server; pipenv run src/data.py -d -c {CONFIG['argus_db_content']}",
)
ssh.close()
def install_webapplication(restart=False):
    """Install the web application component to a Raspberry PI.

    Removes any previously deployed web application, copies the freshly
    built one over, and optionally restarts the server service.

    :param restart: restart the dependent systemd service after deployment
    """
    ssh = get_connection()
    execute_remote(
        message="Delete old webapplication on remote site...",
        ssh=ssh,
        command="rm -R server/webapplication || true",
    )
    target = join("server", "webapplication")
    logger.info("Copy web application: %s => %s", CONFIG["webapplication_path"], target)
    deep_copy(ssh, CONFIG["webapplication_path"], target, "**/*", CONFIG["progress"])
    if restart:
        execute_remote(
            message="Restarting the service...",
            ssh=ssh,
            command="sudo systemctl restart argus_server.service",
        )
    # Close the SSH connection explicitly, like the other install_* helpers do
    # (previously it was leaked here).
    ssh.close()
def main(argv=None): # IGNORE:C0111
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
try:
# Setup argument parser
parser = ArgumentParser(
description=program_license, formatter_class=RawDescriptionHelpFormatter
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="count",
help="set verbosity level [default: %(default)s]",
)
parser.add_argument(
"component",
choices=["environment", "server", "monitoring", "webapplication", "database"],
)
parser.add_argument(
"-e",
"--env",
dest="environment",
default="",
help="Select a different config (install.{environment}.yaml)",
)
parser.add_argument(
"-r",
"--restart",
action="store_true",
help="Restart depending service(s) after deployment",
)
parser.add_argument(
"-u",
"--update",
action="store_true",
help="Update the python environment for the depending service(s) after deployment",
)
parser.add_argument(
"-p",
"--progress",
action="store_true",
help="Show progress bars",
)
# Process arguments
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
logger.info("Verbose mode on")
else:
logger.setLevel(logging.INFO)
config_filename = __file__.replace(".py", ".yaml")
if args.environment:
config_filename = config_filename.replace(".yaml", "." + args.environment + ".yaml")
logger.info("Working with %s", args)
logger.info("Working from %s", config_filename)
with open(config_filename, "r") as stream:
global CONFIG
CONFIG = yaml.load(stream, Loader=yaml.FullLoader)
CONFIG["progress"] = args.progress
logger.info("Working with configuration: \n%s", json.dumps(CONFIG, indent=4, sort_keys=True))
input("Waiting before starting the installation to verify the configuration!")
if args.component == "environment":
install_environment()
elif args.component == "server":
install_server(args.update, args.restart)
elif args.component == "monitoring":
install_monitoring(args.update, args.restart)
elif args.component == "webapplication":
install_webapplication(args.restart)
elif args.component == "database":
install_database()
else:
logger.error("Unknown component: %s", args.component)
logger.info("Finished successfully!")
return 0
except KeyboardInterrupt:
# handle keyboard interrupt ###
logger.info("\n\nCancelled!\n")
return 0
except Exception:
logger.exception("Failed to execute!")
return 2
if __name__ == "__main__":
sys.exit(main())
| true | true |
f72efec7f239fcde6bd467031c71d0a0a0d48054 | 3,661 | py | Python | DNCNN/common.py | Khanhnn00/blind_sr_denoise | 3153f90d20fd884ab69b47c30c685e0175276055 | [
"Apache-2.0"
] | null | null | null | DNCNN/common.py | Khanhnn00/blind_sr_denoise | 3153f90d20fd884ab69b47c30c685e0175276055 | [
"Apache-2.0"
] | null | null | null | DNCNN/common.py | Khanhnn00/blind_sr_denoise | 3153f90d20fd884ab69b47c30c685e0175276055 | [
"Apache-2.0"
] | null | null | null | import os
import random
import numpy as np
import scipy.misc as misc
import imageio
from tqdm import tqdm
import cv2
from PIL import Image
import torch
import torch.nn.functional as F
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']
BINARY_EXTENSIONS = ['.npy']
BENCHMARK = ['Set5', 'Set14', 'B100', 'Urban100', 'Manga109', 'DIV2K', 'DF2K']
####################
# Files & IO
####################
def is_image_file(filename):
    """Return True if *filename* ends with a known image extension."""
    # str.endswith accepts a tuple of suffixes and matches any of them.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def is_binary_file(filename):
    """Return True if *filename* ends with a known binary (.npy) extension."""
    return filename.endswith(tuple(BINARY_EXTENSIONS))
def _get_paths_from_images(path):
    """Recursively collect the sorted image file paths under *path*.

    Raises AssertionError if *path* is not a directory or holds no images.
    """
    assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path
    images = [
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in sorted(os.walk(path))
        for fname in sorted(fnames)
        if is_image_file(fname)
    ]
    assert images, '[%s] has no valid image file' % path
    return images
def _get_paths_from_binary(path):
    """Recursively collect the sorted binary (.npy) file paths under *path*.

    Raises AssertionError if *path* is not a directory or holds no binaries.
    """
    assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path
    files = [
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in sorted(os.walk(path))
        for fname in sorted(fnames)
        if is_binary_file(fname)
    ]
    assert files, '[%s] has no valid binary file' % path
    return files
def find_benchmark(dataroot):
    """Return the first benchmark name occurring in *dataroot*, else 'MyImage'."""
    return next((bm for bm in BENCHMARK if dataroot.find(bm) >= 0), 'MyImage')
def read_img(path):
    """Read an image file and return it as an HWC, RGB numpy array in [0, 255].

    NOTE(review): imageio returns uint8 here rather than the float32 the
    original comment claimed — confirm what downstream code expects.
    """
    img = imageio.imread(path, pilmode='RGB')
    # Defensive: with pilmode='RGB' the result should already be 3-channel,
    # but add a channel axis for any grayscale HxW result to keep HWC shape.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    return img
# image processing
# process on numpy image
####################
def im2tensor01(im_np):
    """Convert an HWC numpy image to a CHW float tensor scaled to [0, 1]."""
    # uint8 inputs are scaled down; float inputs are assumed to be in [0, 1] already.
    if im_np.dtype == 'uint8':
        im_np = im_np / 255.0
    chw = np.ascontiguousarray(im_np.transpose(2, 0, 1))
    return torch.FloatTensor(chw)
def tensor2im(im_t):
    """Convert a [-1, 1] CHW tensor (optionally with a batch dim of 1) to an HWC uint8 image."""
    arr = im_t.squeeze(0).detach().cpu().float().numpy().transpose(1, 2, 0)
    arr = (arr + 1) / 2.0 * 255.0
    return np.clip(np.round(arr), 0, 255).astype(np.uint8)
def get_patch(img_tar, patch_size):
    """Crop a random patch_size x patch_size patch from an HWC image."""
    height, width = img_tar.shape[:2]
    # Draw the horizontal offset first, then the vertical one (order matters
    # for reproducibility with a seeded RNG).
    left = random.randrange(0, width - patch_size + 1)
    top = random.randrange(0, height - patch_size + 1)
    return img_tar[top:top + patch_size, left:left + patch_size, :]
def augment(img_list, hflip=True, rot=True):
    """Apply one random flip/transpose decision to every HWC image in the list.

    The same horizontal-flip / vertical-flip / transpose choice is shared by
    all images so that paired inputs stay aligned.
    """
    # Short-circuiting preserves the original RNG consumption pattern:
    # random.random() is only drawn when the corresponding flag is enabled.
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _apply(img):
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        return img

    return [_apply(img) for img in img_list]
def modcrop(img_in, scale):
    """Return a copy of img_in with H and W cropped down to multiples of *scale*.

    Accepts 2-D (HW) or 3-D (HWC) arrays; raises ValueError otherwise.
    """
    img = np.copy(img_in)
    if img.ndim not in (2, 3):
        raise ValueError('Wrong img ndim: [%d].' % img.ndim)
    H, W = img.shape[:2]
    # Slicing only the first two axes leaves any channel axis untouched.
    return img[:H - H % scale, :W - W % scale]
| 29.055556 | 130 | 0.600656 | import os
import random
import numpy as np
import scipy.misc as misc
import imageio
from tqdm import tqdm
import cv2
from PIL import Image
import torch
import torch.nn.functional as F
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']
BINARY_EXTENSIONS = ['.npy']
BENCHMARK = ['Set5', 'Set14', 'B100', 'Urban100', 'Manga109', 'DIV2K', 'DF2K']
n sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '[%s] has no valid image file' % path
return images
def _get_paths_from_binary(path):
assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path
files = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_binary_file(fname):
binary_path = os.path.join(dirpath, fname)
files.append(binary_path)
assert files, '[%s] has no valid binary file' % path
return files
def find_benchmark(dataroot):
bm_list = [dataroot.find(bm)>=0 for bm in BENCHMARK]
if not sum(bm_list) == 0:
bm_idx = bm_list.index(True)
bm_name = BENCHMARK[bm_idx]
else:
bm_name = 'MyImage'
return bm_name
def read_img(path):
img = imageio.imread(path, pilmode='RGB')
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
return img
def tensor2im(im_t):
im_np = np.clip(np.round((np.transpose(im_t.squeeze(0).detach().cpu().float().numpy(), (1, 2, 0)) + 1) / 2.0 * 255.0), 0, 255)
return im_np.astype(np.uint8)
def get_patch(img_tar, patch_size):
oh, ow = img_tar.shape[:2]
ip = patch_size
tp = ip
ix = random.randrange(0, ow - ip + 1)
iy = random.randrange(0, oh - ip + 1)
tx, ty = ix, iy
img_tar = img_tar[ty:ty + tp, tx:tx + tp, :]
return img_tar
def augment(img_list, hflip=True, rot=True):
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
def _augment(img):
if hflip: img = img[:, ::-1, :]
if vflip: img = img[::-1, :, :]
if rot90: img = img.transpose(1, 0, 2)
return img
return [_augment(img) for img in img_list]
def modcrop(img_in, scale):
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [%d].' % img.ndim)
return img
| true | true |
f72efed312141d7d38b208a997d8ee5a183db48d | 1,290 | py | Python | app/core/tests/test_models.py | slauzinho/recipe-app-api | c05f2d007dc19bab5792742a1e7959b5cf8e95a4 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | slauzinho/recipe-app-api | c05f2d007dc19bab5792742a1e7959b5cf8e95a4 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | slauzinho/recipe-app-api | c05f2d007dc19bab5792742a1e7959b5cf8e95a4 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the project's custom user model manager."""

    def test_create_user_with_email_successful(self):
        """Creating a user with an email address and password succeeds."""
        new_user = get_user_model().objects.create_user(
            email='test@example.com',
            password='testpass123'
        )

        self.assertEqual(new_user.email, 'test@example.com')
        self.assertTrue(new_user.check_password('testpass123'))

    def test_new_user_email_normalized(self):
        """The email address of a new user is normalized (domain lower-cased)."""
        new_user = get_user_model().objects.create_user('test@EXAMPLE.COM', 'test123')

        self.assertEqual(new_user.email, 'test@example.com')

    def test_new_user_invalid_email(self):
        """Creating a user without an email address raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_new_superuser(self):
        """create_superuser sets both the superuser and staff flags."""
        superuser = get_user_model().objects.create_superuser(
            'test@example.com',
            'test123'
        )

        self.assertTrue(superuser.is_superuser)
        self.assertTrue(superuser.is_staff)
| 32.25 | 69 | 0.651163 | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = 'test@example.com'
password = 'testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
email = 'test@EXAMPLE.COM'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(
'test@example.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| true | true |
f72efee2c58c739a39aee6ac1603f9a379173f68 | 960 | py | Python | pics/views.py | NinahMo/Pictogram | d7b1ad7af253e6b3a34c2495b370328bbc051059 | [
"Unlicense"
] | null | null | null | pics/views.py | NinahMo/Pictogram | d7b1ad7af253e6b3a34c2495b370328bbc051059 | [
"Unlicense"
] | null | null | null | pics/views.py | NinahMo/Pictogram | d7b1ad7af253e6b3a34c2495b370328bbc051059 | [
"Unlicense"
] | null | null | null | from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404
from .models import Pics,categories
# Create your views here.
def welcome(request):
    """Render the static landing page."""
    return render(request, 'welcome.html')
def pictogram(request):
    """Render the gallery page with every uploaded picture."""
    images = Pics.objects.all()
    return render(request, 'pictogram.html', {'images':images})
def show(request, image_id):
    """Render the detail page for a single picture.

    NOTE(review): Pics.objects.get raises DoesNotExist (HTTP 500) for an
    unknown id; presumably this should map to a 404 — confirm intent.
    """
    image = Pics.objects.get(id=image_id)
    return render(request, 'show.html', {'image':image})
def search_results(request):
    """Search image categories by the user-supplied 'categories' query term."""
    # .get() returns None for a missing key; both None and '' are falsy,
    # matching the original membership-plus-truthiness check.
    search_term = request.GET.get("categories")
    if search_term:
        searched_categories = categories.search_by_categories(search_term)
        context = {"message": search_term, "categories": searched_categories}
    else:
        context = {"message": "You haven't searched for any term"}
    return render(request, 'search.html', context)
from django.http import HttpResponse,Http404
from .models import Pics,categories
def welcome(request):
return render(request, 'welcome.html')
def pictogram(request):
images = Pics.objects.all()
return render(request, 'pictogram.html', {'images':images})
def show(request, image_id):
image = Pics.objects.get(id=image_id)
return render(request, 'show.html', {'image':image})
def search_results(request):
if 'categories' in request.GET and request.GET["categories"]:
search_term = request.GET.get("categories")
searched_categories = categories.search_by_categories(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"categories":searched_categories})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message}) | true | true |
f72effa674b9c81ff3e1f7f6616cfc19ae50580c | 3,781 | py | Python | fast_pixel_cnn_pp/test_end_to_end.py | tinyrobots/Generalized-PixelVAE | 8d8c6033e14bf2ce37749bd2604e4a90459ff3d4 | [
"MIT"
] | 510 | 2017-02-21T17:18:51.000Z | 2022-02-02T17:12:46.000Z | fast_pixel_cnn_pp/test_end_to_end.py | jinxu06/fast-pixel-cnn | ee99634be08c726c3da7e8ba2675c8d1448e15af | [
"MIT"
] | 4 | 2017-08-24T18:20:11.000Z | 2021-07-05T06:00:56.000Z | fast_pixel_cnn_pp/test_end_to_end.py | jinxu06/fast-pixel-cnn | ee99634be08c726c3da7e8ba2675c8d1448e15af | [
"MIT"
] | 82 | 2017-02-21T23:16:28.000Z | 2021-09-18T13:04:59.000Z | from . import model
from . import fast_nn
import tensorflow as tf
import numpy as np
import os
import unittest
class FastPixelCNNPPEndToEndTest(tf.test.TestCase):
def test_end_to_end(self):
with self.test_session() as sess:
print('Creating model')
image_size = (10, 32, 32, 4)
batch_size, image_height, image_width, image_channels = image_size
# Create placeholders.
row_input = tf.placeholder(
tf.float32, [batch_size, 1, image_width, image_channels],
name='row_input')
pixel_input = tf.placeholder(
tf.float32, [batch_size, 1, 1, image_channels],
name='pixel_input')
row_id = tf.placeholder(tf.int32, [], name='row_id')
col_id = tf.placeholder(tf.int32, [], name='col_id')
ema = tf.train.ExponentialMovingAverage(0.9995)
# Create the model.
model_spec = tf.make_template('model', model.model_spec)
sample, fast_nn_out, v_stack = model_spec(
row_input, pixel_input, row_id, col_id, image_size)
# Initialize the caches.
cache_variables = [
v for v in tf.global_variables() if 'cache' in v.name
]
sess.run(tf.variables_initializer(cache_variables))
# Load the pretrained model
print('Restoring variables')
vars_to_restore = {
k: v
for k, v in ema.variables_to_restore().items()
if 'cache' not in k
}
saver = tf.train.Saver(vars_to_restore)
ckpt_path = None
assert ckpt_path, 'Provide a path to the checkpoint in this file'
saver.restore(sess, ckpt_path)
# Create the fixed random input.
np.random.seed(2702)
x = np.random.randint(0, 256, size=(10, 32, 32, 3))
x = np.cast[np.float32]((x - 127.5) / 127.5)
x_pad = np.concatenate(
(x, np.ones((batch_size, 32, 32, 1))), axis=3)
x_downshift = fast_nn.down_shift(x_pad)
x_rightshift = fast_nn.right_shift(x_pad)
# Holds the output.
num_output_features = 10 * 10
output_features = np.zeros(
(batch_size, 32, 32, num_output_features))
# Compute all features.
print('Computing features')
sess.run(fast_nn.reset_cache_op())
for row in range(image_height):
x_row_input = x_downshift[:, row:(row + 1), :, :]
sess.run(v_stack, {row_input: x_row_input, row_id: row})
for col in range(image_width):
x_pixel_input = x_rightshift[:, row:(row + 1),
col:(col + 1), :]
feed_dict = {
row_id: row,
col_id: col,
pixel_input: x_pixel_input
}
pixel_features = sess.run(fast_nn_out, feed_dict)
output_features[:, row:(row + 1), col:(
col + 1), :] = pixel_features
ground_truth_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'ground_truth_output.npy')
ground_truth_features = np.load(ground_truth_file)
total_features = np.prod(output_features[0].shape)
for i in range(batch_size):
self.assertTrue(
np.allclose(
output_features[i, :, :, :],
ground_truth_features[i, :, :, :],
atol=1e-4))
| 39.8 | 89 | 0.519968 | from . import model
from . import fast_nn
import tensorflow as tf
import numpy as np
import os
import unittest
class FastPixelCNNPPEndToEndTest(tf.test.TestCase):
def test_end_to_end(self):
with self.test_session() as sess:
print('Creating model')
image_size = (10, 32, 32, 4)
batch_size, image_height, image_width, image_channels = image_size
row_input = tf.placeholder(
tf.float32, [batch_size, 1, image_width, image_channels],
name='row_input')
pixel_input = tf.placeholder(
tf.float32, [batch_size, 1, 1, image_channels],
name='pixel_input')
row_id = tf.placeholder(tf.int32, [], name='row_id')
col_id = tf.placeholder(tf.int32, [], name='col_id')
ema = tf.train.ExponentialMovingAverage(0.9995)
model_spec = tf.make_template('model', model.model_spec)
sample, fast_nn_out, v_stack = model_spec(
row_input, pixel_input, row_id, col_id, image_size)
cache_variables = [
v for v in tf.global_variables() if 'cache' in v.name
]
sess.run(tf.variables_initializer(cache_variables))
print('Restoring variables')
vars_to_restore = {
k: v
for k, v in ema.variables_to_restore().items()
if 'cache' not in k
}
saver = tf.train.Saver(vars_to_restore)
ckpt_path = None
assert ckpt_path, 'Provide a path to the checkpoint in this file'
saver.restore(sess, ckpt_path)
np.random.seed(2702)
x = np.random.randint(0, 256, size=(10, 32, 32, 3))
x = np.cast[np.float32]((x - 127.5) / 127.5)
x_pad = np.concatenate(
(x, np.ones((batch_size, 32, 32, 1))), axis=3)
x_downshift = fast_nn.down_shift(x_pad)
x_rightshift = fast_nn.right_shift(x_pad)
num_output_features = 10 * 10
output_features = np.zeros(
(batch_size, 32, 32, num_output_features))
print('Computing features')
sess.run(fast_nn.reset_cache_op())
for row in range(image_height):
x_row_input = x_downshift[:, row:(row + 1), :, :]
sess.run(v_stack, {row_input: x_row_input, row_id: row})
for col in range(image_width):
x_pixel_input = x_rightshift[:, row:(row + 1),
col:(col + 1), :]
feed_dict = {
row_id: row,
col_id: col,
pixel_input: x_pixel_input
}
pixel_features = sess.run(fast_nn_out, feed_dict)
output_features[:, row:(row + 1), col:(
col + 1), :] = pixel_features
ground_truth_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'ground_truth_output.npy')
ground_truth_features = np.load(ground_truth_file)
total_features = np.prod(output_features[0].shape)
for i in range(batch_size):
self.assertTrue(
np.allclose(
output_features[i, :, :, :],
ground_truth_features[i, :, :, :],
atol=1e-4))
| true | true |
f72f0248d59c0576c9f63814fa86650afed6d006 | 9,579 | py | Python | dowhy/do_why.py | mrklees/dowhy | a9e950fd0cf180dc4cbbca13332638aab9d00c65 | [
"MIT"
] | 3 | 2019-12-21T05:46:21.000Z | 2020-05-19T15:35:02.000Z | dowhy/do_why.py | mrklees/dowhy | a9e950fd0cf180dc4cbbca13332638aab9d00c65 | [
"MIT"
] | null | null | null | dowhy/do_why.py | mrklees/dowhy | a9e950fd0cf180dc4cbbca13332638aab9d00c65 | [
"MIT"
] | 3 | 2019-09-05T10:59:58.000Z | 2021-02-04T02:53:59.000Z | """ Module containing the main model class for the dowhy package.
"""
import logging
from sympy import init_printing
import dowhy.causal_estimators as causal_estimators
import dowhy.causal_refuters as causal_refuters
import dowhy.utils.cli_helpers as cli
from dowhy.causal_estimator import CausalEstimate
from dowhy.causal_graph import CausalGraph
from dowhy.causal_identifier import CausalIdentifier
init_printing() # To display symbolic math symbols
class CausalModel:
"""Main class for storing the causal model state.
"""
def __init__(self, data, treatment, outcome, graph=None,
common_causes=None, instruments=None, estimand_type="ate",
proceed_when_unidentifiable=False,
**kwargs):
"""Initialize data and create a causal graph instance.
Assigns treatment and outcome variables.
Also checks and finds the common causes and instruments for treatment
and outcome.
At least one of graph, common_causes or instruments must be provided.
:param data: a pandas dataframe containing treatment, outcome and other
variables.
:param treatment: name of the treatment variable
:param outcome: name of the outcome variable
:param graph: path to DOT file containing a DAG or a string containing
a DAG specification in DOT format
:param common_causes: names of common causes of treatment and _outcome
:param instruments: names of instrumental variables for the effect of
treatment on outcome
:returns: an instance of CausalModel class
"""
self._data = data
self._treatment = treatment
self._outcome = outcome
self._estimand_type = estimand_type
self._proceed_when_unidentifiable = proceed_when_unidentifiable
if 'logging_level' in kwargs:
logging.basicConfig(level=kwargs['logging_level'])
else:
logging.basicConfig(level=logging.INFO)
# TODO: move the logging level argument to a json file. Tue 20 Feb 2018 06:56:27 PM DST
self.logger = logging.getLogger(__name__)
if graph is None:
self.logger.warning("Causal Graph not provided. DoWhy will construct a graph based on data inputs.")
self._common_causes = common_causes
self._instruments = instruments
if common_causes is not None and instruments is not None:
self._graph = CausalGraph(
self._treatment,
self._outcome,
common_cause_names=self._common_causes,
instrument_names=self._instruments,
observed_node_names=self._data.columns.tolist()
)
elif common_causes is not None:
self._graph = CausalGraph(
self._treatment,
self._outcome,
common_cause_names=self._common_causes,
observed_node_names=self._data.columns.tolist()
)
elif instruments is not None:
self._graph = CausalGraph(
self._treatment,
self._outcome,
instrument_names=self._instruments,
observed_node_names=self._data.columns.tolist()
)
else:
cli.query_yes_no(
"WARN: Are you sure that there are no common causes of treatment and outcome?",
default=None
)
else:
self._graph = CausalGraph(
self._treatment,
self._outcome,
graph,
observed_node_names=self._data.columns.tolist()
)
self._common_causes = self._graph.get_common_causes(self._treatment, self._outcome)
self._instruments = self._graph.get_instruments(self._treatment,
self._outcome)
self._other_variables = kwargs
self.summary()
def identify_effect(self):
"""Identify the causal effect to be estimated, using properties of the causal graph.
:returns: a probability expression for the causal effect if identified, else NULL
"""
self.identifier = CausalIdentifier(self._graph,
self._estimand_type,
proceed_when_unidentifiable=self._proceed_when_unidentifiable)
identified_estimand = self.identifier.identify_effect()
return identified_estimand
def estimate_effect(self, identified_estimand, method_name=None,
test_significance=None, method_params=None):
"""Estimate the identified causal effect.
If method_name is provided, uses the provided method. Else, finds a
suitable method to be used.
:param identified_estimand: a probability expression
that represents the effect to be estimated. Output of
CausalModel.identify_effect method
:param method_name: (optional) name of the estimation method to be used.
:returns: an instance of the CausalEstimate class, containing the causal effect estimate
and other method-dependent information
"""
if method_name is None:
pass
else:
str_arr = method_name.split(".")
identifier_name = str_arr[0]
estimator_name = str_arr[1]
identified_estimand.set_identifier_method(identifier_name)
causal_estimator_class = causal_estimators.get_class_object(estimator_name + "_estimator")
# Check if estimator's target estimand is identified
if identified_estimand.estimands[identifier_name] is None:
self.logger.warning("No valid identified estimand for using instrumental variables method")
estimate = CausalEstimate(None, None, None)
else:
causal_estimator = causal_estimator_class(
self._data,
identified_estimand,
self._treatment, self._outcome,
test_significance=test_significance,
params=method_params
)
estimate = causal_estimator.estimate_effect()
estimate.add_params(
estimand_type=identified_estimand.estimand_type,
estimator_class=causal_estimator_class
)
return estimate
def do(self, x, identified_estimand, method_name=None, method_params=None):
"""Estimate the identified causal effect.
If method_name is provided, uses the provided method. Else, finds a
suitable method to be used.
:param identified_estimand: a probability expression
that represents the effect to be estimated. Output of
CausalModel.identify_effect method
:param method_name: (optional) name of the estimation method to be used.
:returns: an instance of the CausalEstimate class, containing the causal effect estimate
and other method-dependent information
"""
if method_name is None:
pass
else:
str_arr = method_name.split(".")
identifier_name = str_arr[0]
estimator_name = str_arr[1]
identified_estimand.set_identifier_method(identifier_name)
causal_estimator_class = causal_estimators.get_class_object(estimator_name + "_estimator")
# Check if estimator's target estimand is identified
if identified_estimand.estimands[identifier_name] is None:
self.logger.warning("No valid identified estimand for using instrumental variables method")
estimate = CausalEstimate(None, None, None)
else:
causal_estimator = causal_estimator_class(
self._data,
identified_estimand,
self._treatment, self._outcome,
test_significance=False,
params=method_params
)
try:
estimate = causal_estimator.do(x)
except NotImplementedError:
self.logger.error('Do Operation not implemented or not supported for this estimator.')
raise NotImplementedError
return estimate
def refute_estimate(self, estimand, estimate, method_name=None, **kwargs):
"""Refute an estimated causal effect.
If method_name is provided, uses the provided method. Else, finds a
suitable method to use.
:param estimate: an instance of the CausalEstimate class.
:returns: an instance of the RefuteResult class
"""
if method_name is None:
pass
else:
refuter_class = causal_refuters.get_class_object(method_name)
refuter = refuter_class(
self._data,
identified_estimand=estimand,
estimate=estimate,
**kwargs
)
res = refuter.refute_estimate()
return res
def view_model(self, layout="dot"):
"""View the causal DAG.
:returns: a visualization of the graph
"""
self._graph.view_graph(layout)
def summary(self):
"""Print a text summary of the model.
:returns: None
"""
self.logger.info("Model to find the causal effect of treatment {0} on outcome {1}".format(self._treatment, self._outcome))
| 38.939024 | 130 | 0.624178 |
import logging
from sympy import init_printing
import dowhy.causal_estimators as causal_estimators
import dowhy.causal_refuters as causal_refuters
import dowhy.utils.cli_helpers as cli
from dowhy.causal_estimator import CausalEstimate
from dowhy.causal_graph import CausalGraph
from dowhy.causal_identifier import CausalIdentifier
init_printing()
class CausalModel:
def __init__(self, data, treatment, outcome, graph=None,
common_causes=None, instruments=None, estimand_type="ate",
proceed_when_unidentifiable=False,
**kwargs):
self._data = data
self._treatment = treatment
self._outcome = outcome
self._estimand_type = estimand_type
self._proceed_when_unidentifiable = proceed_when_unidentifiable
if 'logging_level' in kwargs:
logging.basicConfig(level=kwargs['logging_level'])
else:
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
if graph is None:
self.logger.warning("Causal Graph not provided. DoWhy will construct a graph based on data inputs.")
self._common_causes = common_causes
self._instruments = instruments
if common_causes is not None and instruments is not None:
self._graph = CausalGraph(
self._treatment,
self._outcome,
common_cause_names=self._common_causes,
instrument_names=self._instruments,
observed_node_names=self._data.columns.tolist()
)
elif common_causes is not None:
self._graph = CausalGraph(
self._treatment,
self._outcome,
common_cause_names=self._common_causes,
observed_node_names=self._data.columns.tolist()
)
elif instruments is not None:
self._graph = CausalGraph(
self._treatment,
self._outcome,
instrument_names=self._instruments,
observed_node_names=self._data.columns.tolist()
)
else:
cli.query_yes_no(
"WARN: Are you sure that there are no common causes of treatment and outcome?",
default=None
)
else:
self._graph = CausalGraph(
self._treatment,
self._outcome,
graph,
observed_node_names=self._data.columns.tolist()
)
self._common_causes = self._graph.get_common_causes(self._treatment, self._outcome)
self._instruments = self._graph.get_instruments(self._treatment,
self._outcome)
self._other_variables = kwargs
self.summary()
def identify_effect(self):
self.identifier = CausalIdentifier(self._graph,
self._estimand_type,
proceed_when_unidentifiable=self._proceed_when_unidentifiable)
identified_estimand = self.identifier.identify_effect()
return identified_estimand
def estimate_effect(self, identified_estimand, method_name=None,
test_significance=None, method_params=None):
if method_name is None:
pass
else:
str_arr = method_name.split(".")
identifier_name = str_arr[0]
estimator_name = str_arr[1]
identified_estimand.set_identifier_method(identifier_name)
causal_estimator_class = causal_estimators.get_class_object(estimator_name + "_estimator")
if identified_estimand.estimands[identifier_name] is None:
self.logger.warning("No valid identified estimand for using instrumental variables method")
estimate = CausalEstimate(None, None, None)
else:
causal_estimator = causal_estimator_class(
self._data,
identified_estimand,
self._treatment, self._outcome,
test_significance=test_significance,
params=method_params
)
estimate = causal_estimator.estimate_effect()
estimate.add_params(
estimand_type=identified_estimand.estimand_type,
estimator_class=causal_estimator_class
)
return estimate
def do(self, x, identified_estimand, method_name=None, method_params=None):
if method_name is None:
pass
else:
str_arr = method_name.split(".")
identifier_name = str_arr[0]
estimator_name = str_arr[1]
identified_estimand.set_identifier_method(identifier_name)
causal_estimator_class = causal_estimators.get_class_object(estimator_name + "_estimator")
# Check if estimator's target estimand is identified
if identified_estimand.estimands[identifier_name] is None:
self.logger.warning("No valid identified estimand for using instrumental variables method")
estimate = CausalEstimate(None, None, None)
else:
causal_estimator = causal_estimator_class(
self._data,
identified_estimand,
self._treatment, self._outcome,
test_significance=False,
params=method_params
)
try:
estimate = causal_estimator.do(x)
except NotImplementedError:
self.logger.error('Do Operation not implemented or not supported for this estimator.')
raise NotImplementedError
return estimate
def refute_estimate(self, estimand, estimate, method_name=None, **kwargs):
if method_name is None:
pass
else:
refuter_class = causal_refuters.get_class_object(method_name)
refuter = refuter_class(
self._data,
identified_estimand=estimand,
estimate=estimate,
**kwargs
)
res = refuter.refute_estimate()
return res
def view_model(self, layout="dot"):
self._graph.view_graph(layout)
def summary(self):
self.logger.info("Model to find the causal effect of treatment {0} on outcome {1}".format(self._treatment, self._outcome))
| true | true |
f72f03ee4306e4479f46e2f00d5347764d7ca9b6 | 6,514 | py | Python | lib/flows/cron/compactors_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 3 | 2016-02-20T13:06:31.000Z | 2017-12-15T12:09:01.000Z | lib/flows/cron/compactors_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 3 | 2020-09-11T12:54:50.000Z | 2020-09-11T12:55:01.000Z | lib/flows/cron/compactors_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Tests for grr.lib.flows.cron.compactors."""
# pylint: disable=unused-import, g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import, g-bad-import-order
from grr.lib import aff4
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class PackedVersionedCollectionCompactorTest(test_lib.FlowTestsBaseclass):
"""Test for PackedVersionedCollectionCompactor."""
def testCompactsSingleCollection(self):
with aff4.FACTORY.Create("aff4:/tmp/coll", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
# Collection is not compacted, so recorded size is 0.
fd = aff4.FACTORY.Open("aff4:/tmp/coll", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
# Run the compactor.
for _ in test_lib.TestFlowHelper("PackedVersionedCollectionCompactor",
token=self.token):
pass
# Collection is compacted now, so recorded size is 1.
fd = aff4.FACTORY.Open("aff4:/tmp/coll", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 1)
def testNotificationIsRemovedAfterCompaction(self):
with aff4.FACTORY.Create("aff4:/tmp/coll", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
# Check that there's 1 compaction notification for our collection.
notifications = aff4.PackedVersionedCollection.QueryNotifications(
token=self.token)
notifications = [n for n in notifications
if n == "aff4:/tmp/coll"]
self.assertEqual(len(list(notifications)), 1)
# Run the compactor.
for _ in test_lib.TestFlowHelper("PackedVersionedCollectionCompactor",
token=self.token):
pass
# Check that notification for our collection is deleted after compaction.
notifications = aff4.PackedVersionedCollection.QueryNotifications(
token=self.token)
notifications = [n for n in notifications
if n == "aff4:/tmp/coll"]
self.assertEqual(len(list(notifications)), 0)
def testNewNotificationsAreNotRemovedAfterCompaction(self):
def AddNewElementToCollection(*unused_args, **unused_kwargs):
with aff4.FACTORY.Create("aff4:/tmp/coll", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
AddNewElementToCollection()
# Check that there's 1 compaction notification for our collection.
notifications = aff4.PackedVersionedCollection.QueryNotifications(
token=self.token)
notifications = [n for n in notifications
if n == "aff4:/tmp/coll"]
self.assertEqual(len(list(notifications)), 1)
# When Compact() is called on collection, we add additional element to
# the collection and notification gets written to the data store.
# This notification shouldn't be deleted after compaction, because
# it was written during the compaction, and therefore there are
# probably some uncompacted elements that should be compacted during
# then next compaction round.
with utils.Stubber(aff4.PackedVersionedCollection, "Compact",
AddNewElementToCollection):
# Run the compactor.
for _ in test_lib.TestFlowHelper("PackedVersionedCollectionCompactor",
token=self.token):
pass
# Check that notification for our collection is deleted after compaction.
notifications = aff4.PackedVersionedCollection.QueryNotifications(
token=self.token)
notifications = [n for n in notifications
if n == "aff4:/tmp/coll"]
self.assertEqual(len(list(notifications)), 1)
def testCompactsTwoCollections(self):
with aff4.FACTORY.Create("aff4:/tmp/coll1", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
with aff4.FACTORY.Create("aff4:/tmp/coll2", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
# Collection is not compacted, so recorded size is 0 for both collections.
fd = aff4.FACTORY.Open("aff4:/tmp/coll1", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
fd = aff4.FACTORY.Open("aff4:/tmp/coll2", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
# Run the compactor.
for _ in test_lib.TestFlowHelper("PackedVersionedCollectionCompactor",
token=self.token):
pass
# Collection is not compacted, so recorded size is 1 for both collections.
fd = aff4.FACTORY.Open("aff4:/tmp/coll1", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 1)
fd = aff4.FACTORY.Open("aff4:/tmp/coll2", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 1)
def testSecondConsecutiveRunDoesNothing(self):
with aff4.FACTORY.Create("aff4:/tmp/coll", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
# Collection is not compacted, so recorded size is 0.
fd = aff4.FACTORY.Open("aff4:/tmp/coll", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
# Run the compactor and check that it reports that our collection
# got compacted.
flow_urn = flow.GRRFlow.StartFlow(
flow_name="PackedVersionedCollectionCompactor", sync=True,
token=self.token)
flow_fd = aff4.FACTORY.Open(flow_urn, token=self.token)
self.assertTrue(list(l.log_message for l in flow_fd.GetLog()
if "aff4:/tmp/coll" in l.log_message))
# Run the compactor again and check that our collection isn't
# mentioned.
flow_urn = flow.GRRFlow.StartFlow(
flow_name="PackedVersionedCollectionCompactor", sync=True,
token=self.token)
flow_fd = aff4.FACTORY.Open(flow_urn, token=self.token)
self.assertFalse(list(l.log_message for l in flow_fd.GetLog()
if "aff4:/tmp/coll" in l.log_message))
def main(argv):
  """Entry point used by flags.StartMain below."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| 40.7125 | 78 | 0.675161 |
from grr.lib import server_plugins
from grr.lib import aff4
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class PackedVersionedCollectionCompactorTest(test_lib.FlowTestsBaseclass):
def testCompactsSingleCollection(self):
with aff4.FACTORY.Create("aff4:/tmp/coll", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
fd = aff4.FACTORY.Open("aff4:/tmp/coll", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
for _ in test_lib.TestFlowHelper("PackedVersionedCollectionCompactor",
token=self.token):
pass
fd = aff4.FACTORY.Open("aff4:/tmp/coll", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 1)
def testNotificationIsRemovedAfterCompaction(self):
with aff4.FACTORY.Create("aff4:/tmp/coll", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
notifications = aff4.PackedVersionedCollection.QueryNotifications(
token=self.token)
notifications = [n for n in notifications
if n == "aff4:/tmp/coll"]
self.assertEqual(len(list(notifications)), 1)
# Run the compactor.
for _ in test_lib.TestFlowHelper("PackedVersionedCollectionCompactor",
token=self.token):
pass
# Check that notification for our collection is deleted after compaction.
notifications = aff4.PackedVersionedCollection.QueryNotifications(
token=self.token)
notifications = [n for n in notifications
if n == "aff4:/tmp/coll"]
self.assertEqual(len(list(notifications)), 0)
def testNewNotificationsAreNotRemovedAfterCompaction(self):
def AddNewElementToCollection(*unused_args, **unused_kwargs):
with aff4.FACTORY.Create("aff4:/tmp/coll", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
AddNewElementToCollection()
# Check that there's 1 compaction notification for our collection.
notifications = aff4.PackedVersionedCollection.QueryNotifications(
token=self.token)
notifications = [n for n in notifications
if n == "aff4:/tmp/coll"]
self.assertEqual(len(list(notifications)), 1)
# it was written during the compaction, and therefore there are
# probably some uncompacted elements that should be compacted during
# then next compaction round.
with utils.Stubber(aff4.PackedVersionedCollection, "Compact",
AddNewElementToCollection):
# Run the compactor.
for _ in test_lib.TestFlowHelper("PackedVersionedCollectionCompactor",
token=self.token):
pass
# Check that notification for our collection is deleted after compaction.
notifications = aff4.PackedVersionedCollection.QueryNotifications(
token=self.token)
notifications = [n for n in notifications
if n == "aff4:/tmp/coll"]
self.assertEqual(len(list(notifications)), 1)
def testCompactsTwoCollections(self):
with aff4.FACTORY.Create("aff4:/tmp/coll1", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
with aff4.FACTORY.Create("aff4:/tmp/coll2", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
# Collection is not compacted, so recorded size is 0 for both collections.
fd = aff4.FACTORY.Open("aff4:/tmp/coll1", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
fd = aff4.FACTORY.Open("aff4:/tmp/coll2", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
# Run the compactor.
for _ in test_lib.TestFlowHelper("PackedVersionedCollectionCompactor",
token=self.token):
pass
# Collection is not compacted, so recorded size is 1 for both collections.
fd = aff4.FACTORY.Open("aff4:/tmp/coll1", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 1)
fd = aff4.FACTORY.Open("aff4:/tmp/coll2", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 1)
def testSecondConsecutiveRunDoesNothing(self):
with aff4.FACTORY.Create("aff4:/tmp/coll", "PackedVersionedCollection",
mode="w", token=self.token) as fd:
fd.Add(rdfvalue.GrrMessage(request_id=1))
# Collection is not compacted, so recorded size is 0.
fd = aff4.FACTORY.Open("aff4:/tmp/coll", token=self.token)
self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
# Run the compactor and check that it reports that our collection
# got compacted.
flow_urn = flow.GRRFlow.StartFlow(
flow_name="PackedVersionedCollectionCompactor", sync=True,
token=self.token)
flow_fd = aff4.FACTORY.Open(flow_urn, token=self.token)
self.assertTrue(list(l.log_message for l in flow_fd.GetLog()
if "aff4:/tmp/coll" in l.log_message))
# Run the compactor again and check that our collection isn't
flow_urn = flow.GRRFlow.StartFlow(
flow_name="PackedVersionedCollectionCompactor", sync=True,
token=self.token)
flow_fd = aff4.FACTORY.Open(flow_urn, token=self.token)
self.assertFalse(list(l.log_message for l in flow_fd.GetLog()
if "aff4:/tmp/coll" in l.log_message))
def main(argv):
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| true | true |
f72f0480cd1832c1a453482d9f0e8bb0cd69f5f7 | 6,838 | py | Python | carla-data-export/dataexport.py | zhangyanyu0722/EC523_Project | 72673713bb798023e82ccc257e8c05459c34a4b9 | [
"MIT"
] | 3 | 2020-10-06T19:32:02.000Z | 2020-10-21T04:16:04.000Z | carla-data-export/dataexport.py | zhangyanyu0722/EC523_Project | 72673713bb798023e82ccc257e8c05459c34a4b9 | [
"MIT"
] | null | null | null | carla-data-export/dataexport.py | zhangyanyu0722/EC523_Project | 72673713bb798023e82ccc257e8c05459c34a4b9 | [
"MIT"
] | null | null | null | """
This file contains all the methods responsible for saving the generated data in the correct output format.
"""
import cv2
import numpy as np
import os
import logging
from utils import degrees_to_radians
import json
def save_groundplanes(planes_fname, player_measurements, lidar_height):
    """Save the ground-plane description for the current frame.

    File layout (KITTI-style): three header lines ("# Plane", "Width 4",
    "Height 1"), then one line with the three components of the plane's
    normal vector followed by the plane height, i.e. the distance from the
    lidar to the ground in meters.

    Args:
        planes_fname: output text file path.
        player_measurements: CARLA measurement object; only
            .transform.rotation.pitch and .roll (degrees) are read.
        lidar_height: lidar height above the ground plane, in meters.

    Note: the original version placed this docstring *after* the import
    statement, turning it into a discarded string expression rather than
    the function's docstring.
    """
    from math import cos, sin, radians

    rotation = player_measurements.transform.rotation
    # CARLA reports rotations in degrees; trigonometry needs radians.
    # math.radians replaces the project-local degrees_to_radians helper
    # (identical conversion), leaving this function stdlib-only.
    pitch = radians(rotation.pitch)
    roll = radians(rotation.roll)
    # Rotate the ground normal (the -y axis in this convention) by pitch/roll.
    normal_vector = [cos(pitch) * sin(roll),
                     -cos(pitch) * cos(roll),
                     sin(pitch)]
    with open(planes_fname, 'w') as f:
        f.write("# Plane\n")
        f.write("Width 4\n")
        f.write("Height 1\n")
        f.write("{} {}\n".format(" ".join(map(str, normal_vector)), lidar_height))
    logging.info("Wrote plane data to %s", planes_fname)
def save_ref_files(OUTPUT_FOLDER, TIME_ON_NEW_EPISODE, PHASE, id):
""" Appends the id of the given record to the files """
# for name in ['train.txt', 'val.txt', 'trainval.txt']:
# path = os.path.join(OUTPUT_FOLDER, name)
# with open(path, 'a') as f:
# f.write("{0:06}".format(id) + '\n')
# logging.info("Wrote reference files to %s", path)
prefix = os.path.join("\".", "data", "carla", PHASE, "label", TIME_ON_NEW_EPISODE)
name = "{0:06}.json\"".format(id)
path = os.path.join(OUTPUT_FOLDER, "label", "{}.json".format(TIME_ON_NEW_EPISODE))
with open(path, "a") as f:
filePath = os.path.join(prefix, name)
f.write(filePath + "\n")
logging.info("Wrote reference files to %s", path)
def save_image_data(filename, image):
logging.info("Wrote image data to %s", filename)
# Convert to correct color format
color_fmt = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(filename, color_fmt)
def save_lidar_data(filename, point_cloud, LIDAR_HEIGHT, format="bin"):
""" Saves lidar data to given filename, according to the lidar data format.
bin is used for KITTI-data format, while .ply is the regular point cloud format
In Unreal, the coordinate system of the engine is defined as, which is the same as the lidar points
z
^ ^ x
| /
| /
|/____> y
This is a left-handed coordinate system, with x being forward, y to the right and z up
See also https://github.com/carla-simulator/carla/issues/498
However, the lidar coordinate system from KITTI is defined as
z
^ ^ x
| /
| /
y<____|/
Which is a right handed coordinate sylstem
Therefore, we need to flip the y axis of the lidar in order to get the correct lidar format for kitti.
This corresponds to the following changes from Carla to Kitti
Carla: X Y Z
KITTI: X -Y Z
NOTE: We do not flip the coordinate system when saving to .ply.
"""
logging.info("Wrote lidar data to %s", filename)
if format == "bin":
lidar_array = [[point[0], -point[1], point[2], 1.0]
for point in point_cloud]
lidar_array = np.array(lidar_array).astype(np.float32)
logging.debug("Lidar min/max of x: {} {}".format(
lidar_array[:, 0].min(), lidar_array[:, 0].max()))
logging.debug("Lidar min/max of y: {} {}".format(
lidar_array[:, 1].min(), lidar_array[:, 0].max()))
logging.debug("Lidar min/max of z: {} {}".format(
lidar_array[:, 2].min(), lidar_array[:, 0].max()))
lidar_array.tofile(filename)
else:
lidar_measurement.point_cloud.save_to_disk(filename)
def save_kitti_data(filename, datapoints):
with open(filename, 'w') as f:
# out_str = "\n".join([str(point) for point in datapoints if point])
# f.write(out_str)
json.dump(datapoints, f)
logging.info("Wrote kitti data to %s", filename)
def save_calibration_matrices(filename, intrinsic_mat, extrinsic_mat):
""" Saves the calibration matrices to a file.
AVOD (and KITTI) refers to P as P=K*[R;t], so we will just store P.
The resulting file will contain:
3x4 p0-p3 Camera P matrix. Contains extrinsic
and intrinsic parameters. (P=K*[R;t])
3x3 r0_rect Rectification matrix, required to transform points
from velodyne to camera coordinate frame.
3x4 tr_velodyne_to_cam Used to transform from velodyne to cam
coordinate frame according to:
Point_Camera = P_cam * R0_rect *
Tr_velo_to_cam *
Point_Velodyne.
3x4 tr_imu_to_velo Used to transform from imu to velodyne coordinate frame. This is not needed since we do not export
imu data.
"""
# KITTI format demands that we flatten in row-major order
ravel_mode = 'C'
P0 = intrinsic_mat
P0 = np.column_stack((P0, np.array([0, 0, 0])))
P0 = np.ravel(P0, order=ravel_mode)
R0 = np.identity(3)
TR_velodyne = np.array([[0, -1, 0],
[0, 0, -1],
[1, 0, 0]])
# Add translation vector from velo to camera. This is 0 because the position of camera and lidar is equal in our configuration.
TR_velodyne = np.column_stack((TR_velodyne, np.array([0, 0, 0])))
TR_imu_to_velo = np.identity(3)
TR_imu_to_velo = np.column_stack((TR_imu_to_velo, np.array([0, 0, 0])))
def write_flat(f, name, arr):
f.write("{}: {}\n".format(name, ' '.join(
map(str, arr.flatten(ravel_mode).squeeze()))))
# All matrices are written on a line with spacing
with open(filename, 'w') as f:
for i in range(4): # Avod expects all 4 P-matrices even though we only use the first
write_flat(f, "P" + str(i), P0)
write_flat(f, "R0_rect", R0)
write_flat(f, "Tr_velo_to_cam", TR_velodyne)
write_flat(f, "TR_imu_to_velo", TR_imu_to_velo)
logging.info("Wrote all calibration matrices to %s", filename)
| 43.278481 | 135 | 0.603832 | import cv2
import numpy as np
import os
import logging
from utils import degrees_to_radians
import json
def save_groundplanes(planes_fname, player_measurements, lidar_height):
from math import cos, sin
rotation = player_measurements.transform.rotation
pitch, roll = rotation.pitch, rotation.roll
pitch = degrees_to_radians(pitch)
roll = degrees_to_radians(roll)
normal_vector = [cos(pitch)*sin(roll),
-cos(pitch)*cos(roll),
sin(pitch)
]
normal_vector = map(str, normal_vector)
with open(planes_fname, 'w') as f:
f.write("# Plane\n")
f.write("Width 4\n")
f.write("Height 1\n")
f.write("{} {}\n".format(" ".join(normal_vector), lidar_height))
logging.info("Wrote plane data to %s", planes_fname)
def save_ref_files(OUTPUT_FOLDER, TIME_ON_NEW_EPISODE, PHASE, id):
prefix = os.path.join("\".", "data", "carla", PHASE, "label", TIME_ON_NEW_EPISODE)
name = "{0:06}.json\"".format(id)
path = os.path.join(OUTPUT_FOLDER, "label", "{}.json".format(TIME_ON_NEW_EPISODE))
with open(path, "a") as f:
filePath = os.path.join(prefix, name)
f.write(filePath + "\n")
logging.info("Wrote reference files to %s", path)
def save_image_data(filename, image):
logging.info("Wrote image data to %s", filename)
color_fmt = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(filename, color_fmt)
def save_lidar_data(filename, point_cloud, LIDAR_HEIGHT, format="bin"):
logging.info("Wrote lidar data to %s", filename)
if format == "bin":
lidar_array = [[point[0], -point[1], point[2], 1.0]
for point in point_cloud]
lidar_array = np.array(lidar_array).astype(np.float32)
logging.debug("Lidar min/max of x: {} {}".format(
lidar_array[:, 0].min(), lidar_array[:, 0].max()))
logging.debug("Lidar min/max of y: {} {}".format(
lidar_array[:, 1].min(), lidar_array[:, 0].max()))
logging.debug("Lidar min/max of z: {} {}".format(
lidar_array[:, 2].min(), lidar_array[:, 0].max()))
lidar_array.tofile(filename)
else:
lidar_measurement.point_cloud.save_to_disk(filename)
def save_kitti_data(filename, datapoints):
with open(filename, 'w') as f:
json.dump(datapoints, f)
logging.info("Wrote kitti data to %s", filename)
def save_calibration_matrices(filename, intrinsic_mat, extrinsic_mat):
ravel_mode = 'C'
P0 = intrinsic_mat
P0 = np.column_stack((P0, np.array([0, 0, 0])))
P0 = np.ravel(P0, order=ravel_mode)
R0 = np.identity(3)
TR_velodyne = np.array([[0, -1, 0],
[0, 0, -1],
[1, 0, 0]])
TR_velodyne = np.column_stack((TR_velodyne, np.array([0, 0, 0])))
TR_imu_to_velo = np.identity(3)
TR_imu_to_velo = np.column_stack((TR_imu_to_velo, np.array([0, 0, 0])))
def write_flat(f, name, arr):
f.write("{}: {}\n".format(name, ' '.join(
map(str, arr.flatten(ravel_mode).squeeze()))))
with open(filename, 'w') as f:
for i in range(4):
write_flat(f, "P" + str(i), P0)
write_flat(f, "R0_rect", R0)
write_flat(f, "Tr_velo_to_cam", TR_velodyne)
write_flat(f, "TR_imu_to_velo", TR_imu_to_velo)
logging.info("Wrote all calibration matrices to %s", filename)
| true | true |
f72f0708d59dc5469fe994c4b30668cfc4355f4a | 22 | py | Python | __init__.py | cosanlab/facesync | bd5922de5729e4e76a6eaae84b45d965660f1545 | [
"MIT"
] | 9 | 2018-07-13T14:06:07.000Z | 2021-12-24T01:53:20.000Z | __init__.py | cosanlab/facesync | bd5922de5729e4e76a6eaae84b45d965660f1545 | [
"MIT"
] | null | null | null | __init__.py | cosanlab/facesync | bd5922de5729e4e76a6eaae84b45d965660f1545 | [
"MIT"
] | 2 | 2019-11-24T00:43:39.000Z | 2020-10-08T05:06:52.000Z | __all__ = ["facesync"] | 22 | 22 | 0.681818 | __all__ = ["facesync"] | true | true |
f72f071336b0f843d1521175037b68c86bc72fb7 | 467 | py | Python | complex/complex/logger.py | jasonamyers/pynash-click | 512e9712dc1de80e76a6815e0df04dd46af45641 | [
"MIT"
] | null | null | null | complex/complex/logger.py | jasonamyers/pynash-click | 512e9712dc1de80e76a6815e0df04dd46af45641 | [
"MIT"
] | null | null | null | complex/complex/logger.py | jasonamyers/pynash-click | 512e9712dc1de80e76a6815e0df04dd46af45641 | [
"MIT"
] | null | null | null | import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
file_log_handler = logging.FileHandler('combinator-cli.log')
logger.addHandler(file_log_handler)
stderr_log_handler = logging.StreamHandler()
logger.addHandler(stderr_log_handler)
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string)
file_log_handler.setFormatter(formatter)
stderr_log_handler.setFormatter(formatter)
| 29.1875 | 70 | 0.815846 | import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
file_log_handler = logging.FileHandler('combinator-cli.log')
logger.addHandler(file_log_handler)
stderr_log_handler = logging.StreamHandler()
logger.addHandler(stderr_log_handler)
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string)
file_log_handler.setFormatter(formatter)
stderr_log_handler.setFormatter(formatter)
| true | true |
f72f07649189725423a37d1b780363c21568ce47 | 1,124 | py | Python | model-optimizer/extensions/front/mxnet/rnn_param_concat.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | null | null | null | model-optimizer/extensions/front/mxnet/rnn_param_concat.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | 19 | 2021-03-26T08:11:00.000Z | 2022-02-21T13:06:26.000Z | model-optimizer/extensions/front/mxnet/rnn_param_concat.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | 1 | 2021-07-28T17:30:46.000Z | 2021-07-28T17:30:46.000Z | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.ops.concat import Concat
class RNNParamConcatFrontExtractor(FrontExtractorOp):
op = '_rnn_param_concat'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
data = {
'axis': attrs.int("dim", 1),
}
# update the attributes of the node
Concat.update_node_stat(node, data)
return cls.enabled
| 31.222222 | 73 | 0.728648 |
from mo.front.extractor import FrontExtractorOp
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.ops.concat import Concat
class RNNParamConcatFrontExtractor(FrontExtractorOp):
op = '_rnn_param_concat'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
data = {
'axis': attrs.int("dim", 1),
}
Concat.update_node_stat(node, data)
return cls.enabled
| true | true |
f72f0880bd5a9c11966123d872540d2c0220fd79 | 2,536 | py | Python | scanapi/evaluators/string_evaluator.py | dubirajara/scanapi | b0c7b40a48a4d60871c0b3cf9c959d0155985180 | [
"MIT"
] | 1 | 2020-06-02T18:08:08.000Z | 2020-06-02T18:08:08.000Z | scanapi/evaluators/string_evaluator.py | dubirajara/scanapi | b0c7b40a48a4d60871c0b3cf9c959d0155985180 | [
"MIT"
] | null | null | null | scanapi/evaluators/string_evaluator.py | dubirajara/scanapi | b0c7b40a48a4d60871c0b3cf9c959d0155985180 | [
"MIT"
] | null | null | null | import logging
import os
import re
import sys
from scanapi.errors import BadConfigurationError, InvalidPythonCodeError
from scanapi.evaluators.code_evaluator import CodeEvaluator
logger = logging.getLogger(__name__)
variable_pattern = re.compile(
r"(?P<something_before>\w*)(?P<start>\${)(?P<variable>\w*)(?P<end>})(?P<something_after>\w*)"
) # ${<variable>}
class StringEvaluator:
def __init__(self, spec_evaluator):
self.spec_evaluator = spec_evaluator
self.api_tree = spec_evaluator.api_tree
self.code_evaluator = CodeEvaluator(self)
def evaluate(self, sequence):
try:
sequence = self.evaluate_env_var(sequence)
except BadConfigurationError as e:
logger.error(e)
sys.exit()
sequence = self.evaluate_custom_var(sequence)
if not self.api_tree.responses:
return sequence
try:
return self.code_evaluator.evaluate(sequence)
except InvalidPythonCodeError as e:
logger.error(e)
sys.exit()
def evaluate_env_var(self, sequence):
matches = variable_pattern.finditer(sequence)
if not matches:
return sequence
for match in matches:
variable_name = match.group("variable")
if any(letter.islower() for letter in variable_name):
continue
try:
variable_value = os.environ[variable_name]
except KeyError as e:
raise BadConfigurationError(e)
sequence = self.replace_var_with_value(
sequence, match.group(), variable_value
)
return sequence
def evaluate_custom_var(self, sequence):
matches = variable_pattern.finditer(sequence)
if not matches:
return sequence
for match in matches:
variable_name = match.group("variable")
if variable_name.isupper():
continue
if not self.api_tree.custom_vars.get(variable_name):
continue
variable_value = self.spec_evaluator.evaluate(
self.api_tree.custom_vars[variable_name]
)
sequence = self.replace_var_with_value(
sequence, match.group(), variable_value
)
return sequence
def replace_var_with_value(self, sequence, variable, variable_value):
variable = re.escape(variable)
return re.sub(variable, variable_value, sequence)
| 28.494382 | 97 | 0.621451 | import logging
import os
import re
import sys
from scanapi.errors import BadConfigurationError, InvalidPythonCodeError
from scanapi.evaluators.code_evaluator import CodeEvaluator
logger = logging.getLogger(__name__)
variable_pattern = re.compile(
r"(?P<something_before>\w*)(?P<start>\${)(?P<variable>\w*)(?P<end>})(?P<something_after>\w*)"
)
class StringEvaluator:
def __init__(self, spec_evaluator):
self.spec_evaluator = spec_evaluator
self.api_tree = spec_evaluator.api_tree
self.code_evaluator = CodeEvaluator(self)
def evaluate(self, sequence):
try:
sequence = self.evaluate_env_var(sequence)
except BadConfigurationError as e:
logger.error(e)
sys.exit()
sequence = self.evaluate_custom_var(sequence)
if not self.api_tree.responses:
return sequence
try:
return self.code_evaluator.evaluate(sequence)
except InvalidPythonCodeError as e:
logger.error(e)
sys.exit()
def evaluate_env_var(self, sequence):
matches = variable_pattern.finditer(sequence)
if not matches:
return sequence
for match in matches:
variable_name = match.group("variable")
if any(letter.islower() for letter in variable_name):
continue
try:
variable_value = os.environ[variable_name]
except KeyError as e:
raise BadConfigurationError(e)
sequence = self.replace_var_with_value(
sequence, match.group(), variable_value
)
return sequence
def evaluate_custom_var(self, sequence):
matches = variable_pattern.finditer(sequence)
if not matches:
return sequence
for match in matches:
variable_name = match.group("variable")
if variable_name.isupper():
continue
if not self.api_tree.custom_vars.get(variable_name):
continue
variable_value = self.spec_evaluator.evaluate(
self.api_tree.custom_vars[variable_name]
)
sequence = self.replace_var_with_value(
sequence, match.group(), variable_value
)
return sequence
def replace_var_with_value(self, sequence, variable, variable_value):
variable = re.escape(variable)
return re.sub(variable, variable_value, sequence)
| true | true |
f72f0a5368a77bd168688ea65e43ba66486e53c4 | 902 | py | Python | church/migrations/0017_linkedchurch.py | khanhpn/florida | 5e83d0561b9f41ff79383a6a2f0a84d6c8459ef0 | [
"Apache-2.0"
] | 1 | 2021-01-22T02:52:33.000Z | 2021-01-22T02:52:33.000Z | church/migrations/0017_linkedchurch.py | khanhpn/florida | 5e83d0561b9f41ff79383a6a2f0a84d6c8459ef0 | [
"Apache-2.0"
] | null | null | null | church/migrations/0017_linkedchurch.py | khanhpn/florida | 5e83d0561b9f41ff79383a6a2f0a84d6c8459ef0 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-01 06:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('church', '0016_masstime'),
]
operations = [
migrations.CreateModel(
name='LinkedChurch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(max_length=900000, null=True)),
('logo', models.FileField(upload_to='uploads/%Y/%m/%d/')),
('church_url', models.TextField(max_length=90000, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'linked_church',
},
),
]
| 32.214286 | 114 | 0.559867 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('church', '0016_masstime'),
]
operations = [
migrations.CreateModel(
name='LinkedChurch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(max_length=900000, null=True)),
('logo', models.FileField(upload_to='uploads/%Y/%m/%d/')),
('church_url', models.TextField(max_length=90000, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'linked_church',
},
),
]
| true | true |
f72f0bebfae6ba10ce11191ee930be3d4ec64296 | 4,692 | py | Python | app/model/SimpleSimulator.py | OuissalTAIM/jenkins | 7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1 | [
"BSD-1-Clause"
] | null | null | null | app/model/SimpleSimulator.py | OuissalTAIM/jenkins | 7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1 | [
"BSD-1-Clause"
] | 6 | 2021-02-02T22:52:41.000Z | 2022-03-12T00:37:30.000Z | app/model/SimpleSimulator.py | OuissalTAIM/jenkins | 7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1 | [
"BSD-1-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from app.graph.Graph import *
from app.data.Client import Driver
from app.config.env import MONIKER_SEPARATOR
from app.entity.Transport import Transport
from app.samples.graph_sample import SimpleEntity
class SimpleSimulator:
    """Brute-force supply-chain simulator over a transport graph.

    The graph is either injected via the constructor or rebuilt from the
    data service (see :meth:`build_graph`); scenarios are enumerated as
    every subset of the start-to-end paths of that graph.
    """
    def __init__(self, graph):
        """
        Constructor
        :param graph: input graph (a ``Graph`` instance; may be replaced
            later by :meth:`build_graph`)
        """
        self.graph = graph
    def build_graph(self):
        """
        Get data from data service and build graph.
        The result is stored in ``self.graph``; any graph passed to the
        constructor is overwritten.
        :return: None
        """
        # get locations: pivot the "simplelocation" records into
        # {unit_name: [location, ...]} (one list entry per document field)
        locations = Driver.get_data("simplelocation")
        unit_locations = {}
        for location in locations:
            for key in location:
                if key not in unit_locations:
                    unit_locations[key] = []
                unit_locations[key].append(location[key])
        # get infrastructure (one Transport edge is created per record below)
        infrastructure = Driver.get_data("infrastructure")
        # get connections, keyed by (upstream moniker, downstream moniker)
        connections = Driver.get_data("connection")
        upstream_to_downstream_connection = {}
        for connection in connections:
            #TODO: could the headers "From" and "To" be dynamic?
            # at least the unit in "Distance[km]" should be
            upstream_to_downstream_connection[(connection["From"],connection["To"])] = connection["Distance[km]"]
        # build graph: one Node per "<unit><separator><location>" moniker
        nodes = {}
        for unit_location in unit_locations:
            if unit_location == "_id":
                # skip the MongoDB identifier field
                continue
            for location in unit_locations[unit_location]:
                if location is None:
                    continue
                key = unit_location + MONIKER_SEPARATOR + location
                # NOTE(review): the (1, 1, 1) entity parameters are hard-coded
                # placeholders -- confirm they should not come from the data.
                entity = SimpleEntity(key, 1, 1, 1)
                node = Node(entity)
                nodes[key] = node
        # upstream to downstream: wire transport edges and collect the
        # connected nodes into the network
        network = Network("CenterAxe")
        for node_key in nodes:
            node = nodes[node_key]
            has_downstream = False
            for up_down in upstream_to_downstream_connection:
                if node_key != up_down[0]:
                    continue
                from_to = up_down[1].split(MONIKER_SEPARATOR)
                if from_to[1] not in unit_locations[from_to[0]]:
                    # downstream moniker refers to an unknown location
                    continue
                distance = upstream_to_downstream_connection[up_down]
                # one transport edge per infrastructure record
                for infra in infrastructure:
                    transport = Transport(infra, distance)
                    node.add_downstream(transport, up_down[1])
                network.add_node(nodes[up_down[1]])
                has_downstream = True
            if has_downstream:
                # nodes with no downstream edge are left out of the network
                network.add_node(node)
        self.graph = Graph(network)
    def build_all_scenarios(self, start, end):
        """
        Brute force simulation: enumerate every subset (power set) of the
        start-to-end paths, including the empty subset, so 2**len(paths)
        scenarios are produced.
        :param start: list of starting points
        :param end: list of ending points
        :return: list of scenarios, each a list of paths
        """
        paths = []
        for s in start:
            for e in end:
                se_paths = self.graph.paths(s, e)
                if len(se_paths) > 0:
                    paths.extend(se_paths)
        len_paths = len(paths)
        # one zero-padded bitmask string per subset of paths
        masks = ['{{0:0{0}b}}'.format(len_paths).format(n) for n in range(0, 2 ** len_paths)]
        scenarios = []
        for mask in masks:
            scenario = []
            scenario_names = []  # NOTE(review): unused variable
            for i in range(0, len_paths):
                if mask[i] == '1':
                    scenario.append(paths[i])
            print(scenario)
            scenarios.append(scenario)
        return scenarios
    def compute(self):
        """
        TODO: iterate over all scenarios and compute cost-pv
        and other metrics
        NOTE(review): broken as written -- ``build_all_scenarios`` requires
        ``start``/``end`` arguments, and ``self.compute(scenario)`` recurses
        into this zero-argument method (TypeError on both calls).
        :return: dictionary
        """
        scenarios = self.build_all_scenarios()
        for scenario in scenarios:
            self.compute(scenario)
        return {}
    def simulate(self, scenario, plot=False, function="cost_pv"):
        """
        Apply function on scenario: walk each path downstream-to-upstream,
        calling the named node method with the downstream neighbour, then
        restore the original path order.
        :param scenario: list[[string]]
        :param plot: boolean, choose to plot the graph or not
        :param function: name of the node method to apply (default "cost_pv")
        :return: list of per-path result lists (upstream-to-downstream order)
        """
        scenario_result = []
        for path in scenario:
            result = []
            next_node = None
            path.reverse()  # NOTE(review): mutates the caller's list in place
            path_to_nodes = [self.graph.get_node(name) for name in path]
            for node in path_to_nodes:
                result.append(getattr(node, function)(next_node))
                next_node = node
            path.reverse()  # undo the in-place reversal
            result.reverse()
            print("%s | %s" % (path, result))
            scenario_result.append(result)
        if plot:
            self.graph.plot()
        return scenario_result
| 34 | 113 | 0.547528 |
from app.graph.Graph import *
from app.data.Client import Driver
from app.config.env import MONIKER_SEPARATOR
from app.entity.Transport import Transport
from app.samples.graph_sample import SimpleEntity
class SimpleSimulator:
def __init__(self, graph):
self.graph = graph
def build_graph(self):
locations = Driver.get_data("simplelocation")
unit_locations = {}
for location in locations:
for key in location:
if key not in unit_locations:
unit_locations[key] = []
unit_locations[key].append(location[key])
infrastructure = Driver.get_data("infrastructure")
connections = Driver.get_data("connection")
upstream_to_downstream_connection = {}
for connection in connections:
upstream_to_downstream_connection[(connection["From"],connection["To"])] = connection["Distance[km]"]
nodes = {}
for unit_location in unit_locations:
if unit_location == "_id":
continue
for location in unit_locations[unit_location]:
if location is None:
continue
key = unit_location + MONIKER_SEPARATOR + location
entity = SimpleEntity(key, 1, 1, 1)
node = Node(entity)
nodes[key] = node
network = Network("CenterAxe")
for node_key in nodes:
node = nodes[node_key]
has_downstream = False
for up_down in upstream_to_downstream_connection:
if node_key != up_down[0]:
continue
from_to = up_down[1].split(MONIKER_SEPARATOR)
if from_to[1] not in unit_locations[from_to[0]]:
continue
distance = upstream_to_downstream_connection[up_down]
for infra in infrastructure:
transport = Transport(infra, distance)
node.add_downstream(transport, up_down[1])
network.add_node(nodes[up_down[1]])
has_downstream = True
if has_downstream:
network.add_node(node)
self.graph = Graph(network)
def build_all_scenarios(self, start, end):
paths = []
for s in start:
for e in end:
se_paths = self.graph.paths(s, e)
if len(se_paths) > 0:
paths.extend(se_paths)
len_paths = len(paths)
masks = ['{{0:0{0}b}}'.format(len_paths).format(n) for n in range(0, 2 ** len_paths)]
scenarios = []
for mask in masks:
scenario = []
scenario_names = []
for i in range(0, len_paths):
if mask[i] == '1':
scenario.append(paths[i])
print(scenario)
scenarios.append(scenario)
return scenarios
def compute(self):
scenarios = self.build_all_scenarios()
for scenario in scenarios:
self.compute(scenario)
return {}
def simulate(self, scenario, plot=False, function="cost_pv"):
scenario_result = []
for path in scenario:
result = []
next_node = None
path.reverse()
path_to_nodes = [self.graph.get_node(name) for name in path]
for node in path_to_nodes:
result.append(getattr(node, function)(next_node))
next_node = node
path.reverse()
result.reverse()
print("%s | %s" % (path, result))
scenario_result.append(result)
if plot:
self.graph.plot()
return scenario_result
| true | true |
f72f0bee0a0d6143e7a6c4be603b00b01323a74d | 1,459 | py | Python | models/vgg16.py | lizhipengTouch/CSA-inpainting | 50602607ddc9153af5bfe627e355b0466fc4944f | [
"CC-BY-4.0"
] | null | null | null | models/vgg16.py | lizhipengTouch/CSA-inpainting | 50602607ddc9153af5bfe627e355b0466fc4944f | [
"CC-BY-4.0"
] | null | null | null | models/vgg16.py | lizhipengTouch/CSA-inpainting | 50602607ddc9153af5bfe627e355b0466fc4944f | [
"CC-BY-4.0"
] | null | null | null | import torch
import torchvision
from torchvision import models
from collections import namedtuple
class Vgg16(torch.nn.Module):
    """Pretrained VGG-16 feature extractor.

    Splits the torchvision VGG-16 ``features`` stack into four stages and
    returns the activations after relu1_2, relu2_2, relu3_3 and relu4_3,
    the layers commonly used for perceptual losses.
    """

    # (start, end) layer indices of each stage within vgg16().features
    _STAGE_BOUNDS = ((0, 5), (5, 10), (10, 17), (17, 23))

    def __init__(self, requires_grad=False):
        """Load the pretrained feature layers and build the four stages.

        :param requires_grad: when False (default), every VGG parameter is
            frozen so the extractor can be used purely for loss computation.
        """
        super(Vgg16, self).__init__()
        pretrained_layers = models.vgg16(pretrained=True).features
        for stage_idx, (start, end) in enumerate(self._STAGE_BOUNDS, start=1):
            stage = torch.nn.Sequential()
            for layer_idx in range(start, end):
                stage.add_module(str(layer_idx), pretrained_layers[layer_idx])
            setattr(self, "slice%d" % stage_idx, stage)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Run X through the four stages and return the intermediate
        activations as a named tuple."""
        h_relu1_2 = self.slice1(X)
        h_relu2_2 = self.slice2(h_relu1_2)
        h_relu3_3 = self.slice3(h_relu2_2)
        h_relu4_3 = self.slice4(h_relu3_3)
        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
        return vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
return out # 得到经过不同层的特征值 | 38.394737 | 92 | 0.6244 | import torch
import torchvision
from torchvision import models
from collections import namedtuple
class Vgg16(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
for x in range(5):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(5, 10):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(10, 17):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(17, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
return out | true | true |
f72f0db8c63a6241b6bfbba2ee4b1714cbdddaeb | 2,061 | py | Python | benchmark/startPyquil969.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startPyquil969.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startPyquil969.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Assemble the fixed 5-qubit benchmark circuit and return it as a
    pyquil :class:`Program`.

    The gate sequence is reproduced verbatim from the original generator;
    the order of instructions is significant and must not change.
    """
    instructions = [
        H(0), H(1), H(2), H(3), H(4),
        H(0), H(1), H(2), H(3),
        H(0), CZ(1, 0), H(0),
        RX(-0.7822565707438585, 2),
        X(0), CNOT(1, 0),
        CNOT(0, 1), CNOT(0, 1), X(1), CNOT(0, 1), CNOT(0, 1),
        CNOT(0, 2), X(2), CNOT(0, 2),
        X(3), X(0), X(1), X(2), X(3),
        H(0),
        H(3), CZ(2, 3), H(3),
        H(1), H(2),
        RX(2.5761059759436304, 3),
        H(3),
    ]
    prog = Program()
    for instruction in instructions:
        prog += instruction
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally how often each measured bitstring occurs.

    (The misspelled name is kept as-is for caller compatibility.)

    :param bitstrings: iterable of measurement bitstrings
    :return: dict mapping each bitstring to its occurrence count
    """
    counts = {}
    for bits in bitstrings:
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Build the circuit, sample it 1024 times on a 5-qubit QVM, and write
    # the histogram of measured bitstrings to a CSV file.
    prog = make_circuit()
    qvm = get_qc('5q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # results maps qubit index -> per-shot outcomes; stack and transpose so
    # each row is one shot across all qubits
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # NOTE(review): plain open/close -- a `with` block would guarantee the
    # file is closed on error
    writefile = open("../data/startPyquil969.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| 25.134146 | 64 | 0.550704 |
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program()
prog += H(0)
prog += H(1)
prog += H(2)
prog += H(3)
prog += H(4)
prog += H(0)
prog += H(1)
prog += H(2)
prog += H(3)
prog += H(0)
prog += CZ(1,0)
prog += H(0)
prog += RX(-0.7822565707438585,2)
prog += X(0)
prog += CNOT(1,0)
prog += CNOT(0,1)
prog += CNOT(0,1)
prog += X(1)
prog += CNOT(0,1)
prog += CNOT(0,1)
prog += CNOT(0,2)
prog += X(2)
prog += CNOT(0,2)
prog += X(3)
prog += X(0)
prog += X(1)
prog += X(2)
prog += X(3)
prog += H(0)
prog += H(3)
prog += CZ(2,3)
prog += H(3)
prog += H(1)
prog += H(2)
prog += RX(2.5761059759436304,3)
prog += H(3)
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil969.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| false | true |
f72f0df111c27c3d72be292658ad3543f741792e | 6,525 | py | Python | sdk/python/kulado_azure/network/express_route_circuit_peering.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/kulado_azure/network/express_route_circuit_peering.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/kulado_azure/network/express_route_circuit_peering.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Kulado Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class ExpressRouteCircuitPeering(kulado.CustomResource):
azure_asn: kulado.Output[float]
"""
The ASN used by Azure.
"""
express_route_circuit_name: kulado.Output[str]
"""
The name of the ExpressRoute Circuit in which to create the Peering.
"""
microsoft_peering_config: kulado.Output[dict]
"""
A `microsoft_peering_config` block as defined below. Required when `peering_type` is set to `MicrosoftPeering`.
"""
peer_asn: kulado.Output[float]
"""
The Either a 16-bit or a 32-bit ASN. Can either be public or private..
"""
peering_type: kulado.Output[str]
"""
The type of the ExpressRoute Circuit Peering. Acceptable values include `AzurePrivatePeering`, `AzurePublicPeering` and `MicrosoftPeering`. Changing this forces a new resource to be created.
"""
primary_azure_port: kulado.Output[str]
"""
The Primary Port used by Azure for this Peering.
"""
primary_peer_address_prefix: kulado.Output[str]
"""
A `/30` subnet for the primary link.
"""
resource_group_name: kulado.Output[str]
"""
The name of the resource group in which to
create the Express Route Circuit Peering. Changing this forces a new resource to be created.
"""
secondary_azure_port: kulado.Output[str]
"""
The Secondary Port used by Azure for this Peering.
"""
secondary_peer_address_prefix: kulado.Output[str]
"""
A `/30` subnet for the secondary link.
"""
shared_key: kulado.Output[str]
"""
The shared key. Can be a maximum of 25 characters.
"""
vlan_id: kulado.Output[float]
"""
A valid VLAN ID to establish this peering on.
"""
def __init__(__self__, resource_name, opts=None, express_route_circuit_name=None, microsoft_peering_config=None, peer_asn=None, peering_type=None, primary_peer_address_prefix=None, resource_group_name=None, secondary_peer_address_prefix=None, shared_key=None, vlan_id=None, __name__=None, __opts__=None):
"""
Manages an ExpressRoute Circuit Peering.
:param str resource_name: The name of the resource.
:param kulado.ResourceOptions opts: Options for the resource.
:param kulado.Input[str] express_route_circuit_name: The name of the ExpressRoute Circuit in which to create the Peering.
:param kulado.Input[dict] microsoft_peering_config: A `microsoft_peering_config` block as defined below. Required when `peering_type` is set to `MicrosoftPeering`.
:param kulado.Input[float] peer_asn: The Either a 16-bit or a 32-bit ASN. Can either be public or private..
:param kulado.Input[str] peering_type: The type of the ExpressRoute Circuit Peering. Acceptable values include `AzurePrivatePeering`, `AzurePublicPeering` and `MicrosoftPeering`. Changing this forces a new resource to be created.
:param kulado.Input[str] primary_peer_address_prefix: A `/30` subnet for the primary link.
:param kulado.Input[str] resource_group_name: The name of the resource group in which to
create the Express Route Circuit Peering. Changing this forces a new resource to be created.
:param kulado.Input[str] secondary_peer_address_prefix: A `/30` subnet for the secondary link.
:param kulado.Input[str] shared_key: The shared key. Can be a maximum of 25 characters.
:param kulado.Input[float] vlan_id: A valid VLAN ID to establish this peering on.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/express_route_circuit_peering.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, kulado.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if express_route_circuit_name is None:
raise TypeError("Missing required property 'express_route_circuit_name'")
__props__['express_route_circuit_name'] = express_route_circuit_name
__props__['microsoft_peering_config'] = microsoft_peering_config
__props__['peer_asn'] = peer_asn
if peering_type is None:
raise TypeError("Missing required property 'peering_type'")
__props__['peering_type'] = peering_type
if primary_peer_address_prefix is None:
raise TypeError("Missing required property 'primary_peer_address_prefix'")
__props__['primary_peer_address_prefix'] = primary_peer_address_prefix
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if secondary_peer_address_prefix is None:
raise TypeError("Missing required property 'secondary_peer_address_prefix'")
__props__['secondary_peer_address_prefix'] = secondary_peer_address_prefix
__props__['shared_key'] = shared_key
if vlan_id is None:
raise TypeError("Missing required property 'vlan_id'")
__props__['vlan_id'] = vlan_id
__props__['azure_asn'] = None
__props__['primary_azure_port'] = None
__props__['secondary_azure_port'] = None
super(ExpressRouteCircuitPeering, __self__).__init__(
'azure:network/expressRouteCircuitPeering:ExpressRouteCircuitPeering',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.950704 | 308 | 0.70636 |
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class ExpressRouteCircuitPeering(kulado.CustomResource):
azure_asn: kulado.Output[float]
express_route_circuit_name: kulado.Output[str]
microsoft_peering_config: kulado.Output[dict]
peer_asn: kulado.Output[float]
peering_type: kulado.Output[str]
primary_azure_port: kulado.Output[str]
primary_peer_address_prefix: kulado.Output[str]
resource_group_name: kulado.Output[str]
secondary_azure_port: kulado.Output[str]
secondary_peer_address_prefix: kulado.Output[str]
shared_key: kulado.Output[str]
vlan_id: kulado.Output[float]
def __init__(__self__, resource_name, opts=None, express_route_circuit_name=None, microsoft_peering_config=None, peer_asn=None, peering_type=None, primary_peer_address_prefix=None, resource_group_name=None, secondary_peer_address_prefix=None, shared_key=None, vlan_id=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, kulado.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if express_route_circuit_name is None:
raise TypeError("Missing required property 'express_route_circuit_name'")
__props__['express_route_circuit_name'] = express_route_circuit_name
__props__['microsoft_peering_config'] = microsoft_peering_config
__props__['peer_asn'] = peer_asn
if peering_type is None:
raise TypeError("Missing required property 'peering_type'")
__props__['peering_type'] = peering_type
if primary_peer_address_prefix is None:
raise TypeError("Missing required property 'primary_peer_address_prefix'")
__props__['primary_peer_address_prefix'] = primary_peer_address_prefix
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if secondary_peer_address_prefix is None:
raise TypeError("Missing required property 'secondary_peer_address_prefix'")
__props__['secondary_peer_address_prefix'] = secondary_peer_address_prefix
__props__['shared_key'] = shared_key
if vlan_id is None:
raise TypeError("Missing required property 'vlan_id'")
__props__['vlan_id'] = vlan_id
__props__['azure_asn'] = None
__props__['primary_azure_port'] = None
__props__['secondary_azure_port'] = None
super(ExpressRouteCircuitPeering, __self__).__init__(
'azure:network/expressRouteCircuitPeering:ExpressRouteCircuitPeering',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f72f0e3e3ffbf41a6e99a86e70f0c5f90ed14feb | 2,479 | py | Python | attachment_large_object/ir_attachment.py | ShaheenHossain/itpp-labs-misc-addons13 | bf62dc5bc1abdc18d78e9560a286babbe1d0e082 | [
"MIT"
] | null | null | null | attachment_large_object/ir_attachment.py | ShaheenHossain/itpp-labs-misc-addons13 | bf62dc5bc1abdc18d78e9560a286babbe1d0e082 | [
"MIT"
] | null | null | null | attachment_large_object/ir_attachment.py | ShaheenHossain/itpp-labs-misc-addons13 | bf62dc5bc1abdc18d78e9560a286babbe1d0e082 | [
"MIT"
] | 3 | 2020-08-25T01:57:59.000Z | 2021-09-11T15:38:02.000Z | # -*- coding: utf-8 -*-
import logging
import psycopg2
from odoo import api, models
logger = logging.getLogger(__name__)
LARGE_OBJECT_LOCATION = "postgresql:lobject"
class IrAttachment(models.Model):
"""Provide storage as PostgreSQL large objects of attachements with filestore location ``postgresql:lobject``.
Works by overriding the storage handling methods of ``ir.attachment``, as intended by the
default implementation. The overrides call :funct:`super`, so that this is transparent
for other locations.
"""
_name = "ir.attachment"
_inherit = "ir.attachment"
@api.model
def lobject(self, cr, *args):
return cr._cnx.lobject(*args)
@api.model
def _file_write(self, value, checksum):
"""Write the content in a newly created large object.
:param value: base64 encoded payload
:returns str: object id (will be considered the file storage name)
"""
location = self._storage()
if location != LARGE_OBJECT_LOCATION:
return super(IrAttachment, self)._file_write(value, checksum)
lobj = self.lobject(self.env.cr, 0, "wb") # oid=0 means creation
lobj.write(value.decode("base64"))
oid = lobj.oid
return str(oid)
def _file_delete(self, fname):
filestore = False
try:
oid = long(fname)
except Exception:
filestore = True
if not filestore:
try:
return self.lobject(self.env.cr, oid, "rb").unlink()
except (psycopg2.OperationalError, ValueError):
filestore = True
return super(IrAttachment, self)._file_delete(fname)
def _lobject_read(self, fname, bin_size):
"""Read the large object, base64 encoded.
:param fname: file storage name, must be the oid as a string.
"""
lobj = self.lobject(self.env.cr, long(fname), "rb")
if bin_size:
return lobj.seek(0, 2)
return lobj.read().encode(
"base64"
) # GR TODO it must be possible to read-encode in chunks
@api.depends("store_fname", "db_datas")
def _compute_datas(self):
bin_size = self._context.get("bin_size")
for attach in self:
try:
attach.datas = self._lobject_read(attach.store_fname, bin_size)
except (psycopg2.OperationalError, ValueError):
super(IrAttachment, attach)._compute_datas()
| 31.379747 | 114 | 0.624445 |
import logging
import psycopg2
from odoo import api, models
logger = logging.getLogger(__name__)
LARGE_OBJECT_LOCATION = "postgresql:lobject"
class IrAttachment(models.Model):
_name = "ir.attachment"
_inherit = "ir.attachment"
@api.model
def lobject(self, cr, *args):
return cr._cnx.lobject(*args)
@api.model
def _file_write(self, value, checksum):
location = self._storage()
if location != LARGE_OBJECT_LOCATION:
return super(IrAttachment, self)._file_write(value, checksum)
lobj = self.lobject(self.env.cr, 0, "wb")
lobj.write(value.decode("base64"))
oid = lobj.oid
return str(oid)
def _file_delete(self, fname):
filestore = False
try:
oid = long(fname)
except Exception:
filestore = True
if not filestore:
try:
return self.lobject(self.env.cr, oid, "rb").unlink()
except (psycopg2.OperationalError, ValueError):
filestore = True
return super(IrAttachment, self)._file_delete(fname)
def _lobject_read(self, fname, bin_size):
lobj = self.lobject(self.env.cr, long(fname), "rb")
if bin_size:
return lobj.seek(0, 2)
return lobj.read().encode(
"base64"
)
@api.depends("store_fname", "db_datas")
def _compute_datas(self):
bin_size = self._context.get("bin_size")
for attach in self:
try:
attach.datas = self._lobject_read(attach.store_fname, bin_size)
except (psycopg2.OperationalError, ValueError):
super(IrAttachment, attach)._compute_datas()
| true | true |
f72f1077a32379129769997eb35ec1972c898c1c | 102 | py | Python | dircolors/pyls/__init__.py | mic-e/pydircolors | 62096e73ed065d015adde3f9e55c38e10a49dc3c | [
"Apache-2.0"
] | 3 | 2020-02-17T20:02:03.000Z | 2020-09-05T02:17:47.000Z | dircolors/pyls/__init__.py | mic-e/pydircolors | 62096e73ed065d015adde3f9e55c38e10a49dc3c | [
"Apache-2.0"
] | null | null | null | dircolors/pyls/__init__.py | mic-e/pydircolors | 62096e73ed065d015adde3f9e55c38e10a49dc3c | [
"Apache-2.0"
] | 2 | 2020-02-18T01:11:20.000Z | 2020-09-05T02:17:51.000Z | # dircolors.pyls package
""" pyls - a simple implementation of `ls` used to test python-dircolors """
| 34 | 76 | 0.72549 | true | true | |
f72f11d65f3800ed3177636dba116a8e2edb427c | 2,402 | py | Python | test/test_tcti.py | fer9898/tpm2-pytss | a2bd7292411511a9c6da3c2553c7af01470e63ef | [
"BSD-2-Clause"
] | null | null | null | test/test_tcti.py | fer9898/tpm2-pytss | a2bd7292411511a9c6da3c2553c7af01470e63ef | [
"BSD-2-Clause"
] | null | null | null | test/test_tcti.py | fer9898/tpm2-pytss | a2bd7292411511a9c6da3c2553c7af01470e63ef | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3 -u
# SPDX-License-Identifier: BSD-2
import unittest
from tpm2_pytss import *
from .TSS2_BaseTest import TSS2_EsapiTest
class TestTCTI(TSS2_EsapiTest):
def test_init(self):
self.assertEqual(self.tcti.version, 2)
self.assertGreater(self.tcti.magic, 0)
v1ctx = ffi.cast("TSS2_TCTI_CONTEXT_COMMON_V1 *", self.tcti._ctx)
v1ctx.version = 1
tcti = TCTI(self.tcti._ctx)
self.assertEqual(tcti.version, 1)
self.assertEqual(tcti._v2, None)
def test_transmit_receive(self):
startup = b"\x80\x01\x00\x00\x00\x0C\x00\x00\x01\x44\x00\x00"
self.tcti.transmit(startup)
resp = self.tcti.receive()
self.assertEqual(resp, b"\x80\x01\x00\x00\x00\n\x00\x00\x01\x00")
def test_finalize(self):
tcti = TCTI(self.tcti._ctx)
tcti.finalize()
def test_cancel(self):
if getattr(self.tcti, "name", "") == "swtpm":
self.skipTest("cancel supported by swtpm")
startup = b"\x80\x01\x00\x00\x00\x0C\x00\x00\x01\x44\x00\x00"
self.tcti.transmit(startup)
self.tcti.cancel()
def test_get_poll_handles(self):
tcti_name = getattr(self.tcti, "name", "")
try:
handles = self.tcti.get_poll_handles()
except TSS2_Exception as e:
if e.rc != lib.TSS2_TCTI_RC_NOT_IMPLEMENTED:
raise e
else:
self.skipTest(f"get_poll_handles not supported by {tcti_name}")
def test_set_locality(self):
self.tcti.set_locality(TPMA_LOCALITY.TWO)
def test_make_sticky(self):
tcti_name = getattr(self.tcti, "name", "")
if tcti_name in ("swtpm", "mssim"):
self.skipTest(f"make_sticky not supported by {tcti_name}")
raise Exception(self.tcti.name)
self.tcti.make_sticky(0, 0)
tcti._v2 = None
with self.assertRaises(RuntimeError) as e:
self.tcti.make_sticky(0, 0)
self.assertEqual(str(e.exception), "unsupported by TCTI API version")
def test_tctildr(self):
self.assertIsInstance(self.tcti.name, str)
self.assertIsInstance(self.tcti.conf, str)
with self.assertRaises(TypeError):
TCTILdr(name=None, conf=1234)
with self.assertRaises(TypeError):
TCTILdr(name=1234, conf=None)
if __name__ == "__main__":
unittest.main()
| 30.405063 | 79 | 0.628226 |
import unittest
from tpm2_pytss import *
from .TSS2_BaseTest import TSS2_EsapiTest
class TestTCTI(TSS2_EsapiTest):
def test_init(self):
self.assertEqual(self.tcti.version, 2)
self.assertGreater(self.tcti.magic, 0)
v1ctx = ffi.cast("TSS2_TCTI_CONTEXT_COMMON_V1 *", self.tcti._ctx)
v1ctx.version = 1
tcti = TCTI(self.tcti._ctx)
self.assertEqual(tcti.version, 1)
self.assertEqual(tcti._v2, None)
def test_transmit_receive(self):
startup = b"\x80\x01\x00\x00\x00\x0C\x00\x00\x01\x44\x00\x00"
self.tcti.transmit(startup)
resp = self.tcti.receive()
self.assertEqual(resp, b"\x80\x01\x00\x00\x00\n\x00\x00\x01\x00")
def test_finalize(self):
tcti = TCTI(self.tcti._ctx)
tcti.finalize()
def test_cancel(self):
if getattr(self.tcti, "name", "") == "swtpm":
self.skipTest("cancel supported by swtpm")
startup = b"\x80\x01\x00\x00\x00\x0C\x00\x00\x01\x44\x00\x00"
self.tcti.transmit(startup)
self.tcti.cancel()
def test_get_poll_handles(self):
tcti_name = getattr(self.tcti, "name", "")
try:
handles = self.tcti.get_poll_handles()
except TSS2_Exception as e:
if e.rc != lib.TSS2_TCTI_RC_NOT_IMPLEMENTED:
raise e
else:
self.skipTest(f"get_poll_handles not supported by {tcti_name}")
def test_set_locality(self):
self.tcti.set_locality(TPMA_LOCALITY.TWO)
def test_make_sticky(self):
tcti_name = getattr(self.tcti, "name", "")
if tcti_name in ("swtpm", "mssim"):
self.skipTest(f"make_sticky not supported by {tcti_name}")
raise Exception(self.tcti.name)
self.tcti.make_sticky(0, 0)
tcti._v2 = None
with self.assertRaises(RuntimeError) as e:
self.tcti.make_sticky(0, 0)
self.assertEqual(str(e.exception), "unsupported by TCTI API version")
def test_tctildr(self):
self.assertIsInstance(self.tcti.name, str)
self.assertIsInstance(self.tcti.conf, str)
with self.assertRaises(TypeError):
TCTILdr(name=None, conf=1234)
with self.assertRaises(TypeError):
TCTILdr(name=1234, conf=None)
if __name__ == "__main__":
unittest.main()
| true | true |
f72f13692aafad6149b0efd13c89c38ea0616f87 | 6,325 | py | Python | backend/users/api/views.py | tyrozz/django-next-backend | 1e06d8daa079548d0d4c79474f035041d44d5ead | [
"MIT"
] | 1 | 2022-02-24T17:55:35.000Z | 2022-02-24T17:55:35.000Z | backend/users/api/views.py | tyrozz/django-next-backend | 1e06d8daa079548d0d4c79474f035041d44d5ead | [
"MIT"
] | null | null | null | backend/users/api/views.py | tyrozz/django-next-backend | 1e06d8daa079548d0d4c79474f035041d44d5ead | [
"MIT"
] | null | null | null | from allauth.account import app_settings as allauth_settings
from allauth.account.models import EmailAddress
from allauth.account.utils import complete_signup
from allauth.account.views import ConfirmEmailView
from django.contrib.auth import get_user_model
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_post_parameters
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import MethodNotAllowed, ValidationError
from rest_framework.generics import CreateAPIView, GenericAPIView
from rest_framework.mixins import (
CreateModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from .serializers import (
CreateUserSerializer,
PasswordChangeSerializer,
PasswordResetConfirmSerializer,
RegisterSerializer,
ResendEmailVerificationSerializer,
UserPasswordResetSerializer,
UserSerializer,
VerifyEmailSerializer,
)
User = get_user_model()
sensitive_post_parameters_m = method_decorator(
sensitive_post_parameters(
"password",
"old_password",
"new_password1",
"new_password2",
),
)
class UserViewSet(RetrieveModelMixin, ListModelMixin, UpdateModelMixin, GenericViewSet):
serializer_class = UserSerializer
queryset = User.objects.all()
lookup_field = "username"
def get_queryset(self, *args, **kwargs):
return self.queryset.filter(id=self.request.user.id)
@action(detail=False, methods=["GET"])
def me(self, request):
serializer = UserSerializer(request.user, context={"request": request})
user_data = {"user": serializer.data}
return Response(status=status.HTTP_200_OK, data=user_data)
class UserCreateViewSet(CreateModelMixin, GenericViewSet):
queryset = User.objects.all()
serializer_class = CreateUserSerializer
permission_classes = [
AllowAny,
]
class PasswordResetView(GenericAPIView):
serializer_class = UserPasswordResetSerializer
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
# Create a serializer with request.data
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
# Return the success message with OK HTTP status
return Response(
{"detail": _("Password reset e-mail has been sent.")},
status=status.HTTP_200_OK,
)
class PasswordResetConfirmView(GenericAPIView):
serializer_class = PasswordResetConfirmSerializer
permission_classes = (AllowAny,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response({"detail": _("Password has been reset with the new password")})
class PasswordChangeView(GenericAPIView):
serializer_class = PasswordChangeSerializer
permission_classes = (IsAuthenticated,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response({"detail": _("New password has been saved")})
class VerifyEmailView(APIView, ConfirmEmailView):
permission_classes = (AllowAny,)
allowed_methods = ("POST", "OPTIONS", "HEAD")
def get_serializer(self, *args, **kwargs):
return VerifyEmailSerializer(*args, **kwargs)
def get(self, *args, **kwargs):
raise MethodNotAllowed("GET")
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.kwargs["key"] = serializer.validated_data["key"]
confirmation = self.get_object()
confirmation.confirm(self.request)
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
class ResendEmailVerificationView(CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = ResendEmailVerificationSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = EmailAddress.objects.get(**serializer.validated_data)
if not email:
raise ValidationError("Account does not exist")
if email.verified:
raise ValidationError("Account is already verified")
email.send_confirmation()
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
class RegisterView(CreateAPIView):
serializer_class = RegisterSerializer
permission_classes = (AllowAny,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_response_data(self, user):
if (
allauth_settings.EMAIL_VERIFICATION
== allauth_settings.EmailVerificationMethod.MANDATORY
):
return {"detail": _("Verification e-mail sent.")}
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
self.get_response_data(user),
status=status.HTTP_201_CREATED,
headers=headers,
)
def perform_create(self, serializer):
user = serializer.save(self.request)
complete_signup(
self.request._request,
user,
allauth_settings.EMAIL_VERIFICATION,
None,
)
return user
| 33.115183 | 88 | 0.707826 | from allauth.account import app_settings as allauth_settings
from allauth.account.models import EmailAddress
from allauth.account.utils import complete_signup
from allauth.account.views import ConfirmEmailView
from django.contrib.auth import get_user_model
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_post_parameters
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import MethodNotAllowed, ValidationError
from rest_framework.generics import CreateAPIView, GenericAPIView
from rest_framework.mixins import (
CreateModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from .serializers import (
CreateUserSerializer,
PasswordChangeSerializer,
PasswordResetConfirmSerializer,
RegisterSerializer,
ResendEmailVerificationSerializer,
UserPasswordResetSerializer,
UserSerializer,
VerifyEmailSerializer,
)
User = get_user_model()
sensitive_post_parameters_m = method_decorator(
sensitive_post_parameters(
"password",
"old_password",
"new_password1",
"new_password2",
),
)
class UserViewSet(RetrieveModelMixin, ListModelMixin, UpdateModelMixin, GenericViewSet):
serializer_class = UserSerializer
queryset = User.objects.all()
lookup_field = "username"
def get_queryset(self, *args, **kwargs):
return self.queryset.filter(id=self.request.user.id)
@action(detail=False, methods=["GET"])
def me(self, request):
serializer = UserSerializer(request.user, context={"request": request})
user_data = {"user": serializer.data}
return Response(status=status.HTTP_200_OK, data=user_data)
class UserCreateViewSet(CreateModelMixin, GenericViewSet):
queryset = User.objects.all()
serializer_class = CreateUserSerializer
permission_classes = [
AllowAny,
]
class PasswordResetView(GenericAPIView):
serializer_class = UserPasswordResetSerializer
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(
{"detail": _("Password reset e-mail has been sent.")},
status=status.HTTP_200_OK,
)
class PasswordResetConfirmView(GenericAPIView):
serializer_class = PasswordResetConfirmSerializer
permission_classes = (AllowAny,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response({"detail": _("Password has been reset with the new password")})
class PasswordChangeView(GenericAPIView):
serializer_class = PasswordChangeSerializer
permission_classes = (IsAuthenticated,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response({"detail": _("New password has been saved")})
class VerifyEmailView(APIView, ConfirmEmailView):
permission_classes = (AllowAny,)
allowed_methods = ("POST", "OPTIONS", "HEAD")
def get_serializer(self, *args, **kwargs):
return VerifyEmailSerializer(*args, **kwargs)
def get(self, *args, **kwargs):
raise MethodNotAllowed("GET")
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.kwargs["key"] = serializer.validated_data["key"]
confirmation = self.get_object()
confirmation.confirm(self.request)
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
class ResendEmailVerificationView(CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = ResendEmailVerificationSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = EmailAddress.objects.get(**serializer.validated_data)
if not email:
raise ValidationError("Account does not exist")
if email.verified:
raise ValidationError("Account is already verified")
email.send_confirmation()
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
class RegisterView(CreateAPIView):
serializer_class = RegisterSerializer
permission_classes = (AllowAny,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_response_data(self, user):
if (
allauth_settings.EMAIL_VERIFICATION
== allauth_settings.EmailVerificationMethod.MANDATORY
):
return {"detail": _("Verification e-mail sent.")}
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
self.get_response_data(user),
status=status.HTTP_201_CREATED,
headers=headers,
)
def perform_create(self, serializer):
user = serializer.save(self.request)
complete_signup(
self.request._request,
user,
allauth_settings.EMAIL_VERIFICATION,
None,
)
return user
| true | true |
f72f13d49d6c7a860c5b0fe7664a87a652eb88a8 | 641 | py | Python | tests/test_data/test_datasets/test_xml_dataset.py | Brym-Gyimah/mmdetection | d5d749afe57c77e2ec4500395faed3566fdfedae | [
"Apache-2.0"
] | 20,190 | 2018-09-10T01:11:53.000Z | 2022-03-31T22:31:33.000Z | tests/test_data/test_datasets/test_xml_dataset.py | Joker-co/mmdet_pro | 96abfd90cf0e38c5ce398795f949e9328eb85c1b | [
"Apache-2.0"
] | 6,736 | 2018-09-17T09:45:51.000Z | 2022-03-31T22:54:10.000Z | tests/test_data/test_datasets/test_xml_dataset.py | Joker-co/mmdet_pro | 96abfd90cf0e38c5ce398795f949e9328eb85c1b | [
"Apache-2.0"
] | 7,837 | 2018-09-11T02:58:23.000Z | 2022-03-31T22:31:38.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.datasets import DATASETS
def test_xml_dataset():
dataconfig = {
'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
'img_prefix': 'data/VOCdevkit/VOC2007/',
'pipeline': [{
'type': 'LoadImageFromFile'
}]
}
XMLDataset = DATASETS.get('XMLDataset')
class XMLDatasetSubClass(XMLDataset):
CLASSES = None
# get_ann_info and _filter_imgs of XMLDataset
# would use self.CLASSES, we added CLASSES not NONE
with pytest.raises(AssertionError):
XMLDatasetSubClass(**dataconfig)
| 26.708333 | 69 | 0.666147 |
import pytest
from mmdet.datasets import DATASETS
def test_xml_dataset():
dataconfig = {
'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
'img_prefix': 'data/VOCdevkit/VOC2007/',
'pipeline': [{
'type': 'LoadImageFromFile'
}]
}
XMLDataset = DATASETS.get('XMLDataset')
class XMLDatasetSubClass(XMLDataset):
CLASSES = None
with pytest.raises(AssertionError):
XMLDatasetSubClass(**dataconfig)
| true | true |
f72f1410ab89a22b8401c8e0baede753bd6f5c48 | 10,693 | py | Python | src/python/pants/base/specs.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 1 | 2021-02-22T18:11:26.000Z | 2021-02-22T18:11:26.000Z | src/python/pants/base/specs.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | null | null | null | src/python/pants/base/specs.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 2 | 2021-05-11T07:51:26.000Z | 2021-05-19T10:14:46.000Z | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import os
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, Mapping, Sequence, Tuple
from pants.base.exceptions import ResolveError
from pants.build_graph.address import Address
from pants.engine.fs import GlobExpansionConjunction, GlobMatchErrorBehavior, PathGlobs
from pants.engine.internals.target_adaptor import TargetAdaptor
from pants.util.dirutil import fast_relpath_optional, recursive_dirname
from pants.util.meta import frozen_after_init
if TYPE_CHECKING:
from pants.engine.internals.mapper import AddressFamily
class Spec(ABC):
    """A specification for what Pants should operate on.

    Concrete subclasses fall into two families: address specs (target
    selectors) and filesystem specs (file selectors).
    """
    @abstractmethod
    def __str__(self) -> str:
        """The normalized string representation of this spec."""
class AddressSpec(Spec, metaclass=ABCMeta):
    """Represents address selectors as passed from the command line.

    Subclasses are either a single literal address or a glob over BUILD-file
    namespaces (the `:` / `::` / `^` syntaxes).
    """
@dataclass(frozen=True)
class AddressLiteralSpec(AddressSpec):
    """A spec naming exactly one address.

    Covers both traditional addresses (`a/b/c:c`) and file addresses using
    disambiguation syntax (`a/b/c.txt:tgt`).
    """

    path_component: str
    target_component: str

    def __str__(self) -> str:
        return self.path_component + ":" + self.target_component
class AddressGlobSpec(AddressSpec, metaclass=ABCMeta):
    """An address spec that may expand to any number of addresses."""

    @abstractmethod
    def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:
        """Generate glob patterns matching exactly all the BUILD files this address spec covers."""

    @abstractmethod
    def matching_address_families(
        self, address_families_dict: Mapping[str, "AddressFamily"]
    ) -> Tuple["AddressFamily", ...]:
        """Given a dict of (namespace path) -> AddressFamily, return the values matching this
        address spec.

        :raises: :class:`ResolveError` if no address families matched this spec and this spec type
            expects a match.
        """

    def matching_addresses(
        self, address_families: Sequence["AddressFamily"]
    ) -> Sequence[Tuple[Address, TargetAdaptor]]:
        """Collect every (Address, TargetAdaptor) pair from the given families.

        :raises: :class:`ResolveError` if no addresses could be matched and this spec type expects
            a match.
        """
        pairs = []
        for family in address_families:
            pairs.extend(family.addresses_to_target_adaptors.items())
        return tuple(pairs)
@dataclass(frozen=True)
class SiblingAddresses(AddressGlobSpec):
    """Matches every address defined directly within one directory (`dir:` syntax)."""

    directory: str

    def __str__(self) -> str:
        return f"{self.directory}:"

    def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:
        globs = [os.path.join(self.directory, pattern) for pattern in build_patterns]
        return tuple(globs)

    def matching_address_families(
        self, address_families_dict: Mapping[str, "AddressFamily"]
    ) -> Tuple["AddressFamily", ...]:
        family = address_families_dict.get(self.directory)
        if family is None:
            raise ResolveError(
                f"Path '{self.directory}' does not contain any BUILD files, but '{self}' expected "
                "matching targets there."
            )
        return (family,)
@dataclass(frozen=True)
class MaybeEmptyDescendantAddresses(AddressGlobSpec):
    """Matches every address at or below the given directory (`dir::` syntax).

    Matching no addresses is not an error.
    """

    directory: str

    def __str__(self) -> str:
        return f"{self.directory}::"

    def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:
        return tuple(
            os.path.join(self.directory, "**", pattern) for pattern in build_patterns
        )

    def matching_address_families(
        self, address_families_dict: Mapping[str, "AddressFamily"]
    ) -> Tuple["AddressFamily", ...]:
        # A namespace matches when it is `directory` itself or nested under it.
        matched = [
            family
            for namespace, family in address_families_dict.items()
            if fast_relpath_optional(namespace, self.directory) is not None
        ]
        return tuple(matched)
class DescendantAddresses(MaybeEmptyDescendantAddresses):
    """Matches every address at or below the given directory.

    Unlike the parent class, matching no addresses is an error.
    """

    def matching_addresses(
        self, address_families: Sequence["AddressFamily"]
    ) -> Sequence[Tuple[Address, TargetAdaptor]]:
        matched = super().matching_addresses(address_families)
        if not matched:
            raise ResolveError(f"Address spec '{self}' does not match any targets.")
        return matched
@dataclass(frozen=True)
class AscendantAddresses(AddressGlobSpec):
    """Matches every address in the given directory and all of its ancestors (`dir^` syntax)."""

    directory: str

    def __str__(self) -> str:
        return f"{self.directory}^"

    def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:
        globs = []
        for pattern in build_patterns:
            for ancestor in recursive_dirname(self.directory):
                globs.append(os.path.join(ancestor, pattern))
        return tuple(globs)

    def matching_address_families(
        self, address_families_dict: Mapping[str, "AddressFamily"]
    ) -> Tuple["AddressFamily", ...]:
        # Note the inverted argument order vs. descendant matching: a family
        # matches when its namespace is an ancestor of (or equal to) `directory`.
        return tuple(
            family
            for namespace, family in address_families_dict.items()
            if fast_relpath_optional(self.directory, namespace) is not None
        )
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressSpecs:
    """The set of address specs from the command line, split by kind.

    `literals` are single-address specs; `globs` may match any number of
    addresses (the `:` / `::` / `^` syntaxes).
    """

    literals: Tuple[AddressLiteralSpec, ...]
    globs: Tuple[AddressGlobSpec, ...]
    filter_by_global_options: bool

    def __init__(
        self, specs: Iterable[AddressSpec], *, filter_by_global_options: bool = False
    ) -> None:
        """Create the specs for what addresses Pants should run on.

        If `filter_by_global_options` is set to True, then the resulting Addresses will be filtered
        by the global options `--tag` and `--exclude-target-regexp`.
        """
        literals = []
        globs = []
        for spec in specs:
            if isinstance(spec, AddressLiteralSpec):
                literals.append(spec)
            elif isinstance(spec, AddressGlobSpec):
                globs.append(spec)
            else:
                # Bug fix: report the offending spec. The previous code used
                # `repr(self)`, which itself raises here because the dataclass
                # fields have not been assigned yet.
                raise ValueError(f"Unexpected type of AddressSpec: {repr(spec)}")
        self.literals = tuple(literals)
        self.globs = tuple(globs)
        self.filter_by_global_options = filter_by_global_options

    @property
    def specs(self) -> Tuple[AddressSpec, ...]:
        """All specs, literal specs first."""
        return (*self.literals, *self.globs)

    def to_path_globs(
        self, *, build_patterns: Iterable[str], build_ignore_patterns: Iterable[str]
    ) -> PathGlobs:
        """Build a PathGlobs matching every BUILD file the glob specs may cover."""
        includes = set(
            itertools.chain.from_iterable(spec.to_globs(build_patterns) for spec in self.globs)
        )
        ignores = (f"!{p}" for p in build_ignore_patterns)
        return PathGlobs(globs=(*includes, *ignores))

    def __bool__(self) -> bool:
        # Truthy iff at least one spec was provided.
        return bool(self.specs)
class FilesystemSpec(Spec, metaclass=ABCMeta):
    """Base class for specs that select files: literals, globs, and ignore globs."""

    pass
@dataclass(frozen=True)
class FilesystemLiteralSpec(FilesystemSpec):
    """A literal file name, e.g. `foo.py`."""

    # The file path exactly as provided; __str__ echoes it unchanged.
    file: str

    def __str__(self) -> str:
        return self.file
@dataclass(frozen=True)
class FilesystemGlobSpec(FilesystemSpec):
    """A spec with a glob or globs, e.g. `*.py` and `**/*.java`."""

    # The glob pattern exactly as provided; __str__ echoes it unchanged.
    glob: str

    def __str__(self) -> str:
        return self.glob
@dataclass(frozen=True)
class FilesystemIgnoreSpec(FilesystemSpec):
    """A spec excluding the files matched by the given glob.

    The leading `!` is implied and added by `__str__`; the stored glob must
    not already carry it.
    """

    glob: str

    def __post_init__(self) -> None:
        already_negated = self.glob.startswith("!")
        if already_negated:
            raise ValueError(f"The `glob` for {self} should not start with `!`.")

    def __str__(self) -> str:
        return "!" + self.glob
@frozen_after_init
@dataclass(unsafe_hash=True)
class FilesystemSpecs:
    """The set of file specs from the command line, split into includes and ignores."""

    includes: tuple[FilesystemLiteralSpec | FilesystemGlobSpec, ...]
    ignores: tuple[FilesystemIgnoreSpec, ...]

    def __init__(self, specs: Iterable[FilesystemSpec]) -> None:
        """Partition `specs` into include specs (literals, globs) and ignore specs."""
        includes = []
        ignores = []
        for spec in specs:
            if isinstance(spec, (FilesystemLiteralSpec, FilesystemGlobSpec)):
                includes.append(spec)
            elif isinstance(spec, FilesystemIgnoreSpec):
                ignores.append(spec)
            else:
                # Bug fix: report the offending spec. The previous code used
                # `repr(self)`, which itself raises here because the dataclass
                # fields have not been assigned yet.
                raise ValueError(f"Unexpected type of FilesystemSpec: {repr(spec)}")
        self.includes = tuple(includes)
        self.ignores = tuple(ignores)

    @property
    def specs(self) -> Tuple[FilesystemSpec, ...]:
        """All specs, includes first."""
        return (*self.includes, *self.ignores)

    @staticmethod
    def _generate_path_globs(
        specs: Iterable[FilesystemSpec], glob_match_error_behavior: GlobMatchErrorBehavior
    ) -> PathGlobs:
        """Build a PathGlobs from the given specs with the requested error behavior."""
        return PathGlobs(
            globs=(str(s) for s in specs),
            glob_match_error_behavior=glob_match_error_behavior,
            # We validate that _every_ glob is valid.
            conjunction=GlobExpansionConjunction.all_match,
            description_of_origin=(
                None
                if glob_match_error_behavior == GlobMatchErrorBehavior.ignore
                else "file arguments"
            ),
        )

    def path_globs_for_spec(
        self,
        spec: FilesystemLiteralSpec | FilesystemGlobSpec,
        glob_match_error_behavior: GlobMatchErrorBehavior,
    ) -> PathGlobs:
        """Generate PathGlobs for the specific spec, automatically including the instance's
        FilesystemIgnoreSpecs."""
        return self._generate_path_globs((spec, *self.ignores), glob_match_error_behavior)

    def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:
        """Generate a single PathGlobs for the instance."""
        return self._generate_path_globs((*self.includes, *self.ignores), glob_match_error_behavior)

    def __bool__(self) -> bool:
        # Truthy iff at least one spec was provided.
        return bool(self.specs)
@dataclass(frozen=True)
class Specs:
    """The combined address and filesystem specs for one Pants invocation."""

    address_specs: AddressSpecs
    filesystem_specs: FilesystemSpecs

    @property
    def provided(self) -> bool:
        """Did the user provide specs?"""
        return bool(self.address_specs or self.filesystem_specs)

    @classmethod
    def empty(cls) -> Specs:
        """A Specs with no user-provided arguments."""
        return Specs(AddressSpecs([], filter_by_global_options=True), FilesystemSpecs([]))
| 32.901538 | 100 | 0.666511 |
from __future__ import annotations
import itertools
import os
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, Mapping, Sequence, Tuple
from pants.base.exceptions import ResolveError
from pants.build_graph.address import Address
from pants.engine.fs import GlobExpansionConjunction, GlobMatchErrorBehavior, PathGlobs
from pants.engine.internals.target_adaptor import TargetAdaptor
from pants.util.dirutil import fast_relpath_optional, recursive_dirname
from pants.util.meta import frozen_after_init
if TYPE_CHECKING:
from pants.engine.internals.mapper import AddressFamily
class Spec(ABC):
@abstractmethod
def __str__(self) -> str:
class AddressSpec(Spec, metaclass=ABCMeta):
@dataclass(frozen=True)
class AddressLiteralSpec(AddressSpec):
path_component: str
target_component: str
def __str__(self) -> str:
return f"{self.path_component}:{self.target_component}"
class AddressGlobSpec(AddressSpec, metaclass=ABCMeta):
@abstractmethod
def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:
@abstractmethod
def matching_address_families(
self, address_families_dict: Mapping[str, "AddressFamily"]
) -> Tuple["AddressFamily", ...]:
def matching_addresses(
self, address_families: Sequence["AddressFamily"]
) -> Sequence[Tuple[Address, TargetAdaptor]]:
return tuple(
itertools.chain.from_iterable(
af.addresses_to_target_adaptors.items() for af in address_families
)
)
@dataclass(frozen=True)
class SiblingAddresses(AddressGlobSpec):
directory: str
def __str__(self) -> str:
return f"{self.directory}:"
def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:
return tuple(os.path.join(self.directory, pat) for pat in build_patterns)
def matching_address_families(
self, address_families_dict: Mapping[str, "AddressFamily"]
) -> Tuple["AddressFamily", ...]:
maybe_af = address_families_dict.get(self.directory)
if maybe_af is None:
raise ResolveError(
f"Path '{self.directory}' does not contain any BUILD files, but '{self}' expected "
"matching targets there."
)
return (maybe_af,)
@dataclass(frozen=True)
class MaybeEmptyDescendantAddresses(AddressGlobSpec):
directory: str
def __str__(self) -> str:
return f"{self.directory}::"
def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:
return tuple(os.path.join(self.directory, "**", pat) for pat in build_patterns)
def matching_address_families(
self, address_families_dict: Mapping[str, "AddressFamily"]
) -> Tuple["AddressFamily", ...]:
return tuple(
af
for ns, af in address_families_dict.items()
if fast_relpath_optional(ns, self.directory) is not None
)
class DescendantAddresses(MaybeEmptyDescendantAddresses):
def matching_addresses(
self, address_families: Sequence["AddressFamily"]
) -> Sequence[Tuple[Address, TargetAdaptor]]:
matching = super().matching_addresses(address_families)
if len(matching) == 0:
raise ResolveError(f"Address spec '{self}' does not match any targets.")
return matching
@dataclass(frozen=True)
class AscendantAddresses(AddressGlobSpec):
directory: str
def __str__(self) -> str:
return f"{self.directory}^"
def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:
return tuple(
os.path.join(f, pattern)
for pattern in build_patterns
for f in recursive_dirname(self.directory)
)
def matching_address_families(
self, address_families_dict: Mapping[str, "AddressFamily"]
) -> Tuple["AddressFamily", ...]:
return tuple(
af
for ns, af in address_families_dict.items()
if fast_relpath_optional(self.directory, ns) is not None
)
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressSpecs:
literals: Tuple[AddressLiteralSpec, ...]
globs: Tuple[AddressGlobSpec, ...]
filter_by_global_options: bool
def __init__(
self, specs: Iterable[AddressSpec], *, filter_by_global_options: bool = False
) -> None:
literals = []
globs = []
for spec in specs:
if isinstance(spec, AddressLiteralSpec):
literals.append(spec)
elif isinstance(spec, AddressGlobSpec):
globs.append(spec)
else:
raise ValueError(f"Unexpected type of AddressSpec: {repr(self)}")
self.literals = tuple(literals)
self.globs = tuple(globs)
self.filter_by_global_options = filter_by_global_options
@property
def specs(self) -> Tuple[AddressSpec, ...]:
return (*self.literals, *self.globs)
def to_path_globs(
self, *, build_patterns: Iterable[str], build_ignore_patterns: Iterable[str]
) -> PathGlobs:
includes = set(
itertools.chain.from_iterable(spec.to_globs(build_patterns) for spec in self.globs)
)
ignores = (f"!{p}" for p in build_ignore_patterns)
return PathGlobs(globs=(*includes, *ignores))
def __bool__(self) -> bool:
return bool(self.specs)
class FilesystemSpec(Spec, metaclass=ABCMeta):
pass
@dataclass(frozen=True)
class FilesystemLiteralSpec(FilesystemSpec):
file: str
def __str__(self) -> str:
return self.file
@dataclass(frozen=True)
class FilesystemGlobSpec(FilesystemSpec):
glob: str
def __str__(self) -> str:
return self.glob
@dataclass(frozen=True)
class FilesystemIgnoreSpec(FilesystemSpec):
glob: str
def __post_init__(self) -> None:
if self.glob.startswith("!"):
raise ValueError(f"The `glob` for {self} should not start with `!`.")
def __str__(self) -> str:
return f"!{self.glob}"
@frozen_after_init
@dataclass(unsafe_hash=True)
class FilesystemSpecs:
includes: tuple[FilesystemLiteralSpec | FilesystemGlobSpec, ...]
ignores: tuple[FilesystemIgnoreSpec, ...]
def __init__(self, specs: Iterable[FilesystemSpec]) -> None:
includes = []
ignores = []
for spec in specs:
if isinstance(spec, (FilesystemLiteralSpec, FilesystemGlobSpec)):
includes.append(spec)
elif isinstance(spec, FilesystemIgnoreSpec):
ignores.append(spec)
else:
raise ValueError(f"Unexpected type of FilesystemSpec: {repr(self)}")
self.includes = tuple(includes)
self.ignores = tuple(ignores)
@property
def specs(self) -> Tuple[FilesystemSpec, ...]:
return (*self.includes, *self.ignores)
@staticmethod
def _generate_path_globs(
specs: Iterable[FilesystemSpec], glob_match_error_behavior: GlobMatchErrorBehavior
) -> PathGlobs:
return PathGlobs(
globs=(str(s) for s in specs),
glob_match_error_behavior=glob_match_error_behavior,
conjunction=GlobExpansionConjunction.all_match,
description_of_origin=(
None
if glob_match_error_behavior == GlobMatchErrorBehavior.ignore
else "file arguments"
),
)
def path_globs_for_spec(
self,
spec: FilesystemLiteralSpec | FilesystemGlobSpec,
glob_match_error_behavior: GlobMatchErrorBehavior,
) -> PathGlobs:
return self._generate_path_globs((spec, *self.ignores), glob_match_error_behavior)
def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:
return self._generate_path_globs((*self.includes, *self.ignores), glob_match_error_behavior)
def __bool__(self) -> bool:
return bool(self.specs)
@dataclass(frozen=True)
class Specs:
address_specs: AddressSpecs
filesystem_specs: FilesystemSpecs
@property
def provided(self) -> bool:
return bool(self.address_specs) or bool(self.filesystem_specs)
@classmethod
def empty(cls) -> Specs:
return Specs(AddressSpecs([], filter_by_global_options=True), FilesystemSpecs([]))
| true | true |
f72f15c5de015caaeac2371e1ce51f83332786d4 | 1,144 | py | Python | lib/datasets/synthetic_example.py | liulu112601/AutoDL-Projects | 59d07b63420837f56ea45b678784e299612477c3 | [
"MIT"
] | null | null | null | lib/datasets/synthetic_example.py | liulu112601/AutoDL-Projects | 59d07b63420837f56ea45b678784e299612477c3 | [
"MIT"
] | null | null | null | lib/datasets/synthetic_example.py | liulu112601/AutoDL-Projects | 59d07b63420837f56ea45b678784e299612477c3 | [
"MIT"
] | null | null | null | #####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.04 #
#####################################################
import copy
from .math_dynamic_funcs import DynamicQuadraticFunc
from .math_adv_funcs import ConstantFunc, ComposedSinFunc
from .synthetic_env import SyntheticDEnv
def create_example_v1(
    timestamp_config=None,
    num_per_task=5000,
):
    """Build the v1 synthetic dynamic environment plus its oracle function.

    Returns a `(SyntheticDEnv, DynamicQuadraticFunc)` pair. The environment
    stores a deep copy of the function as its oracle map, so mutating the
    returned function afterwards does not affect the environment.
    """
    # Environment: sinusoidal mean with a fixed-amplitude sinusoidal std.
    env = SyntheticDEnv(
        [ComposedSinFunc()],
        [[ComposedSinFunc(min_amplitude=0.5, max_amplitude=0.5)]],
        num_per_task=num_per_task,
        timestamp_config=timestamp_config,
    )

    # Oracle: a quadratic whose three coefficients evolve over time.
    oracle = DynamicQuadraticFunc()
    coefficients = {
        0: ComposedSinFunc(num_sin_phase=4, phase_shift=1.0, max_amplitude=1.0),
        1: ConstantFunc(constant=0.9),
        2: ComposedSinFunc(num_sin_phase=5, phase_shift=0.4, max_amplitude=0.9),
    }
    oracle.set(coefficients)

    env.set_oracle_map(copy.deepcopy(oracle))
    return env, oracle
| 30.105263 | 73 | 0.657343 | true | true | |
f72f163111d0ad06a68b563a14a17a629ddccdab | 756 | py | Python | milarun/lib/monitor.py | laceyg/milabench | a314094a406c2e98a932f6d4f3a9588a991148d3 | [
"MIT"
] | 7 | 2020-07-09T18:55:00.000Z | 2022-03-31T19:51:29.000Z | milarun/lib/monitor.py | laceyg/milabench | a314094a406c2e98a932f6d4f3a9588a991148d3 | [
"MIT"
] | 6 | 2020-07-02T08:58:39.000Z | 2021-02-01T20:31:28.000Z | milarun/lib/monitor.py | laceyg/milabench | a314094a406c2e98a932f6d4f3a9588a991148d3 | [
"MIT"
] | 8 | 2020-06-19T17:16:19.000Z | 2022-03-31T19:34:49.000Z | import GPUtil
from threading import Thread
import time
class GPUMonitor(Thread):
    """Background thread that periodically samples per-GPU load/memory/temperature."""

    def __init__(self, delay):
        super().__init__()
        self.stopped = False
        self.delay = delay  # seconds between samples
        # One time-series dict per GPU, keyed by GPU id.
        self.data = {}
        for gpu in GPUtil.getGPUs():
            self.data[gpu.id] = {"load": [], "memory": [], "temperature": []}

    def run(self):
        # Sample until stop() flips the flag; each pass appends one reading
        # per GPU to its series.
        while not self.stopped:
            for gpu in GPUtil.getGPUs():
                series = self.data[gpu.id]
                series["load"].append(gpu.load)
                series["memory"].append(gpu.memoryUsed)
                series["temperature"].append(gpu.temperature)
            time.sleep(self.delay)

    def stop(self):
        """Ask the sampling loop to exit after the current sleep."""
        self.stopped = True
| 24.387097 | 57 | 0.492063 | import GPUtil
from threading import Thread
import time
class GPUMonitor(Thread):
def __init__(self, delay):
super().__init__()
self.stopped = False
self.delay = delay
self.data = {
g.id: dict(
load=[],
memory=[],
temperature=[]
)
for g in GPUtil.getGPUs()
}
def run(self):
while not self.stopped:
for g in GPUtil.getGPUs():
data = self.data[g.id]
data["load"].append(g.load)
data["memory"].append(g.memoryUsed)
data["temperature"].append(g.temperature)
time.sleep(self.delay)
def stop(self):
self.stopped = True
| true | true |
f72f168e38670fee460a5423e951a0f83d8cd4f5 | 702 | py | Python | matplotlib/matplotlib_test/plot_lib_test.py | alphaciel/Balancing-Robot-Raspberry-Pi-DIY | 8a61acf688ea0915017c40eaff3841a9b219f9b7 | [
"MIT"
] | null | null | null | matplotlib/matplotlib_test/plot_lib_test.py | alphaciel/Balancing-Robot-Raspberry-Pi-DIY | 8a61acf688ea0915017c40eaff3841a9b219f9b7 | [
"MIT"
] | null | null | null | matplotlib/matplotlib_test/plot_lib_test.py | alphaciel/Balancing-Robot-Raspberry-Pi-DIY | 8a61acf688ea0915017c40eaff3841a9b219f9b7 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider, Button, RadioButtons

fig = plt.figure()
ax = fig.add_subplot(111)
# Leave room on the left/bottom for the two slider axes.
fig.subplots_adjust(left=0.25, bottom=0.25)

# Initial colour-limit range for the random demo image.
min0 = 0
max0 = 25000
im = max0 * np.random.random((10, 10))
im1 = ax.imshow(im)
fig.colorbar(im1)

axcolor = 'lightgoldenrodyellow'
# Fix: `axisbg` was deprecated in Matplotlib 2.0 and removed in 2.2;
# `facecolor` is the supported keyword for the axes background colour.
axmin = fig.add_axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
axmax = fig.add_axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)

smin = Slider(axmin, 'Min', 0, 30000, valinit=min0)
smax = Slider(axmax, 'Max', 0, 30000, valinit=max0)


def update(val):
    """Slider callback: apply the new colour limits and redraw the canvas."""
    im1.set_clim([smin.val, smax.val])
    fig.canvas.draw()


smin.on_changed(update)
smax.on_changed(update)
plt.show() | 25.071429 | 63 | 0.709402 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider, Button, RadioButtons
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.25, bottom=0.25)
min0 = 0
max0 = 25000
im = max0 * np.random.random((10,10))
im1 = ax.imshow(im)
fig.colorbar(im1)
axcolor = 'lightgoldenrodyellow'
axmin = fig.add_axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axmax = fig.add_axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
smin = Slider(axmin, 'Min', 0, 30000, valinit=min0)
smax = Slider(axmax, 'Max', 0, 30000, valinit=max0)
def update(val):
im1.set_clim([smin.val,smax.val])
fig.canvas.draw()
smin.on_changed(update)
smax.on_changed(update)
plt.show() | true | true |
f72f16a11c68a6687ece2fc6cae8e47c19ce6f15 | 613 | py | Python | src/rakali/transforms.py | sthysel/rakali | d641b8f75e7bbbd4ab973060658f5b30d24e99ff | [
"MIT"
] | 6 | 2019-02-17T01:23:39.000Z | 2021-01-13T23:01:16.000Z | src/rakali/transforms.py | sthysel/rakali | d641b8f75e7bbbd4ab973060658f5b30d24e99ff | [
"MIT"
] | 1 | 2020-09-09T19:52:17.000Z | 2021-09-13T08:25:44.000Z | src/rakali/transforms.py | sthysel/rakali | d641b8f75e7bbbd4ab973060658f5b30d24e99ff | [
"MIT"
] | 3 | 2021-07-21T14:09:43.000Z | 2021-08-22T15:03:48.000Z | import cv2 as cv
def scale(img, scale):
    """Resize `img` by the same factor on both axes, preserving aspect ratio."""
    return resize(img, x_scale=scale, y_scale=scale)
def resize(img, x_scale, y_scale, optimize=True):
    """Scale `img` by the given per-axis factors.

    When `optimize` is true, use cv.INTER_CUBIC for pure upscaling and
    cv.INTER_AREA otherwise; when false, fall back to cv.INTER_LINEAR.
    """
    if not optimize:
        method = cv.INTER_LINEAR
    elif x_scale > 1 and y_scale > 1:
        method = cv.INTER_CUBIC
    else:
        method = cv.INTER_AREA
    return cv.resize(img, None, fx=x_scale, fy=y_scale, interpolation=method)
| 22.703704 | 56 | 0.619902 | import cv2 as cv
def scale(img, scale):
return resize(img, x_scale=scale, y_scale=scale)
def resize(img, x_scale, y_scale, optimize=True):
interpolation = cv.INTER_LINEAR
if optimize:
if x_scale > 1 and y_scale > 1:
interpolation = cv.INTER_CUBIC
else:
interpolation = cv.INTER_AREA
return cv.resize(
img,
None,
fx=x_scale,
fy=y_scale,
interpolation=interpolation,
)
| true | true |
f72f17683039c31b552fe4aaaab293ebc4b067b4 | 1,380 | py | Python | load_isbi.py | lone17/deform-conv | 3502cedbeae61c961d7e988382c55b9d45fd1873 | [
"MIT"
] | null | null | null | load_isbi.py | lone17/deform-conv | 3502cedbeae61c961d7e988382c55b9d45fd1873 | [
"MIT"
] | null | null | null | load_isbi.py | lone17/deform-conv | 3502cedbeae61c961d7e988382c55b9d45fd1873 | [
"MIT"
] | null | null | null | from keras.preprocessing.image import ImageDataGenerator
# Augmentation configuration shared by the training image and mask generators.
data_gen_args = dict(rescale=1./255,
                     rotation_range=0.2,
                     shear_range=0.2,
                     zoom_range=0.2,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     horizontal_flip=True,
                     fill_mode='nearest')
# The same seed is given to the image and mask generators so their random
# transforms stay in lockstep and each augmented image matches its mask.
seed = 17
# Training images: grayscale 512x512, batch of 1, shuffled with `seed`.
train_image_gen = \
    ImageDataGenerator(**data_gen_args)\
    .flow_from_directory('ISBI/train', classes=['image'], target_size=(512, 512),
                         color_mode='grayscale', class_mode=None, batch_size=1,
                         shuffle=True, seed=seed)
# Training masks: augmented identically to the images via the shared seed.
train_mask_gen = \
    ImageDataGenerator(**data_gen_args)\
    .flow_from_directory('ISBI/train', classes=['label'], target_size=(512, 512),
                         color_mode='grayscale', class_mode=None, batch_size=1,
                         shuffle=True, seed=seed)
# Validation data: rescaled only — no augmentation, no shuffling.
val_image_gen = \
    ImageDataGenerator(rescale=1./255)\
    .flow_from_directory('ISBI/val', classes=['image'], target_size=(512, 512),
                         color_mode='grayscale', class_mode=None, batch_size=1)
val_mask_gen = \
    ImageDataGenerator(rescale=1./255)\
    .flow_from_directory('ISBI/val', classes=['label'], target_size=(512, 512),
                         color_mode='grayscale', class_mode=None, batch_size=1)
| 41.818182 | 81 | 0.588406 | from keras.preprocessing.image import ImageDataGenerator
data_gen_args = dict(rescale=1./255,
rotation_range=0.2,
shear_range=0.2,
zoom_range=0.2,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
fill_mode='nearest')
seed = 17
train_image_gen = \
ImageDataGenerator(**data_gen_args)\
.flow_from_directory('ISBI/train', classes=['image'], target_size=(512, 512),
color_mode='grayscale', class_mode=None, batch_size=1,
shuffle=True, seed=seed)
train_mask_gen = \
ImageDataGenerator(**data_gen_args)\
.flow_from_directory('ISBI/train', classes=['label'], target_size=(512, 512),
color_mode='grayscale', class_mode=None, batch_size=1,
shuffle=True, seed=seed)
val_image_gen = \
ImageDataGenerator(rescale=1./255)\
.flow_from_directory('ISBI/val', classes=['image'], target_size=(512, 512),
color_mode='grayscale', class_mode=None, batch_size=1)
val_mask_gen = \
ImageDataGenerator(rescale=1./255)\
.flow_from_directory('ISBI/val', classes=['label'], target_size=(512, 512),
color_mode='grayscale', class_mode=None, batch_size=1)
| true | true |
f72f183dad87e98bbd6b38f75d1d545a70ce6384 | 2,519 | py | Python | parser/fase2/team10/sql/Instrucciones/Sql_alter/AlterTableAlterColumn.py | Gabriel-15/tytus | fb00718bf3fcc5211a3604fba1a551f44bdc6deb | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/fase2/team10/sql/Instrucciones/Sql_alter/AlterTableAlterColumn.py | Gabriel-15/tytus | fb00718bf3fcc5211a3604fba1a551f44bdc6deb | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/fase2/team10/sql/Instrucciones/Sql_alter/AlterTableAlterColumn.py | Gabriel-15/tytus | fb00718bf3fcc5211a3604fba1a551f44bdc6deb | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from sql.Instrucciones.TablaSimbolos.Instruccion import Instruccion
from sql.Instrucciones.Sql_create.Tipo_Constraint import Tipo_Constraint, Tipo_Dato_Constraint
from sql.Instrucciones.Excepcion import Excepcion
class AlterTableAlterColumn(Instruccion):
    """AST node for ALTER TABLE <tabla> ALTER COLUMN <col> SET NOT NULL.

    Locates the column in the table of the currently selected database and
    appends a NOT NULL constraint to it.
    """

    def __init__(self, tabla, col, strGram, linea, columna):
        Instruccion.__init__(self, None, linea, columna, strGram)
        self.tabla = tabla  # name of the table to alter
        self.col = col      # name of the column that becomes NOT NULL

    def ejecutar(self, tabla, arbol):
        """Execute the statement; returns an Excepcion on failure, else None."""
        super().ejecutar(tabla, arbol)
        # Guard: a database must be selected before altering any table.
        if arbol.bdUsar is None:
            error = Excepcion("100", "Semantico",
                              "No ha seleccionado ninguna Base de Datos.",
                              self.linea, self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return None

        objetoTabla = arbol.devolviendoTablaDeBase(self.tabla)
        # devolviendoTablaDeBase returns 0 when the table does not exist.
        if objetoTabla == 0:
            error = Excepcion('42P01', "Semántico",
                              "No existe la relación " + self.tabla,
                              self.linea, self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error

        # Find the target column (keep the last match, as the original did).
        existe = None
        for campo in objetoTabla.lista_de_campos:
            if campo.nombre == self.col:
                existe = campo
        if existe is None:
            error = Excepcion('42P01', "Semántico",
                              "No existe la columna «" + self.col + "» en la llave",
                              self.linea, self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error

        # Initialize the constraint list once, then append NOT NULL
        # (the original duplicated the append in both branches).
        if existe.constraint is None:
            existe.constraint = []
        existe.constraint.append(
            Tipo_Constraint(None, Tipo_Dato_Constraint.NOT_NULL, None))
        arbol.consola.append("Consulta devuelta correctamente.")
        return None
| 51.408163 | 132 | 0.579595 | from sql.Instrucciones.TablaSimbolos.Instruccion import Instruccion
from sql.Instrucciones.Sql_create.Tipo_Constraint import Tipo_Constraint, Tipo_Dato_Constraint
from sql.Instrucciones.Excepcion import Excepcion
class AlterTableAlterColumn(Instruccion):
def __init__(self, tabla, col, strGram, linea, columna):
Instruccion.__init__(self,None,linea,columna,strGram)
self.tabla = tabla
self.col = col
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
if arbol.bdUsar != None:
objetoTabla = arbol.devolviendoTablaDeBase(self.tabla)
if objetoTabla != 0:
existe = None
for columnas in objetoTabla.lista_de_campos:
if columnas.nombre == self.col:
existe = columnas
if existe != None:
if existe.constraint != None:
existe.constraint.append(Tipo_Constraint(None, Tipo_Dato_Constraint.NOT_NULL, None))
else:
existe.constraint = []
existe.constraint.append(Tipo_Constraint(None, Tipo_Dato_Constraint.NOT_NULL, None))
arbol.consola.append("Consulta devuelta correctamente.")
else:
error = Excepcion('42P01',"Semántico","No existe la columna «"+self.col+"» en la llave",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion("100","Semantico","No ha seleccionado ninguna Base de Datos.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
| true | true |
f72f18abb7dcfee0f3b682427c6f42194a6ada0f | 6,676 | py | Python | KaboomImages.py | Toinas/IntelligentSuit | f3d33b327406d899d18ecf5fd6a100b0c786fdce | [
"MIT"
] | null | null | null | KaboomImages.py | Toinas/IntelligentSuit | f3d33b327406d899d18ecf5fd6a100b0c786fdce | [
"MIT"
] | null | null | null | KaboomImages.py | Toinas/IntelligentSuit | f3d33b327406d899d18ecf5fd6a100b0c786fdce | [
"MIT"
] | null | null | null | Kaboom_0=[
0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,
0,0,0,2,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,2,2,2,2,2,2,2,2,0,0,0,0,0,0,
0,0,2,2,2,2,2,2,2,2,0,0,0,0,0,0,
0,0,2,2,2,2,2,2,2,2,0,0,0,0,0,0,
0,0,2,1,1,1,1,1,1,2,0,0,0,0,2,2,
0,0,2,1,1,1,1,1,1,2,0,0,0,0,2,2,
0,0,2,1,2,1,1,2,1,2,0,0,0,0,0,0,
0,0,2,1,1,1,1,1,1,2,0,0,0,0,0,3,
0,0,0,2,2,1,1,2,2,0,0,0,0,0,4,3,
0,0,2,2,2,2,2,2,2,2,2,4,4,4,0,0,
0,0,2,2,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,2,2,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,0,0,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,0,0,2,2,0,2,2,0,0,0,0,0,0,0,
0,0,0,0,2,2,0,2,2,0,0,0,0,0,0,0]
kaboom_1=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_2=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,3,0,0,0,0,0,0,
2,2,2,2,2,2,2,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_3=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,3,0,0,0,0,0,0,
2,2,2,2,2,2,2,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_4=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,
0,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0,
0,0,0,0,0,0,3,3,3,3,3,0,0,0,0,0,
0,0,0,0,0,0,3,3,3,3,3,0,0,0,0,0,
2,2,2,2,2,3,3,3,3,3,3,3,0,0,0,0,
0,0,0,0,0,0,3,3,3,3,3,0,0,0,0,0,
0,0,0,0,0,0,3,3,3,3,3,0,0,0,0,0,
0,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0,
0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_5=[0,0,3,0,0,0,0,0,0,0,0,0,0,0,3,0,
0,0,0,3,0,0,0,0,3,0,0,0,0,3,0,0,
0,0,0,0,3,0,3,0,3,0,3,0,3,0,0,0,
0,0,0,0,0,3,3,3,3,3,3,3,0,0,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,0,0,3,3,3,3,3,3,3,0,0,0,0,
2,2,2,3,3,3,3,3,3,3,3,3,3,3,0,0,
0,0,0,0,0,3,3,3,3,3,3,3,0,0,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,0,0,3,3,3,3,3,3,3,0,0,0,0,
0,0,0,0,3,0,3,0,3,0,3,0,3,0,0,0,
0,0,0,3,0,0,0,0,3,0,0,0,0,3,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_6=[0,0,3,0,0,3,0,0,3,0,0,3,0,0,3,0,
0,0,0,3,0,3,0,0,3,0,0,3,0,3,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,3,0,3,0,0,3,0,0,3,0,3,0,0,
0,0,3,0,0,3,0,0,3,0,0,3,0,0,3,0,
0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,3,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_7=[0,0,3,0,0,3,0,0,3,0,0,3,0,0,3,0,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
0,0,3,0,0,3,0,0,3,0,0,3,0,0,3,0,
0,3,0,0,0,3,0,0,3,0,0,3,0,0,0,3,
3,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0,
0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0]
kaboom_8=[0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,3,0,3,0,3,0,0,3,0,0,3,0,3,0,3,
3,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0,
0,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0]
kaboom_9=[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,0,3,0,0,3,0,0,3,0,0,3,0,3,0,3,
0,0,3,0,0,3,0,0,3,0,0,3,0,3,0,0]
kaboom_10=[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,0,3,0,0,3,0,0,3,0,0,3,0,3,0,3]
kaboom_11=[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] | 32.407767 | 44 | 0.474835 | Kaboom_0=[
0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,
0,0,0,2,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,2,2,2,2,2,2,2,2,0,0,0,0,0,0,
0,0,2,2,2,2,2,2,2,2,0,0,0,0,0,0,
0,0,2,2,2,2,2,2,2,2,0,0,0,0,0,0,
0,0,2,1,1,1,1,1,1,2,0,0,0,0,2,2,
0,0,2,1,1,1,1,1,1,2,0,0,0,0,2,2,
0,0,2,1,2,1,1,2,1,2,0,0,0,0,0,0,
0,0,2,1,1,1,1,1,1,2,0,0,0,0,0,3,
0,0,0,2,2,1,1,2,2,0,0,0,0,0,4,3,
0,0,2,2,2,2,2,2,2,2,2,4,4,4,0,0,
0,0,2,2,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,2,2,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,0,0,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,0,0,2,2,0,2,2,0,0,0,0,0,0,0,
0,0,0,0,2,2,0,2,2,0,0,0,0,0,0,0]
kaboom_1=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_2=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,3,0,0,0,0,0,0,
2,2,2,2,2,2,2,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_3=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,3,0,0,0,0,0,0,
2,2,2,2,2,2,2,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,3,3,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_4=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,
0,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0,
0,0,0,0,0,0,3,3,3,3,3,0,0,0,0,0,
0,0,0,0,0,0,3,3,3,3,3,0,0,0,0,0,
2,2,2,2,2,3,3,3,3,3,3,3,0,0,0,0,
0,0,0,0,0,0,3,3,3,3,3,0,0,0,0,0,
0,0,0,0,0,0,3,3,3,3,3,0,0,0,0,0,
0,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0,
0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_5=[0,0,3,0,0,0,0,0,0,0,0,0,0,0,3,0,
0,0,0,3,0,0,0,0,3,0,0,0,0,3,0,0,
0,0,0,0,3,0,3,0,3,0,3,0,3,0,0,0,
0,0,0,0,0,3,3,3,3,3,3,3,0,0,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,0,0,3,3,3,3,3,3,3,0,0,0,0,
2,2,2,3,3,3,3,3,3,3,3,3,3,3,0,0,
0,0,0,0,0,3,3,3,3,3,3,3,0,0,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,0,0,3,3,3,3,3,3,3,0,0,0,0,
0,0,0,0,3,0,3,0,3,0,3,0,3,0,0,0,
0,0,0,3,0,0,0,0,3,0,0,0,0,3,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_6=[0,0,3,0,0,3,0,0,3,0,0,3,0,0,3,0,
0,0,0,3,0,3,0,0,3,0,0,3,0,3,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,0,3,3,3,3,3,3,3,3,3,0,0,0,
0,0,0,3,0,3,0,0,3,0,0,3,0,3,0,0,
0,0,3,0,0,3,0,0,3,0,0,3,0,0,3,0,
0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,3,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
kaboom_7=[0,0,3,0,0,3,0,0,3,0,0,3,0,0,3,0,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,0,3,3,3,3,3,3,3,3,3,3,3,0,0,
0,0,3,0,0,3,0,0,3,0,0,3,0,0,3,0,
0,3,0,0,0,3,0,0,3,0,0,3,0,0,0,3,
3,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0,
0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0]
kaboom_8=[0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,
0,3,0,3,0,3,0,0,3,0,0,3,0,3,0,3,
3,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0,
0,0,0,0,0,3,0,0,3,0,0,3,0,0,0,0]
kaboom_9=[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,0,3,0,0,3,0,0,3,0,0,3,0,3,0,3,
0,0,3,0,0,3,0,0,3,0,0,3,0,3,0,0]
kaboom_10=[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,0,3,0,0,3,0,0,3,0,0,3,0,3,0,3]
kaboom_11=[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] | true | true |
f72f190a17b6b82a72841a82dddb9b59a381bfe5 | 197 | py | Python | worldclock/views.py | arminfriedl/netclock | f83f0faa1f2c8f06dc04d2d6de315a7b35f1c361 | [
"MIT"
] | null | null | null | worldclock/views.py | arminfriedl/netclock | f83f0faa1f2c8f06dc04d2d6de315a7b35f1c361 | [
"MIT"
] | null | null | null | worldclock/views.py | arminfriedl/netclock | f83f0faa1f2c8f06dc04d2d6de315a7b35f1c361 | [
"MIT"
] | null | null | null | from flask import render_template, request, flash, redirect, url_for, session
from . import app
@app.route('', methods=['GET'])
def create():
return render_template("worldclock/create.html")
| 24.625 | 77 | 0.736041 | from flask import render_template, request, flash, redirect, url_for, session
from . import app
@app.route('', methods=['GET'])
def create():
return render_template("worldclock/create.html")
| true | true |
f72f1912a1ea4ca3114f0a355e5ee07788eceb1f | 1,182 | py | Python | pyairwatch/system/users.py | jprichards/PyVMwareAirWatch | ac25185d1f6a001c9670d57d11a3047553f743a1 | [
"MIT"
] | 17 | 2018-03-19T05:52:37.000Z | 2022-03-09T16:41:03.000Z | pyairwatch/system/users.py | marshall-brown/PyVMwareAirWatch | ac25185d1f6a001c9670d57d11a3047553f743a1 | [
"MIT"
] | 2 | 2018-12-06T17:12:54.000Z | 2019-08-27T09:57:13.000Z | pyairwatch/system/users.py | marshall-brown/PyVMwareAirWatch | ac25185d1f6a001c9670d57d11a3047553f743a1 | [
"MIT"
] | 9 | 2018-04-02T18:42:51.000Z | 2020-06-10T02:11:05.000Z | class Users(object):
def __init__(self, client):
self.client = client
#UNTESTED
def search(self, **kwargs):
"""
Returns the Enrollment User's details matching the search parameters
/api/system/users/search?{params}
PARAMS:
username={username}
firstname={firstname}
lastname={lastname}
email={email}
organizationgroupid={locationgroupid}
role={role}
"""
response = self._get(path='/users/search', params=kwargs)
return response
def _get(self, module='system', path=None, version=None, params=None, header=None):
"""GET requests for the /System/Users module."""
response = self.client.get(module=module, path=path, version=version, params=params, header=header)
return response
def _post(self, module='system', path=None, version=None, params=None, data=None, json=None, header=None):
"""POST requests for the /System/Users module."""
response = self.client.post(module=module, path=path, version=version, params=params, data=data, json=json, header=header)
return response
| 35.818182 | 130 | 0.628596 | class Users(object):
def __init__(self, client):
self.client = client
def search(self, **kwargs):
response = self._get(path='/users/search', params=kwargs)
return response
def _get(self, module='system', path=None, version=None, params=None, header=None):
response = self.client.get(module=module, path=path, version=version, params=params, header=header)
return response
def _post(self, module='system', path=None, version=None, params=None, data=None, json=None, header=None):
response = self.client.post(module=module, path=path, version=version, params=params, data=data, json=json, header=header)
return response
| true | true |
f72f1930dfc31b7280f1354dfad37771fff132eb | 14,948 | py | Python | veroviz/getTimeDist3D.py | optimatorlab/veroviz | 4b4b7da07abbc764169223cc4cac41e19ff7031d | [
"MIT"
] | 16 | 2019-11-05T06:33:21.000Z | 2022-02-09T04:37:03.000Z | veroviz/getTimeDist3D.py | optimatorlab/veroviz | 4b4b7da07abbc764169223cc4cac41e19ff7031d | [
"MIT"
] | 6 | 2019-11-22T09:38:01.000Z | 2021-06-18T02:08:43.000Z | veroviz/getTimeDist3D.py | optimatorlab/veroviz | 4b4b7da07abbc764169223cc4cac41e19ff7031d | [
"MIT"
] | 4 | 2020-09-25T07:48:56.000Z | 2022-02-09T04:39:54.000Z | from veroviz._common import *
from veroviz._validation import *
from veroviz._buildFlightProfile import buildNoLoiteringFlight
from veroviz._buildFlightProfile import getTimeDistFromFlight
from veroviz._utilities import privConvertDistance
from veroviz._utilities import privConvertTime
def getTimeDist3D(nodes=None, matrixType='all2all', fromNodeID=None, toNodeID=None, takeoffSpeedMPS=None, cruiseSpeedMPS=None, landSpeedMPS=None, cruiseAltMetersAGL=None,
routeType='square', climbRateMPS=None, descentRateMPS=None, outputDistUnits='meters', outputTimeUnits='seconds'):
"""
This function calculates travel time and distance for vehicles that travel in 3-dimensional space (e.g., drones). The function returns three dictionaries; one for time, one for ground distance, and one for overall (3D) travel distance.
Parameters
----------
nodes: :ref:`Nodes`, Required, default as None
This :ref:`Nodes` dataframe contains the locations between which the travel time and distance will be calculated.
matrixType: string, Optional, default as 'all2all'
Specifies the structure of the travel matrices. Valid options include 'all2all', 'many2one', and 'one2many'. The default 'all2all' option will return square matrices (one for time, one for distance) describing the directed travel time and travel distance between all pairs of nodes. The 'one2many' option will return vectors describing the directed travel from one node to all other nodes. Similarly, the 'many2one' option will return vectors describing the directed travel from all nodes to a given node. See the table in the note below for details.
fromNodeID: int, Optional, default as None
Specifies the node ID (from the `id` column of the input `nodes` dataframe) of the origin node. This parameter is required for the 'one2many' matrix type; it is ignored by all other matrix types. See the table in the note below for details.
toNodeID: int, Optional, default as None
Specifies the node ID (from the `id` column of the input `nodes` dataframe) of the destination node. This parameter is required for the 'many2one' matrix type; it is ignored for all other matrix types. See the table in the note below for details.
takeoffSpeedMPS: float, Conditional, default as None
The speed of the aircraft, in meters per second, during the "takeoff" phase. This will apply only to 'square' and 'trapezoidal' route types. The takeoff phase is the first component of these route types, and is associated with an increase in altitude. The takeoff speed is assumed to be constant, and ignores acceleration. See :ref:`Flight Profile and Flight Path` for additional information.
cruiseSpeedMPS: float, Conditional, default as None
The speed of the aircraft, in meters per second, during the "cruising" phase. This will apply to all of the route options. Typically, the cruising phase occurs at a constant altitude, as specified by `cruiseAltMetersAGL`. However, for the 'triangular' route type, cruiseSpeedMPS specifies the constant travel speed during both the ascent to, and immediate descent from, the cruise altitude. In the 'triangle' route type, the aircraft has no horizontal travel at the cruise altitude. In all cases, the cruise speed is assumed to be constant, and ignores acceleration. See :ref:`Flight Profile and Flight Path` for additional information.
landSpeedMPS: float, Conditional, default as None
The speed of the aircraft, in meters per second, during the "landing" phase. This will apply to only the 'square' and 'trapezoidal' route types. The landing phase is the last component of these route types, and is associated with a decrease in altitude. The landing speed is assumed to be constant, and ignore deceleration. See :ref:`Flight Profile and Flight Path` for additional information.
cruiseAltMetersAGL: float, Conditional, default as None
The altitude, in meters above ground level, at which the aircraft is in the "cruise" phase. This phase is typically associated with horizontal movement at a fixed altitude. The exception is for the 'triangular' route type, in which case the aircraft instantaneously transitions from ascent to descent at the cruise altitude (i.e., there is no horizontal travel at this altitude). All but the 'straight' route type require/use the cruise altitude. See :ref:`Flight Profile and Flight Path` for additional details.
routeType: string, Optional, default as 'square'
Specifies the basic shape of the flight profile. Valid options include 'square', 'triangular', 'trapezoidal', and 'straight'. The square profile involves a vertical takeoff to a cruising altitude, horizontal travel at the cruising altitude, and a vertical landing. The trapezoidal profile describes a takeoff phase in which the aircraft increases altitude and travels horizontally towards the destination until reaching the cruising altitude, horizontal travel at the cruising altitude, and a landing phase in which the aircraft decreases altitude and travels horizontally until reaching the destination. For the trapezoidal profile, the horizontal movement during the takeoff and landing phases is a function of the `climbRateMPS` and `descentRateMPS`, respectively. The triangular profile describes an ascent to the cruising altitude followed immediately by a descent to the destination. Finally, the straight profile describes straight-line flight directly from the starting location to the ending location; the altitudes of these two locations may differ. See :ref:`Flight Profile and Flight Path` for a description of these flight profiles.
climbRateMPS: float, Conditional, default as None
This parameter is used only for the 'trapezoidal' route type, and is in units of meters per second. It describes the rate at which the aircraft increases its altitude, relative to the value of `takeoffSpeedMPS`. If `climbRateMPS == takeoffSpeedMPS`, then the takeoff phase will be purely vertical. If `climbRateMPS` is close to zero, then the takeoff phase will be characterized by a slow increase in altitude (and longer horizontal flight). The aircraft's actual travel speed during the climb will be `takeoffSpeedMPS`. See :ref:`Flight Profile and Flight Path` for additional details.
descentRateMPS: float, Conditional, default as None
This parameter is used only for the 'trapezoidal' route type, and is in units of meters per second. It describes the rate at which the aircraft decreases its altitude, relative to the value of `landSpeedMPS`. If `descentRateMPS == landSpeedMPS`, then the landing phase will be purely vertical. If `descentRateMPS` is close to zero, then the landing phase will be characterized by a slow decrease in altitude (and longer horizontal flight). The aircraft's actual travel speed during the descent will be `landSpeedMPS`. See :ref:`Flight Profile and Flight Path` for additional details.
outputDistUnits: string, Optional, default as 'meters'
Specifies the desired distance units for the function's output. Valid values are 'meters', 'm', 'kilometers', 'km', 'miles', 'mi', 'feet', 'ft', 'nm', and 'nmi' (nautical miles). See :ref:`Units` for options and abbreviations.
outputTimeUnits: string, Optional, default as 'seconds'
Specifies the desired time units for the function's output. Valid values are 'seconds', 'hours', and 'minutes'. See :ref:`Units` for options and abbreviations.
Returns
-------
totalTime: dictionary
A Python dictionary containing travel times. Time units are defined by `outputTimeUnits`. The format of key values is: `(fromID, toID)`. The travel time from ID 1 to ID 2 is provided by `time[1, 2]`.
totalGroundDistance: dictionary
A Python dictionary containing ground travel distances (i.e., ignoring any vertical distances). Distance units are defined by `outputDistUnits`. The format of key values is: `(fromID, toID)`. The horizontal-only travel distance from ID 1 to ID 2 is provided by `totalGroundDistance[1, 2]`.
totalFlightDistance: dictionary
A Python dictionary containing total travel distances (i.e., including both the horizontal and vertical components of flight). Distance units are defined by `outputDistUnits`. The format of key values is: `(fromID, toID)`. The total travel distance from ID 1 to ID 2 is provided by `totalFlightDistance[1, 2]`.
Note
----
For `matrixType`, the options are 'all2all', 'one2many', and 'many2one'.
+----------------------+--------------+------------+------------------+
| `matrixType` options | `fromNodeID` | `toNodeID` | Return type |
+======================+==============+============+==================+
| 'all2all' | ignored | ignored | Square matrices |
+----------------------+--------------+------------+------------------+
| 'one2many' | required | ignored | Row vectors |
+----------------------+--------------+------------+------------------+
| 'many2one' | ignored | required | Column vectors |
+----------------------+--------------+------------+------------------+
In 'all2all', square matrices will be generated for all node pairs in the
provided `nodes` dataframe.
In 'one2many', a node `id` will be assigned in the `fromNodeID` field, which
comes from the `id` column in the provided `nodes` dataframe.
Row vectors will be returned for the time and distance from that node
to all the nodes in the provided `nodes` dataframe.
In 'many2one', column vectors will be returned for the time and distance
from all nodes in the provided `nodes` dataframe to the node indicated
by `toNodeID`.
Examples
--------
Import veroviz and check if the version is up-to-date
>>> import veroviz as vrv
>>> vrv.checkVersion()
Generate a :ref:`Nodes` dataframe from a list of coordinates. See :meth:`~veroviz.generateNodes.generateNodes` for other methods to generate "nodes" dataframes.
>>> locs = [
... [42.1538, -78.4253],
... [42.3465, -78.6234],
... [42.6343, -78.1146]]
>>> exampleNodes = vrv.createNodesFromLocs(locs=locs)
Example 1 - Calculate 'all2all' travel matrices for a drone with a 'square' flight profile. There are 3 nodes, so the matrices will be 3x3.
>>> [totalTime, totalGroundDistance, totalFlightDistance] = vrv.getTimeDist3D(
... nodes = exampleNodes,
... routeType = 'square',
... cruiseAltMetersAGL = 120,
... takeoffSpeedMPS = 5,
... cruiseSpeedMPS = 12,
... landSpeedMPS = 2,
... outputDistUnits = 'meters',
... outputTimeUnits = 'seconds')
>>> print("Travel time from node 2 to node 3 is %.2f seconds" % (totalTime[2, 3]))
>>> print("Ground distance from node 2 to node 3 is %.2f meters" % (totalGroundDistance[2, 3]))
>>> print("Total flight distance from node 2 to node 3 is %.2f meters" % (totalFlightDistance[2, 3]))
Example 2 - Calculate 'one2many' travel matrices for a drone with a 'trapezoidal' flight profile, starting from node 2. All functional arguments are included in this example.
>>> [timeSec, groundDist, totalDist] = vrv.getTimeDist3D(
... nodes = exampleNodes,
... matrixType = 'one2many',
... fromNodeID = 2,
... toNodeID = None,
... takeoffSpeedMPS = 5,
... cruiseSpeedMPS = 12,
... landSpeedMPS = 5,
... cruiseAltMetersAGL = 120,
... routeType = 'trapezoidal',
... climbRateMPS = 1,
... descentRateMPS = 1,
... outputDistUnits = 'meters',
... outputTimeUnits = 'seconds')
>>> print("Travel time from node 2 to node 3 is %.2f seconds" % (timeSec[2, 3]))
>>> print("Ground distance from node 2 to node 3 is %.2f meters" % (groundDist[2, 3]))
>>> print("Total flight distance from node 2 to node 3 is %.2f meters" % (totalDist[2, 3]))
"""
# validation
[valFlag, errorMsg, warningMsg] = valGetTimeDist3D(nodes, matrixType, fromNodeID, toNodeID, outputDistUnits, outputTimeUnits, routeType, takeoffSpeedMPS, climbRateMPS, cruiseSpeedMPS, cruiseAltMetersAGL, landSpeedMPS, descentRateMPS)
if (not valFlag):
print (errorMsg)
return [None, None, None]
elif (config['VRV_SETTING_SHOWWARNINGMESSAGE'] and warningMsg != ""):
print (warningMsg)
try:
matrixType = matrixType.lower()
except:
pass
# Specify the list of rows and columns of output dataframes
fromIDs = []
toIDs = []
if (matrixType == "all2all"):
fromIDs = nodes['id'].tolist()
toIDs = nodes['id'].tolist()
elif (matrixType == "one2many"):
fromIDs = [fromNodeID]
toIDs = nodes['id'].tolist()
elif (matrixType == "many2one"):
fromIDs = nodes['id'].tolist()
toIDs = [toNodeID]
else:
return
# Specify the list of coordinations, for each coordinate, it is in [lat, lon, alt] format
fromLocs = []
toLocs = []
for i in range(0, len(fromIDs)):
fromLocs.append([
float(nodes.loc[nodes['id'] == fromIDs[i], 'lat']),
float(nodes.loc[nodes['id'] == fromIDs[i], 'lon']),
float(nodes.loc[nodes['id'] == fromIDs[i], 'altMeters'])])
for i in range(0, len(toIDs)):
toLocs.append([
float(nodes.loc[nodes['id'] == toIDs[i], 'lat']),
float(nodes.loc[nodes['id'] == toIDs[i], 'lon']),
float(nodes.loc[nodes['id'] == toIDs[i], 'altMeters'])])
# Do queries to find DICTIONARIES of distance and time matrices
totalTimeSec = {}
totalGroundDistMeters = {}
totalFlightDistMeters = {}
for i in range(len(fromLocs)):
for j in range(i, len(toLocs)):
# Prepare for fields to generate flight
startLoc = fromLocs[i]
endLoc = toLocs[j]
if (i != j):
# The flight has no loitering
flight = buildNoLoiteringFlight(routeType, startLoc, cruiseAltMetersAGL, endLoc, takeoffSpeedMPS, climbRateMPS, cruiseSpeedMPS, landSpeedMPS, descentRateMPS)
# Time and ground/flight distance, notice the matrix is symmetric
[time, groundDistance, flightDistance] = getTimeDistFromFlight(flight.copy())
totalTimeSec[i, j] = time
totalTimeSec[j, i] = time
totalGroundDistMeters[i, j] = groundDistance
totalGroundDistMeters[j, i] = groundDistance
totalFlightDistMeters[i, j] = flightDistance
totalFlightDistMeters[j, i] = flightDistance
else:
totalTimeSec[i, j] = 0
totalGroundDistMeters[i, j] = 0
totalFlightDistMeters[i, j] = 0
# Rename the keyvalues by fromRows and toCols and reset output units
totalTime = {}
totalGroundDistance = {}
totalFlightDistance = {}
for i in range(len(fromIDs)):
for j in range(len(toIDs)):
totalTime[fromIDs[i], toIDs[j]] = totalTimeSec[i, j] * privConvertTime(1.0, 's', outputTimeUnits)
totalGroundDistance[fromIDs[i], toIDs[j]] = totalGroundDistMeters[i, j] * privConvertDistance(1.0, 'm', outputDistUnits)
totalFlightDistance[fromIDs[i], toIDs[j]] = totalFlightDistMeters[i, j] * privConvertDistance(1.0, 'm', outputDistUnits)
return [totalTime, totalGroundDistance, totalFlightDistance]
| 71.865385 | 1,154 | 0.711466 | from veroviz._common import *
from veroviz._validation import *
from veroviz._buildFlightProfile import buildNoLoiteringFlight
from veroviz._buildFlightProfile import getTimeDistFromFlight
from veroviz._utilities import privConvertDistance
from veroviz._utilities import privConvertTime
def getTimeDist3D(nodes=None, matrixType='all2all', fromNodeID=None, toNodeID=None, takeoffSpeedMPS=None, cruiseSpeedMPS=None, landSpeedMPS=None, cruiseAltMetersAGL=None,
	routeType='square', climbRateMPS=None, descentRateMPS=None, outputDistUnits='meters', outputTimeUnits='seconds'):
	"""Build 3D (flight) travel-time and distance matrices between node locations.

	Returns a 3-element list [totalTime, totalGroundDistance, totalFlightDistance];
	each element is a dictionary keyed by (fromNodeID, toNodeID) tuples.  On
	validation failure the error message is printed and [None, None, None] is
	returned.

	`nodes` is expected to be a dataframe-like object with 'id', 'lat', 'lon'
	and 'altMeters' columns (values are looked up via nodes.loc[...]) —
	presumably a veroviz "nodes" dataframe.
	"""
	# Validate all inputs up-front; bail out on error, optionally print warnings.
	[valFlag, errorMsg, warningMsg] = valGetTimeDist3D(nodes, matrixType, fromNodeID, toNodeID, outputDistUnits, outputTimeUnits, routeType, takeoffSpeedMPS, climbRateMPS, cruiseSpeedMPS, cruiseAltMetersAGL, landSpeedMPS, descentRateMPS)
	if (not valFlag):
		print (errorMsg)
		return [None, None, None]
	elif (config['VRV_SETTING_SHOWWARNINGMESSAGE'] and warningMsg != ""):
		print (warningMsg)
	# Normalize matrixType to lowercase; non-string values are left unchanged.
	try:
		matrixType = matrixType.lower()
	except:
		pass
	# Decide which node ids form the rows (from) and columns (to) of the matrix.
	fromIDs = []
	toIDs = []
	if (matrixType == "all2all"):
		fromIDs = nodes['id'].tolist()
		toIDs = nodes['id'].tolist()
	elif (matrixType == "one2many"):
		fromIDs = [fromNodeID]
		toIDs = nodes['id'].tolist()
	elif (matrixType == "many2one"):
		fromIDs = nodes['id'].tolist()
		toIDs = [toNodeID]
	else:
		# NOTE(review): an unrecognized matrixType returns bare None rather
		# than [None, None, None]; presumably unreachable after validation.
		return
	# Resolve each id to a [lat, lon, altMeters] triple.
	fromLocs = []
	toLocs = []
	for i in range(0, len(fromIDs)):
		fromLocs.append([
			float(nodes.loc[nodes['id'] == fromIDs[i], 'lat']),
			float(nodes.loc[nodes['id'] == fromIDs[i], 'lon']),
			float(nodes.loc[nodes['id'] == fromIDs[i], 'altMeters'])])
	for i in range(0, len(toIDs)):
		toLocs.append([
			float(nodes.loc[nodes['id'] == toIDs[i], 'lat']),
			float(nodes.loc[nodes['id'] == toIDs[i], 'lon']),
			float(nodes.loc[nodes['id'] == toIDs[i], 'altMeters'])])
	# Working matrices in base units (seconds / meters), keyed by index pairs.
	totalTimeSec = {}
	totalGroundDistMeters = {}
	totalFlightDistMeters = {}
	for i in range(len(fromLocs)):
		# Only the "upper triangle" (j >= i) is computed; each result is also
		# mirrored into (j, i), i.e. travel metrics are assumed symmetric.
		for j in range(i, len(toLocs)):
			startLoc = fromLocs[i]
			endLoc = toLocs[j]
			if (i != j):
				# Build the 3D flight profile for this leg and measure it.
				flight = buildNoLoiteringFlight(routeType, startLoc, cruiseAltMetersAGL, endLoc, takeoffSpeedMPS, climbRateMPS, cruiseSpeedMPS, landSpeedMPS, descentRateMPS)
				[time, groundDistance, flightDistance] = getTimeDistFromFlight(flight.copy())
				totalTimeSec[i, j] = time
				totalTimeSec[j, i] = time
				totalGroundDistMeters[i, j] = groundDistance
				totalGroundDistMeters[j, i] = groundDistance
				totalFlightDistMeters[i, j] = flightDistance
				totalFlightDistMeters[j, i] = flightDistance
			else:
				# Zero cost from a location to itself.
				totalTimeSec[i, j] = 0
				totalGroundDistMeters[i, j] = 0
				totalFlightDistMeters[i, j] = 0
	# Re-key from index pairs to (fromID, toID) pairs, converting units by
	# multiplying with a one-unit conversion factor.
	totalTime = {}
	totalGroundDistance = {}
	totalFlightDistance = {}
	for i in range(len(fromIDs)):
		for j in range(len(toIDs)):
			totalTime[fromIDs[i], toIDs[j]] = totalTimeSec[i, j] * privConvertTime(1.0, 's', outputTimeUnits)
			totalGroundDistance[fromIDs[i], toIDs[j]] = totalGroundDistMeters[i, j] * privConvertDistance(1.0, 'm', outputDistUnits)
			totalFlightDistance[fromIDs[i], toIDs[j]] = totalFlightDistMeters[i, j] * privConvertDistance(1.0, 'm', outputDistUnits)
	return [totalTime, totalGroundDistance, totalFlightDistance]
| true | true |
f72f19389baf6383e97fcdeeb7e0ff036c8208a5 | 1,325 | py | Python | youtuatools/extractor/unity.py | Pagasis/YouTua | edb44b2065a7224f8b26aaf76166bf7287901567 | [
"MIT"
] | 47 | 2021-01-02T07:44:50.000Z | 2022-02-28T22:02:13.000Z | nextdl/extractor/unity.py | devenu85/nextdl | 0b458f556e2e0be80cb94bd9a9b1405ad2e9182d | [
"MIT"
] | 4 | 2021-02-07T03:35:13.000Z | 2021-10-31T19:23:53.000Z | nextdl/extractor/unity.py | devenu85/nextdl | 0b458f556e2e0be80cb94bd9a9b1405ad2e9182d | [
"MIT"
] | 8 | 2021-01-03T05:44:39.000Z | 2021-11-01T05:46:32.000Z | from __future__ import unicode_literals
from .common import InfoExtractor
from .youtube import YoutubeIE
class UnityIE(InfoExtractor):
    """Extractor for Unity Learn tutorial pages (unity3d.com/learn/tutorials).

    The tutorial pages embed a YouTube player; this extractor scrapes the
    embedded video id from the page and delegates extraction to YoutubeIE.
    """

    # The <id> group captures the final path component (the tutorial slug).
    _VALID_URL = (
        r"https?://(?:www\.)?unity3d\.com/learn/tutorials/(?:[^/]+/)*(?P<id>[^/?#&]+)"
    )
    _TESTS = [
        {
            "url": "https://unity3d.com/learn/tutorials/topics/animation/animate-anything-mecanim",
            "info_dict": {
                "id": "jWuNtik0C8E",
                "ext": "mp4",
                "title": "Live Training 22nd September 2014 - Animate Anything",
                "description": "md5:e54913114bd45a554c56cdde7669636e",
                "duration": 2893,
                "uploader": "Unity",
                "uploader_id": "Unity3D",
                "upload_date": "20140926",
            },
        },
        {
            "url": "https://unity3d.com/learn/tutorials/projects/2d-ufo-tutorial/following-player-camera?playlist=25844",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        """Fetch the tutorial page and hand the embedded YouTube id to YoutubeIE."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page stores the YouTube video id in a data-video-id attribute.
        youtube_id = self._search_regex(
            r'data-video-id="([_0-9a-zA-Z-]+)"', webpage, "youtube ID"
        )
        return self.url_result(youtube_id, ie=YoutubeIE.ie_key(), video_id=video_id)
| 34.868421 | 121 | 0.556226 | from __future__ import unicode_literals
from .common import InfoExtractor
from .youtube import YoutubeIE
class UnityIE(InfoExtractor):
    """Extractor for Unity Learn tutorial pages (unity3d.com/learn/tutorials).

    The tutorial pages embed a YouTube player; this extractor scrapes the
    embedded video id from the page and delegates extraction to YoutubeIE.
    """

    # The <id> group captures the final path component (the tutorial slug).
    _VALID_URL = (
        r"https?://(?:www\.)?unity3d\.com/learn/tutorials/(?:[^/]+/)*(?P<id>[^/?#&]+)"
    )
    _TESTS = [
        {
            "url": "https://unity3d.com/learn/tutorials/topics/animation/animate-anything-mecanim",
            "info_dict": {
                "id": "jWuNtik0C8E",
                "ext": "mp4",
                "title": "Live Training 22nd September 2014 - Animate Anything",
                "description": "md5:e54913114bd45a554c56cdde7669636e",
                "duration": 2893,
                "uploader": "Unity",
                "uploader_id": "Unity3D",
                "upload_date": "20140926",
            },
        },
        {
            "url": "https://unity3d.com/learn/tutorials/projects/2d-ufo-tutorial/following-player-camera?playlist=25844",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        """Fetch the tutorial page and hand the embedded YouTube id to YoutubeIE."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page stores the YouTube video id in a data-video-id attribute.
        youtube_id = self._search_regex(
            r'data-video-id="([_0-9a-zA-Z-]+)"', webpage, "youtube ID"
        )
        return self.url_result(youtube_id, ie=YoutubeIE.ie_key(), video_id=video_id)
| true | true |
f72f19778b0b87aa95ac05b779fa868ad2422b2a | 256 | py | Python | pybat/__init__.py | mbercx/pybat | e0cf610fd06a97979f5ec70757406de1f9a788ef | [
"MIT"
] | 3 | 2019-04-08T13:10:15.000Z | 2021-07-04T07:23:49.000Z | pybat/__init__.py | mbercx/pybat | e0cf610fd06a97979f5ec70757406de1f9a788ef | [
"MIT"
] | 1 | 2019-02-28T12:51:57.000Z | 2019-02-28T12:51:57.000Z | pybat/__init__.py | mbercx/pybat | e0cf610fd06a97979f5ec70757406de1f9a788ef | [
"MIT"
] | 4 | 2018-07-30T12:58:35.000Z | 2020-03-05T20:09:46.000Z | # These import commands make importing core classes easier, e.g. you can just import
# Cathode using:
#
# from pybat import Cathode
#
# Instead of:
#
# from pybat.core import Cathode
#
from pybat.core import Cathode, LiRichCathode, Dimer, DimerNEBAnalysis
| 23.272727 | 84 | 0.765625 |
from pybat.core import Cathode, LiRichCathode, Dimer, DimerNEBAnalysis
| true | true |
f72f19dca994b161669b0dcdd2b3c6e2c7ce4f8d | 308 | py | Python | src/scripts/chmTools/module.py | Matej-Chmel/approximate-knn | 4d29dc285f50fcdce1c3052472959f789c46cc70 | [
"MIT"
] | null | null | null | src/scripts/chmTools/module.py | Matej-Chmel/approximate-knn | 4d29dc285f50fcdce1c3052472959f789c46cc70 | [
"MIT"
] | null | null | null | src/scripts/chmTools/module.py | Matej-Chmel/approximate-knn | 4d29dc285f50fcdce1c3052472959f789c46cc70 | [
"MIT"
] | null | null | null | from .runner import AppError, checkInsideVenv, insideVenv, wrapMain
# Guard: abort immediately unless the script runs inside the project's venv.
checkInsideVenv()
from .configParam import getConfigPath
from .Dataset import Dataset
from .export import getExportedData
from .jsonTypeCheck import configStr, getDictValue, getRoot
from .RecallTable import RecallTable, RecallTableConfig
| 30.8 | 67 | 0.847403 | from .runner import AppError, checkInsideVenv, insideVenv, wrapMain
# Guard: abort immediately unless the script runs inside the project's venv.
checkInsideVenv()
from .configParam import getConfigPath
from .Dataset import Dataset
from .export import getExportedData
from .jsonTypeCheck import configStr, getDictValue, getRoot
from .RecallTable import RecallTable, RecallTableConfig
| true | true |
f72f1ae9e9ef3f419c5a8e7fe69af1c394dd4b59 | 285 | py | Python | exercicios/ex020.py | MaikolSantos/curso-em-video-python3 | 3a1ab2761b8a0f98e128083a7b0e50b19a75b7bf | [
"MIT"
] | null | null | null | exercicios/ex020.py | MaikolSantos/curso-em-video-python3 | 3a1ab2761b8a0f98e128083a7b0e50b19a75b7bf | [
"MIT"
] | null | null | null | exercicios/ex020.py | MaikolSantos/curso-em-video-python3 | 3a1ab2761b8a0f98e128083a7b0e50b19a75b7bf | [
"MIT"
] | null | null | null | from random import shuffle
# Read the four students' names, then announce them in a random order.
deck = [
    input('Digite o nome do Aluno 1: '),
    input('Digite o nome do Aluno 2: '),
    input('Digite o nome do Aluno 3: '),
    input('Digite o nome do Aluno 4: '),
]
shuffle(deck)
print('A ordem de apresentação será: {}'.format(deck))
| 28.5 | 54 | 0.663158 | from random import shuffle
# Collect the four students' names in prompt order, shuffle, and report.
deck = []
deck.append(input('Digite o nome do Aluno 1: '))
deck.append(input('Digite o nome do Aluno 2: '))
deck.append(input('Digite o nome do Aluno 3: '))
deck.append(input('Digite o nome do Aluno 4: '))
shuffle(deck)
print('A ordem de apresentação será: {}'.format(deck))
| true | true |
f72f1b1556e1450a0a48cf17145914764f019c19 | 45,265 | py | Python | gdal2ISIS3/Astropedia_gdal2ISIS3.py | europlanet-gmap/GDAL_scripts | 3352d2ca8580230f2f4ce7712ddd23743ca14d59 | [
"Unlicense"
] | 55 | 2015-08-16T02:57:04.000Z | 2021-09-09T18:22:45.000Z | gdal2ISIS3/Astropedia_gdal2ISIS3.py | europlanet-gmap/GDAL_scripts | 3352d2ca8580230f2f4ce7712ddd23743ca14d59 | [
"Unlicense"
] | 14 | 2016-09-12T07:09:59.000Z | 2022-02-22T16:43:29.000Z | gdal2ISIS3/Astropedia_gdal2ISIS3.py | europlanet-gmap/GDAL_scripts | 3352d2ca8580230f2f4ce7712ddd23743ca14d59 | [
"Unlicense"
] | 34 | 2015-01-27T14:28:57.000Z | 2022-02-23T13:00:46.000Z | #!/usr/bin/env python
#/******************************************************************************
# * $Id$
# *
# * Project: GDAL Utilities
# * Purpose: Create an ISIS3-compatible image (raw with an ISIS3 label) from a GDAL-supported image.
# * Author: Trent Hare, <thare@usgs.gov>
# * Date: June 05, 2013
# * version: 0.1
# *
# * Port from gdalinfo.py whose author is Even Rouault
# ******************************************************************************
# * Copyright (c) 2010, Even Rouault
# * Copyright (c) 1998, Frank Warmerdam
# *
# * Permission is hereby granted, free of charge, to any person obtaining a
# * copy of this software and associated documentation files (the "Software"),
# * to deal in the Software without restriction, including without limitation
# * the rights to use, copy, modify, merge, publish, distribute, sublicense,
# * and/or sell copies of the Software, and to permit persons to whom the
# * Software is furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included
# * in all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
import sys
import math
import datetime
import time
import os
import subprocess
try:
from osgeo import gdal
from osgeo import osr
except:
import gdal
import osr
#/************************************************************************/
#/* Usage() */
#/************************************************************************/
def Usage(theApp):
    """Print the command-line help text for this utility and exit with status 1.

    The *theApp* parameter is kept for signature compatibility with the
    gdalinfo.py template this script was ported from; the message text does
    not interpolate it (the original format calls were commented out).
    """
    help_lines = (
        '\nUsage: Astropedia_gdal2ISIS3.py in.tif output.cub',
        '   optional: to print out image information also send -debug',
        '   optional: to just get a label *.lbl, send -noimage',
        '   optional: to attach the *.lbl to the ISIS image -attach - requires ISIS3',
        '   optional: to get lonsys=360, send -force360',
        '   optional: to override the center Longitude, send -centerLon 180',
        '   optional: to set scaler and offset send -base 17374000 and/or -multiplier 0.5',
        'Usage: Astropedia_gdal2ISIS3.py -debug in.cub output.cub\n',
        'Note: Currently this routine will only work for a limited set of images\n',
    )
    for text in help_lines:
        print(text)
    sys.exit(1)
def EQUAL(a, b):
    """Case-insensitive string equality test (mirrors GDAL's C EQUAL macro)."""
    lowered_left = a.lower()
    lowered_right = b.lower()
    return lowered_left == lowered_right
#/************************************************************************/
#/* main() */
#/************************************************************************/
def main( argv = None ):
bComputeMinMax = False
bSample = False
bShowGCPs = True
bShowMetadata = False
bShowRAT=False
debug = False
attach = False
bStats = False
bApproxStats = True
bShowColorTable = True
bComputeChecksum = False
bReportHistograms = False
pszFilename = None
papszExtraMDDomains = [ ]
pszProjection = None
hTransform = None
bShowFileList = True
dst_cub = None
dst_lbl = None
dst_hst = None
bands = 1
centLat = 0
centLon = 0
centerLon = False
TMscale = 1.0
UpperLeftCornerX = 0
UpperLeftCornerY = 0
falseEast = 0
falseNorth = 0
bMakeImage = True
force360 = False
base = None
multiplier = None
#/* Must process GDAL_SKIP before GDALAllRegister(), but we can't call */
#/* GDALGeneralCmdLineProcessor before it needs the drivers to be registered */
#/* for the --format or --formats options */
#for( i = 1; i < argc; i++ )
#{
# if EQUAL(argv[i],"--config") and i + 2 < argc and EQUAL(argv[i + 1], "GDAL_SKIP"):
# {
# CPLSetConfigOption( argv[i+1], argv[i+2] );
#
# i += 2;
# }
#}
#
#GDALAllRegister();
if argv is None:
argv = sys.argv
argv = gdal.GeneralCmdLineProcessor( argv )
if argv is None:
return 1
nArgc = len(argv)
#/* -------------------------------------------------------------------- */
#/* Parse arguments. */
#/* -------------------------------------------------------------------- */
i = 1
while i < nArgc:
if EQUAL(argv[i], "--utility_version"):
print("%s is running against GDAL %s" %
(argv[0], gdal.VersionInfo("RELEASE_NAME")))
return 0
elif EQUAL(argv[i], "-debug"):
debug = True
elif EQUAL(argv[i], "-attach"):
attach = True
elif EQUAL(argv[i], "-force360"):
force360 = True
elif EQUAL(argv[i], "-centerLon"):
i = i + 1
centerLon = float(argv[i])
elif EQUAL(argv[i], "-mm"):
bComputeMinMax = True
elif EQUAL(argv[i], "-hist"):
bReportHistograms = True
elif EQUAL(argv[i], "-stats"):
bStats = True
bApproxStats = False
elif EQUAL(argv[i], "-approx_stats"):
bStats = True
bApproxStats = True
elif EQUAL(argv[i], "-sample"):
bSample = True
elif EQUAL(argv[i], "-checksum"):
bComputeChecksum = True
elif EQUAL(argv[i], "-nogcp"):
bShowGCPs = False
elif EQUAL(argv[i], "-nomd"):
bShowMetadata = False
elif EQUAL(argv[i], "-norat"):
bShowRAT = False
elif EQUAL(argv[i], "-noct"):
bShowColorTable = False
elif EQUAL(argv[i], "-mdd") and i < nArgc-1:
i = i + 1
papszExtraMDDomains.append( argv[i] )
elif EQUAL(argv[i], "-nofl"):
bShowFileList = False
elif EQUAL(argv[i], "-noimage"):
bMakeImage = False
elif EQUAL(argv[i], "-base"):
i = i + 1
base = float(argv[i])
elif EQUAL(argv[i], "-multiplier"):
i = i + 1
multiplier = float(argv[i])
elif argv[i][0] == '-':
return Usage(argv[0])
elif pszFilename is None:
pszFilename = argv[i]
elif dst_cub is None:
dst_cub = argv[i]
else:
return Usage(argv[0])
i = i + 1
if pszFilename is None:
return Usage(argv[0])
if dst_cub is None:
return Usage(argv[0])
#/* -------------------------------------------------------------------- */
#/* Open dataset. */
#/* -------------------------------------------------------------------- */
hDataset = gdal.Open( pszFilename, gdal.GA_ReadOnly )
if hDataset is None:
print("gdalinfo failed - unable to open '%s'." % pszFilename )
sys.exit(1)
# Open the output file.
if dst_cub is not None:
dst_lbl = dst_cub.replace("CUB","LBL")
dst_lbl = dst_lbl.replace("cub","lbl")
dst_hst = dst_cub.replace("CUB","History.IsisCube")
dst_hst = dst_hst.replace("cub","History.IsisCube")
dst_hdr = dst_cub.replace("CUB","hdr")
dst_hdr = dst_hdr.replace("cub","hdr")
dst_aux = dst_cub.replace("CUB","cub.aux.xml")
dst_aux = dst_aux.replace("cub","cub.aux.xml")
if attach:
attach_cub = dst_cub
dst_cub = "XXX"+dst_cub
dst_lbl = "XXX"+dst_lbl
dst_hst = "XXX"+dst_hst
dst_hdr = "XXX"+dst_hdr
dst_aux = "XXX"+dst_aux
if (EQUAL(dst_lbl,dst_cub)):
print('Extension must be .CUB or .cub - unable to run using filename: %s' % pszFilename )
sys.exit(1)
else:
f = open(dst_lbl,'wt')
f_hst = open(dst_hst,'wt')
# else:
# f = sys.stdout
# dst_cub = "out.cub"
#/* -------------------------------------------------------------------- */
#/* Report general info. */
#/* -------------------------------------------------------------------- */
hDriver = hDataset.GetDriver();
if debug:
print( "Driver: %s/%s" % ( \
hDriver.ShortName, \
hDriver.LongName ))
papszFileList = hDataset.GetFileList();
if papszFileList is None or len(papszFileList) == 0:
print( "Files: none associated" )
else:
if debug:
print( "Files: %s" % papszFileList[0] )
if bShowFileList:
for i in range(1, len(papszFileList)):
print( " %s" % papszFileList[i] )
if debug:
print( "Size is %d, %d" % (hDataset.RasterXSize, hDataset.RasterYSize))
#/* -------------------------------------------------------------------- */
#/* Report projection. */
#/* -------------------------------------------------------------------- */
pszProjection = hDataset.GetProjectionRef()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
if debug:
print( "Coordinate System is:\n%s" % pszPrettyWkt )
else:
if debug:
print( "Coordinate System is `%s'" % pszProjection )
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
#print( "Coordinate System is:\n%s" % pszPrettyWkt )
mapProjection = "None"
#Extract projection information
target = hSRS.GetAttrValue("DATUM",0)
target = target.replace("D_","").replace("_2000","").replace("GCS_","")
semiMajor = hSRS.GetSemiMajor()
semiMinor = hSRS.GetSemiMinor()
if (pszProjection[0:6] == "GEOGCS"):
mapProjection = "SimpleCylindrical"
centLon = hSRS.GetProjParm('central_meridian')
if (pszProjection[0:6] == "PROJCS"):
mapProjection = hSRS.GetAttrValue("PROJECTION",0)
if EQUAL(mapProjection,"Sinusoidal"):
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Equirectangular"):
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Transverse_Mercator"):
mapProjection = "TransverseMercator"
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
TMscale = hSRS.GetProjParm('scale_factor')
#Need to research when TM actually applies false values
falseEast = hSRS.GetProjParm('false_easting')
falseNorth = hSRS.GetProjParm('false_northing')
if EQUAL(mapProjection,"Orthographic"):
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Mercator_1SP"):
mapProjection = "Mercator"
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Mercator"):
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Polar_Stereographic"):
mapProjection = "PolarStereographic"
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Stereographic_South_Pole"):
mapProjection = "PolarStereographic"
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Stereographic_North_Pole"):
mapProjection = "PolarStereographic"
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if debug:
print( "Coordinate System is:\n%s" % pszPrettyWkt )
else:
print( "Warning - Currently we can't parse this type of projection" )
print( "Coordinate System is `%s'" % pszProjection )
target = "n/a"
#sys.exit(1)
else:
print( "Warning - No Coordinate System defined:\n" )
target = "n/a"
#sys.exit(1)
#/* -------------------------------------------------------------------- */
#/* Report Geotransform. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
UpperLeftCornerX = adfGeoTransform[0] - falseEast
UpperLeftCornerY = adfGeoTransform[3] - falseNorth
if adfGeoTransform[2] == 0.0 and adfGeoTransform[4] == 0.0:
if debug:
print( "Origin = (%.15f,%.15f)" % ( \
adfGeoTransform[0], adfGeoTransform[3] ))
print( "Pixel Size = (%.15f,%.15f)" % ( \
adfGeoTransform[1], adfGeoTransform[5] ))
else:
if debug:
print( "GeoTransform =\n" \
" %.16g, %.16g, %.16g\n" \
" %.16g, %.16g, %.16g" % ( \
adfGeoTransform[0], \
adfGeoTransform[1], \
adfGeoTransform[2], \
adfGeoTransform[3], \
adfGeoTransform[4], \
adfGeoTransform[5] ))
#Using a very simple method to calculate cellsize.
#Warning: might not always be good.
if (pszProjection[0:6] == "GEOGCS"):
#convert degrees/pixel to m/pixel
mapres = 1 / adfGeoTransform[1]
mres = adfGeoTransform[1] * (semiMajor * math.pi / 180.0)
else:
#convert m/pixel to pixel/degree
mapres = 1 / (adfGeoTransform[1] / (semiMajor * math.pi / 180.0))
mres = adfGeoTransform[1]
#/* -------------------------------------------------------------------- */
#/* Report GCPs. */
#/* -------------------------------------------------------------------- */
if bShowGCPs and hDataset.GetGCPCount() > 0:
pszProjection = hDataset.GetGCPProjection()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
if debug:
print( "GCP Projection = \n%s" % pszPrettyWkt )
else:
if debug:
print( "GCP Projection = %s" % \
pszProjection )
gcps = hDataset.GetGCPs()
i = 0
for gcp in gcps:
if debug:
print( "GCP[%3d]: Id=%s, Info=%s\n" \
" (%.15g,%.15g) -> (%.15g,%.15g,%.15g)" % ( \
i, gcp.Id, gcp.Info, \
gcp.GCPPixel, gcp.GCPLine, \
gcp.GCPX, gcp.GCPY, gcp.GCPZ ))
i = i + 1
#/* -------------------------------------------------------------------- */
#/* Report metadata. */
#/* -------------------------------------------------------------------- */
if debug:
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
for extra_domain in papszExtraMDDomains:
papszMetadata = hDataset.GetMetadata_List(extra_domain)
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata (%s):" % extra_domain)
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report "IMAGE_STRUCTURE" metadata. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report subdatasets. */
#/* -------------------------------------------------------------------- */
papszMetadata = hDataset.GetMetadata_List("SUBDATASETS")
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Subdatasets:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report geolocation. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("GEOLOCATION")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Geolocation:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report RPCs */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("RPC")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "RPC Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Setup projected to lat/long transform if appropriate. */
#/* -------------------------------------------------------------------- */
if pszProjection is not None and len(pszProjection) > 0:
hProj = osr.SpatialReference( pszProjection )
if hProj is not None:
hLatLong = hProj.CloneGeogCS()
if hLatLong is not None:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
hTransform = osr.CoordinateTransformation( hProj, hLatLong )
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find( 'Unable to load PROJ.4 library' ) != -1:
hTransform = None
#/* -------------------------------------------------------------------- */
#/* Report corners. */
#/* -------------------------------------------------------------------- */
if debug:
print( "Corner Coordinates:" )
GDALInfoReportCorner( hDataset, hTransform, "Upper Left", \
0.0, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Left", \
0.0, hDataset.RasterYSize);
GDALInfoReportCorner( hDataset, hTransform, "Upper Right", \
hDataset.RasterXSize, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Right", \
hDataset.RasterXSize, \
hDataset.RasterYSize );
GDALInfoReportCorner( hDataset, hTransform, "Center", \
hDataset.RasterXSize/2.0, \
hDataset.RasterYSize/2.0 );
#Get bounds
ulx = GDALGetLon( hDataset, hTransform, 0.0, 0.0 );
uly = GDALGetLat( hDataset, hTransform, 0.0, 0.0 );
lrx = GDALGetLon( hDataset, hTransform, hDataset.RasterXSize, \
hDataset.RasterYSize );
lry = GDALGetLat( hDataset, hTransform, hDataset.RasterXSize, \
hDataset.RasterYSize );
if (centerLon):
centLon = centerLon
#Calculate Simple Cylindrical X,Y in meters from bounds if not projected.
#Needs testing.
if (pszProjection[0:6] == "GEOGCS"):
#note that: mres = adfGeoTransform[1] * (semiMajor * math.pi / 180.0)
UpperLeftCornerX = semiMajor * (ulx - centLon) * math.pi / 180.0
UpperLeftCornerY = semiMajor * uly * math.pi / 180.0
#/* ==================================================================== */
#/* Loop over bands. */
#/* ==================================================================== */
if debug:
bands = hDataset.RasterCount
for iBand in range(hDataset.RasterCount):
hBand = hDataset.GetRasterBand(iBand+1 )
#if( bSample )
#{
# float afSample[10000];
# int nCount;
#
# nCount = GDALGetRandomRasterSample( hBand, 10000, afSample );
# print( "Got %d samples.\n", nCount );
#}
(nBlockXSize, nBlockYSize) = hBand.GetBlockSize()
print( "Band %d Block=%dx%d Type=%s, ColorInterp=%s" % ( iBand+1, \
nBlockXSize, nBlockYSize, \
gdal.GetDataTypeName(hBand.DataType), \
gdal.GetColorInterpretationName( \
hBand.GetRasterColorInterpretation()) ))
if hBand.GetDescription() is not None \
and len(hBand.GetDescription()) > 0 :
print( " Description = %s" % hBand.GetDescription() )
dfMin = hBand.GetMinimum()
dfMax = hBand.GetMaximum()
if dfMin is not None or dfMax is not None or bComputeMinMax:
line = " "
if dfMin is not None:
line = line + ("Min=%.3f " % dfMin)
if dfMax is not None:
line = line + ("Max=%.3f " % dfMax)
if bComputeMinMax:
gdal.ErrorReset()
adfCMinMax = hBand.ComputeRasterMinMax(False)
if gdal.GetLastErrorType() == gdal.CE_None:
line = line + ( " Computed Min/Max=%.3f,%.3f" % ( \
adfCMinMax[0], adfCMinMax[1] ))
print( line )
stats = hBand.GetStatistics( bApproxStats, bStats)
# Dirty hack to recognize if stats are valid. If invalid, the returned
# stddev is negative
if stats[3] >= 0.0:
print( " Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] ))
if bReportHistograms:
hist = hBand.GetDefaultHistogram(force = True, callback = gdal.TermProgress)
if hist is not None:
dfMin = hist[0]
dfMax = hist[1]
nBucketCount = hist[2]
panHistogram = hist[3]
print( " %d buckets from %g to %g:" % ( \
nBucketCount, dfMin, dfMax ))
line = ' '
for bucket in panHistogram:
line = line + ("%d " % bucket)
print(line)
if bComputeChecksum:
print( " Checksum=%d" % hBand.Checksum())
dfNoData = hBand.GetNoDataValue()
if dfNoData is not None:
if dfNoData != dfNoData:
print( " NoData Value=nan" )
else:
print( " NoData Value=%.18g" % dfNoData )
if hBand.GetOverviewCount() > 0:
line = " Overviews: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0 :
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%dx%d" % (hOverview.XSize, hOverview.YSize))
pszResampling = \
hOverview.GetMetadataItem( "RESAMPLING", "" )
if pszResampling is not None \
and len(pszResampling) >= 12 \
and EQUAL(pszResampling[0:12],"AVERAGE_BIT2"):
line = line + "*"
else:
line = line + "(null)"
print(line)
if bComputeChecksum:
line = " Overviews checksum: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
print(line)
if hBand.HasArbitraryOverviews():
print( " Overviews: arbitrary" )
nMaskFlags = hBand.GetMaskFlags()
if (nMaskFlags & (gdal.GMF_NODATA|gdal.GMF_ALL_VALID)) == 0:
hMaskBand = hBand.GetMaskBand()
line = " Mask Flags: "
if (nMaskFlags & gdal.GMF_PER_DATASET) != 0:
line = line + "PER_DATASET "
if (nMaskFlags & gdal.GMF_ALPHA) != 0:
line = line + "ALPHA "
if (nMaskFlags & gdal.GMF_NODATA) != 0:
line = line + "NODATA "
if (nMaskFlags & gdal.GMF_ALL_VALID) != 0:
line = line + "ALL_VALID "
print(line)
if hMaskBand is not None and \
hMaskBand.GetOverviewCount() > 0:
line = " Overviews of mask band: "
for iOverview in range(hMaskBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hMaskBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
if len(hBand.GetUnitType()) > 0:
print( " Unit Type: %s" % hBand.GetUnitType())
papszCategories = hBand.GetRasterCategoryNames()
if papszCategories is not None:
print( " Categories:" );
i = 0
for category in papszCategories:
print( " %3d: %s" % (i, category) )
i = i + 1
if hBand.GetScale() != 1.0 or hBand.GetOffset() != 0.0:
print( " Offset: %.15g, Scale:%.15g" % \
( hBand.GetOffset(), hBand.GetScale()))
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
hTable = hBand.GetRasterColorTable()
if hBand.GetRasterColorInterpretation() == gdal.GCI_PaletteIndex \
and hTable is not None:
print( " Color Table (%s with %d entries)" % (\
gdal.GetPaletteInterpretationName( \
hTable.GetPaletteInterpretation( )), \
hTable.GetCount() ))
if bShowColorTable:
for i in range(hTable.GetCount()):
sEntry = hTable.GetColorEntry(i)
print( " %3d: %d,%d,%d,%d" % ( \
i, \
sEntry[0],\
sEntry[1],\
sEntry[2],\
sEntry[3] ))
if bShowRAT:
hRAT = hBand.GetDefaultRAT()
#GDALRATDumpReadable( hRAT, None );
#/***************************************************************************/
#/* WriteISISlabel() */
#/***************************************************************************/
#def WriteISISLabel(outFile, DataSetID, pszFilename, sampleBits, lines, samples):
#Currently just procedural programming. Gets the job done...
#
instrList = pszFilename.split("_")
hBand = hDataset.GetRasterBand( 1 )
#get the datatype
print gdal.GetDataTypeName(hBand.DataType)
if EQUAL(gdal.GetDataTypeName(hBand.DataType), "Float32"):
sample_bits = 32
sample_type = "Real"
sample_mask = "2#11111111111111111111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "Float64"):
sample_bits = 32
sample_type = "Real"
sample_mask = "2#11111111111111111111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "INT16"):
sample_bits = 16
sample_type = "SignedWord"
sample_mask = "2#1111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "UINT16"):
sample_bits = 16
sample_type = "UsignedWord"
sample_mask = "2#1111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "Byte"):
sample_bits = 8
sample_type = "UnsignedByte"
sample_mask = "2#11111111#"
else:
print( " %s: Not supported pixel type. Please convert to 8, 16 Int, or 32 Float" % gdal.GetDataTypeName(hBand.DataType))
sys.exit(1)
f.write('Object = IsisCube\n')
f.write(' Object = Core\n')
f.write(' StartByte = 1\n')
#f.write('/* The source image data definition. */\n')
f.write(' ^Core = %s\n' % (dst_cub))
f.write(' Format = BandSequential\n')
f.write('\n')
f.write(' Group = Dimensions\n')
f.write(' Samples = %d\n' % (hDataset.RasterXSize))
f.write(' Lines = %d\n' % hDataset.RasterYSize)
f.write(' Bands = %d\n' % hDataset.RasterCount)
f.write(' End_Group\n')
f.write('\n')
f.write(' Group = Pixels\n')
f.write(' Type = %s\n' % (sample_type))
f.write(' ByteOrder = Lsb\n')
if base is None:
f.write(' Base = %.10g\n' % ( hBand.GetOffset() ))
if EQUAL(sample_type, "REAL"):
if (hBand.GetOffset() <> 0):
print("Warning: a none 0 'base' was set but input is 32bit Float. ISIS will not use this value when type is REAL. Please use 'fx' to apply this base value: %.10g" % ( hBand.GetOffset() ))
else:
f.write(' Base = %.10g\n' % base )
if EQUAL(sample_type, "REAL"):
print("Warning: '-base' was set but input is 32bit Float. ISIS will not use this value when type is REAL. Please use 'fx' to apply this base value.")
if multiplier is None:
f.write(' Multiplier = %.10g\n' % ( hBand.GetScale() ))
if EQUAL(sample_type, "REAL"):
if (hBand.GetScale() <> 1):
print("Warning: a none 1 'multiplier' was set but input is 32bit Float. ISIS will not use this value when type is REAL. Please use 'fx' to apply this multiplier value: %.10g" % ( hBand.GetScale() ))
else:
f.write(' Multiplier = %.10g\n' % multiplier )
if EQUAL(sample_type, "REAL"):
print("Warning: '-multiplier' was set but input is 32bit Float. ISIS will not use this value when type is REAL. Please use 'fx' to apply this multiplier value.")
f.write(' End_Group\n')
f.write(' End_Object\n')
f.write('\n')
f.write(' Group = Archive\n')
f.write(' DataSetId = %s\n' % pszFilename.split(".")[0])
f.write(' ProducerInstitutionName = \"Astrogeology Science Center\"\n')
f.write(' ProducerId = Astrogeology\n')
f.write(' ProducerFullName = USGS\n')
if "_v" in pszFilename:
f.write(' ProductId = %s\n' % instrList[-1].split(".")[0].upper())
else:
f.write(' ProductId = n/a\n')
f.write(' ProductVersionId = n/a\n')
f.write(' InstrumentHostName = n/a\n')
f.write(' InstrumentName = n/a\n')
f.write(' InstrumentId = n/a\n')
f.write(' TargetName = %s\n' % target)
f.write(' MissionPhaseName = n/a\n')
f.write(' End_Group\n')
f.write('\n')
if target <> "n/a":
f.write(' Group = Mapping\n')
f.write(' ProjectionName = %s\n' % mapProjection)
if ((centLon < 0) and force360):
centLon = centLon + 360
f.write(' CenterLongitude = %.5f\n' % centLon)
f.write(' CenterLatitude = %.5f\n' % centLat)
if EQUAL(mapProjection,"TransverseMercator"):
f.write(' ScaleFactor = %6.5f\n' % TMscale)
f.write(' TargetName = %s\n' % target)
f.write(' EquatorialRadius = %.1f <meters>\n' % semiMajor)
f.write(' PolarRadius = %.1f <meters>\n' % semiMinor)
if EQUAL(mapProjection,"TransverseMercator"):
f.write(' LatitudeType = Planetographic\n')
else:
f.write(' LatitudeType = Planetocentric\n')
f.write(' LongitudeDirection = PositiveEast\n')
if (force360 or (lrx > 180)):
f.write(' LongitudeDomain = 360\n')
else:
f.write(' LongitudeDomain = 180\n')
f.write(' PixelResolution = %.8f <meters/pixel>\n' % mres )
f.write(' Scale = %.4f <pixel/degree>\n' % mapres )
if lry < uly:
f.write(' MinimumLatitude = %.8f\n' % lry)
f.write(' MaximumLatitude = %.8f\n' % uly)
else:
f.write(' MinimumLatitude = %.8f\n' % uly)
f.write(' MaximumLatitude = %.8f\n' % lry)
#push into 360 domain (for Astropedia)
if (force360):
if (ulx < 0):
ulx = ulx + 360
if (lrx < 0):
lrx = lrx + 360
if lrx < ulx:
f.write(' MinimumLongitude = %.8f\n' % lrx)
f.write(' MaximumLongitude = %.8f\n' % ulx)
else:
f.write(' MinimumLongitude = %.8f\n' % ulx)
f.write(' MaximumLongitude = %.8f\n' % lrx)
f.write(' UpperLeftCornerX = %.6f <meters>\n' % ( UpperLeftCornerX ))
f.write(' UpperLeftCornerY = %.6f <meters>\n' % ( UpperLeftCornerY ))
f.write(' End_Group\n')
f.write('End_Object\n')
f.write('\n')
f.write('Object = Label\n')
#NOT correct
f.write(' Bytes = 256\n')
f.write('End_Object\n')
f.write('\n')
f.write('Object = History\n')
f.write(' Name = IsisCube\n')
f.write(' StartByte = 1\n')
#NOT correct
f.write(' Bytes = 0\n')
f.write(' ^History = %s\n' % dst_hst)
f.write('End_Object\n')
f.write('End\n')
f.close()
#remove history until we fix the size. This is causing issues with cathist
#f_hst.write('Object = Astropedia_gdal2isis.py\n')
#f_hst.write(' Version = 0.1\n')
#f_hst.write(' ProgramVersion = 2013-06-05\n')
#f_hst.write(' ExecutionDateTime = %s\n' % str(datetime.datetime.now().isoformat()))
#f_hst.write(' Description = \"Convert GDAL supported image to an ISIS detached label and raw image\"\n')
#f_hst.write('End_Object\n')
f_hst.close()
#########################
#Export out raw image
#########################
#Setup the output dataset
print (' - ISIS3 label created: %s' % dst_lbl)
print (' - ISIS3 history created: %s' % dst_hst)
if bMakeImage:
print ('Please wait, writing out raw image: %s' % dst_cub)
driver = gdal.GetDriverByName('ENVI')
output = driver.CreateCopy(dst_cub, hDataset, 1)
if attach:
#print 'sleeping 5 seconds'
#time.sleep(5)
cmd = "/usgs/cdev/contrib/bin/run_cubeatt.sh %s %s" % (dst_lbl, attach_cub)
#cmd = "cubeatt from=%s to=%s\n" % (dst_lbl, attach_cub)
print cmd
#subprocess.call(cmd, shell=True)
os.system(cmd)
os.remove(dst_cub)
os.remove(dst_lbl)
os.remove(dst_hst)
os.remove(dst_hdr)
os.remove(dst_aux)
print ('Complete')
return 0
#/************************************************************************/
#/* GDALInfoReportCorner() */
#/************************************************************************/
def GDALInfoReportCorner( hDataset, hTransform, corner_name, x, y ):
line = "%-11s " % corner_name
#/* -------------------------------------------------------------------- */
#/* Transform the point into georeferenced coordinates. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
dfGeoX = adfGeoTransform[0] + adfGeoTransform[1] * x \
+ adfGeoTransform[2] * y
dfGeoY = adfGeoTransform[3] + adfGeoTransform[4] * x \
+ adfGeoTransform[5] * y
else:
line = line + ("(%7.1f,%7.1f)" % (x, y ))
print(line)
return False
#/* -------------------------------------------------------------------- */
#/* Report the georeferenced coordinates. */
#/* -------------------------------------------------------------------- */
if abs(dfGeoX) < 181 and abs(dfGeoY) < 91:
line = line + ( "(%12.7f,%12.7f) " % (dfGeoX, dfGeoY ))
else:
line = line + ( "(%12.3f,%12.3f) " % (dfGeoX, dfGeoY ))
#/* -------------------------------------------------------------------- */
#/* Transform to latlong and report. */
#/* -------------------------------------------------------------------- */
if hTransform is not None:
pnt = hTransform.TransformPoint(dfGeoX, dfGeoY, 0)
if pnt is not None:
line = line + ( "(%s," % gdal.DecToDMS( pnt[0], "Long", 2 ) )
line = line + ( "%s)" % gdal.DecToDMS( pnt[1], "Lat", 2 ) )
print(line)
return True
#/************************************************************************/
#/* GDALGetLon() */
#/************************************************************************/
def GDALGetLon( hDataset, hTransform, x, y ):
#/* -------------------------------------------------------------------- */
#/* Transform the point into georeferenced coordinates. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
dfGeoX = adfGeoTransform[0] + adfGeoTransform[1] * x \
+ adfGeoTransform[2] * y
dfGeoY = adfGeoTransform[3] + adfGeoTransform[4] * x \
+ adfGeoTransform[5] * y
else:
return 0.0
#/* -------------------------------------------------------------------- */
#/* Transform to latlong and report. */
#/* -------------------------------------------------------------------- */
if hTransform is not None:
pnt = hTransform.TransformPoint(dfGeoX, dfGeoY, 0)
if pnt is not None:
return pnt[0]
return dfGeoX
#/************************************************************************/
#/* GDALGetLat() */
#/************************************************************************/
def GDALGetLat( hDataset, hTransform, x, y ):
#/* -------------------------------------------------------------------- */
#/* Transform the point into georeferenced coordinates. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
dfGeoX = adfGeoTransform[0] + adfGeoTransform[1] * x \
+ adfGeoTransform[2] * y
dfGeoY = adfGeoTransform[3] + adfGeoTransform[4] * x \
+ adfGeoTransform[5] * y
else:
return 0.0
#/* -------------------------------------------------------------------- */
#/* Transform to latlong and report. */
#/* -------------------------------------------------------------------- */
if hTransform is not None:
pnt = hTransform.TransformPoint(dfGeoX, dfGeoY, 0)
if pnt is not None:
return pnt[1]
return dfGeoY
if __name__ == '__main__':
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < 1800: # because of GetGeoTransform(can_return_null)
print('ERROR: Python bindings of GDAL 1.8.0 or later required')
sys.exit(1)
sys.exit(main(sys.argv))
| 43.15062 | 212 | 0.448956 |
import sys
import math
import datetime
import time
import os
import subprocess
try:
from osgeo import gdal
from osgeo import osr
except:
import gdal
import osr
def Usage(theApp):
print( '\nUsage: Astropedia_gdal2ISIS3.py in.tif output.cub')
print( ' optional: to print out image information also send -debug')
print( ' optional: to just get a label *.lbl, send -noimage')
print( ' optional: to attach the *.lbl to the ISIS image -attach - requires ISIS3')
print( ' optional: to get lonsys=360, send -force360')
print( ' optional: to override the center Longitude, send -centerLon 180')
print( ' optional: to set scaler and offset send -base 17374000 and/or -multiplier 0.5')
print( 'Usage: Astropedia_gdal2ISIS3.py -debug in.cub output.cub\n')
print( 'Note: Currently this routine will only work for a limited set of images\n')
sys.exit(1)
def EQUAL(a, b):
return a.lower() == b.lower()
def main( argv = None ):
bComputeMinMax = False
bSample = False
bShowGCPs = True
bShowMetadata = False
bShowRAT=False
debug = False
attach = False
bStats = False
bApproxStats = True
bShowColorTable = True
bComputeChecksum = False
bReportHistograms = False
pszFilename = None
papszExtraMDDomains = [ ]
pszProjection = None
hTransform = None
bShowFileList = True
dst_cub = None
dst_lbl = None
dst_hst = None
bands = 1
centLat = 0
centLon = 0
centerLon = False
TMscale = 1.0
UpperLeftCornerX = 0
UpperLeftCornerY = 0
falseEast = 0
falseNorth = 0
bMakeImage = True
force360 = False
base = None
multiplier = None
#/* GDALGeneralCmdLineProcessor before it needs the drivers to be registered */
#/* for the --format or --formats options */
#for( i = 1; i < argc; i++ )
#{
# if EQUAL(argv[i],"--config") and i + 2 < argc and EQUAL(argv[i + 1], "GDAL_SKIP"):
# {
# CPLSetConfigOption( argv[i+1], argv[i+2] );
#
# i += 2;
# }
#}
#
#GDALAllRegister();
if argv is None:
argv = sys.argv
argv = gdal.GeneralCmdLineProcessor( argv )
if argv is None:
return 1
nArgc = len(argv)
#/* -------------------------------------------------------------------- */
#/* Parse arguments. */
#/* -------------------------------------------------------------------- */
i = 1
while i < nArgc:
if EQUAL(argv[i], "--utility_version"):
print("%s is running against GDAL %s" %
(argv[0], gdal.VersionInfo("RELEASE_NAME")))
return 0
elif EQUAL(argv[i], "-debug"):
debug = True
elif EQUAL(argv[i], "-attach"):
attach = True
elif EQUAL(argv[i], "-force360"):
force360 = True
elif EQUAL(argv[i], "-centerLon"):
i = i + 1
centerLon = float(argv[i])
elif EQUAL(argv[i], "-mm"):
bComputeMinMax = True
elif EQUAL(argv[i], "-hist"):
bReportHistograms = True
elif EQUAL(argv[i], "-stats"):
bStats = True
bApproxStats = False
elif EQUAL(argv[i], "-approx_stats"):
bStats = True
bApproxStats = True
elif EQUAL(argv[i], "-sample"):
bSample = True
elif EQUAL(argv[i], "-checksum"):
bComputeChecksum = True
elif EQUAL(argv[i], "-nogcp"):
bShowGCPs = False
elif EQUAL(argv[i], "-nomd"):
bShowMetadata = False
elif EQUAL(argv[i], "-norat"):
bShowRAT = False
elif EQUAL(argv[i], "-noct"):
bShowColorTable = False
elif EQUAL(argv[i], "-mdd") and i < nArgc-1:
i = i + 1
papszExtraMDDomains.append( argv[i] )
elif EQUAL(argv[i], "-nofl"):
bShowFileList = False
elif EQUAL(argv[i], "-noimage"):
bMakeImage = False
elif EQUAL(argv[i], "-base"):
i = i + 1
base = float(argv[i])
elif EQUAL(argv[i], "-multiplier"):
i = i + 1
multiplier = float(argv[i])
elif argv[i][0] == '-':
return Usage(argv[0])
elif pszFilename is None:
pszFilename = argv[i]
elif dst_cub is None:
dst_cub = argv[i]
else:
return Usage(argv[0])
i = i + 1
if pszFilename is None:
return Usage(argv[0])
if dst_cub is None:
return Usage(argv[0])
#/* -------------------------------------------------------------------- */
#/* Open dataset. */
#/* -------------------------------------------------------------------- */
hDataset = gdal.Open( pszFilename, gdal.GA_ReadOnly )
if hDataset is None:
print("gdalinfo failed - unable to open '%s'." % pszFilename )
sys.exit(1)
# Open the output file.
if dst_cub is not None:
dst_lbl = dst_cub.replace("CUB","LBL")
dst_lbl = dst_lbl.replace("cub","lbl")
dst_hst = dst_cub.replace("CUB","History.IsisCube")
dst_hst = dst_hst.replace("cub","History.IsisCube")
dst_hdr = dst_cub.replace("CUB","hdr")
dst_hdr = dst_hdr.replace("cub","hdr")
dst_aux = dst_cub.replace("CUB","cub.aux.xml")
dst_aux = dst_aux.replace("cub","cub.aux.xml")
if attach:
attach_cub = dst_cub
dst_cub = "XXX"+dst_cub
dst_lbl = "XXX"+dst_lbl
dst_hst = "XXX"+dst_hst
dst_hdr = "XXX"+dst_hdr
dst_aux = "XXX"+dst_aux
if (EQUAL(dst_lbl,dst_cub)):
print('Extension must be .CUB or .cub - unable to run using filename: %s' % pszFilename )
sys.exit(1)
else:
f = open(dst_lbl,'wt')
f_hst = open(dst_hst,'wt')
# else:
# f = sys.stdout
# dst_cub = "out.cub"
#/* -------------------------------------------------------------------- */
#/* Report general info. */
#/* -------------------------------------------------------------------- */
hDriver = hDataset.GetDriver();
if debug:
print( "Driver: %s/%s" % ( \
hDriver.ShortName, \
hDriver.LongName ))
papszFileList = hDataset.GetFileList();
if papszFileList is None or len(papszFileList) == 0:
print( "Files: none associated" )
else:
if debug:
print( "Files: %s" % papszFileList[0] )
if bShowFileList:
for i in range(1, len(papszFileList)):
print( " %s" % papszFileList[i] )
if debug:
print( "Size is %d, %d" % (hDataset.RasterXSize, hDataset.RasterYSize))
#/* -------------------------------------------------------------------- */
#/* Report projection. */
#/* -------------------------------------------------------------------- */
pszProjection = hDataset.GetProjectionRef()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
if debug:
print( "Coordinate System is:\n%s" % pszPrettyWkt )
else:
if debug:
print( "Coordinate System is `%s'" % pszProjection )
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
mapProjection = "None"
target = hSRS.GetAttrValue("DATUM",0)
target = target.replace("D_","").replace("_2000","").replace("GCS_","")
semiMajor = hSRS.GetSemiMajor()
semiMinor = hSRS.GetSemiMinor()
if (pszProjection[0:6] == "GEOGCS"):
mapProjection = "SimpleCylindrical"
centLon = hSRS.GetProjParm('central_meridian')
if (pszProjection[0:6] == "PROJCS"):
mapProjection = hSRS.GetAttrValue("PROJECTION",0)
if EQUAL(mapProjection,"Sinusoidal"):
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Equirectangular"):
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Transverse_Mercator"):
mapProjection = "TransverseMercator"
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
TMscale = hSRS.GetProjParm('scale_factor')
falseEast = hSRS.GetProjParm('false_easting')
falseNorth = hSRS.GetProjParm('false_northing')
if EQUAL(mapProjection,"Orthographic"):
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Mercator_1SP"):
mapProjection = "Mercator"
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Mercator"):
centLat = hSRS.GetProjParm('standard_parallel_1')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Polar_Stereographic"):
mapProjection = "PolarStereographic"
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Stereographic_South_Pole"):
mapProjection = "PolarStereographic"
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if EQUAL(mapProjection,"Stereographic_North_Pole"):
mapProjection = "PolarStereographic"
centLat = hSRS.GetProjParm('latitude_of_origin')
centLon = hSRS.GetProjParm('central_meridian')
if debug:
print( "Coordinate System is:\n%s" % pszPrettyWkt )
else:
print( "Warning - Currently we can't parse this type of projection" )
print( "Coordinate System is `%s'" % pszProjection )
target = "n/a"
else:
print( "Warning - No Coordinate System defined:\n" )
target = "n/a"
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
UpperLeftCornerX = adfGeoTransform[0] - falseEast
UpperLeftCornerY = adfGeoTransform[3] - falseNorth
if adfGeoTransform[2] == 0.0 and adfGeoTransform[4] == 0.0:
if debug:
print( "Origin = (%.15f,%.15f)" % ( \
adfGeoTransform[0], adfGeoTransform[3] ))
print( "Pixel Size = (%.15f,%.15f)" % ( \
adfGeoTransform[1], adfGeoTransform[5] ))
else:
if debug:
print( "GeoTransform =\n" \
" %.16g, %.16g, %.16g\n" \
" %.16g, %.16g, %.16g" % ( \
adfGeoTransform[0], \
adfGeoTransform[1], \
adfGeoTransform[2], \
adfGeoTransform[3], \
adfGeoTransform[4], \
adfGeoTransform[5] ))
if (pszProjection[0:6] == "GEOGCS"):
mapres = 1 / adfGeoTransform[1]
mres = adfGeoTransform[1] * (semiMajor * math.pi / 180.0)
else:
mapres = 1 / (adfGeoTransform[1] / (semiMajor * math.pi / 180.0))
mres = adfGeoTransform[1]
if bShowGCPs and hDataset.GetGCPCount() > 0:
pszProjection = hDataset.GetGCPProjection()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
if debug:
print( "GCP Projection = \n%s" % pszPrettyWkt )
else:
if debug:
print( "GCP Projection = %s" % \
pszProjection )
gcps = hDataset.GetGCPs()
i = 0
for gcp in gcps:
if debug:
print( "GCP[%3d]: Id=%s, Info=%s\n" \
" (%.15g,%.15g) -> (%.15g,%.15g,%.15g)" % ( \
i, gcp.Id, gcp.Info, \
gcp.GCPPixel, gcp.GCPLine, \
gcp.GCPX, gcp.GCPY, gcp.GCPZ ))
i = i + 1
if debug:
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
for extra_domain in papszExtraMDDomains:
papszMetadata = hDataset.GetMetadata_List(extra_domain)
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata (%s):" % extra_domain)
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
papszMetadata = hDataset.GetMetadata_List("SUBDATASETS")
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Subdatasets:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("GEOLOCATION")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Geolocation:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("RPC")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "RPC Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if pszProjection is not None and len(pszProjection) > 0:
hProj = osr.SpatialReference( pszProjection )
if hProj is not None:
hLatLong = hProj.CloneGeogCS()
if hLatLong is not None:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
hTransform = osr.CoordinateTransformation( hProj, hLatLong )
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find( 'Unable to load PROJ.4 library' ) != -1:
hTransform = None
if debug:
print( "Corner Coordinates:" )
GDALInfoReportCorner( hDataset, hTransform, "Upper Left", \
0.0, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Left", \
0.0, hDataset.RasterYSize);
GDALInfoReportCorner( hDataset, hTransform, "Upper Right", \
hDataset.RasterXSize, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Right", \
hDataset.RasterXSize, \
hDataset.RasterYSize );
GDALInfoReportCorner( hDataset, hTransform, "Center", \
hDataset.RasterXSize/2.0, \
hDataset.RasterYSize/2.0 );
ulx = GDALGetLon( hDataset, hTransform, 0.0, 0.0 );
uly = GDALGetLat( hDataset, hTransform, 0.0, 0.0 );
lrx = GDALGetLon( hDataset, hTransform, hDataset.RasterXSize, \
hDataset.RasterYSize );
lry = GDALGetLat( hDataset, hTransform, hDataset.RasterXSize, \
hDataset.RasterYSize );
if (centerLon):
centLon = centerLon
if (pszProjection[0:6] == "GEOGCS"):
UpperLeftCornerX = semiMajor * (ulx - centLon) * math.pi / 180.0
UpperLeftCornerY = semiMajor * uly * math.pi / 180.0
if debug:
bands = hDataset.RasterCount
for iBand in range(hDataset.RasterCount):
hBand = hDataset.GetRasterBand(iBand+1 )
(nBlockXSize, nBlockYSize) = hBand.GetBlockSize()
print( "Band %d Block=%dx%d Type=%s, ColorInterp=%s" % ( iBand+1, \
nBlockXSize, nBlockYSize, \
gdal.GetDataTypeName(hBand.DataType), \
gdal.GetColorInterpretationName( \
hBand.GetRasterColorInterpretation()) ))
if hBand.GetDescription() is not None \
and len(hBand.GetDescription()) > 0 :
print( " Description = %s" % hBand.GetDescription() )
dfMin = hBand.GetMinimum()
dfMax = hBand.GetMaximum()
if dfMin is not None or dfMax is not None or bComputeMinMax:
line = " "
if dfMin is not None:
line = line + ("Min=%.3f " % dfMin)
if dfMax is not None:
line = line + ("Max=%.3f " % dfMax)
if bComputeMinMax:
gdal.ErrorReset()
adfCMinMax = hBand.ComputeRasterMinMax(False)
if gdal.GetLastErrorType() == gdal.CE_None:
line = line + ( " Computed Min/Max=%.3f,%.3f" % ( \
adfCMinMax[0], adfCMinMax[1] ))
print( line )
stats = hBand.GetStatistics( bApproxStats, bStats)
if stats[3] >= 0.0:
print( " Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] ))
if bReportHistograms:
hist = hBand.GetDefaultHistogram(force = True, callback = gdal.TermProgress)
if hist is not None:
dfMin = hist[0]
dfMax = hist[1]
nBucketCount = hist[2]
panHistogram = hist[3]
print( " %d buckets from %g to %g:" % ( \
nBucketCount, dfMin, dfMax ))
line = ' '
for bucket in panHistogram:
line = line + ("%d " % bucket)
print(line)
if bComputeChecksum:
print( " Checksum=%d" % hBand.Checksum())
dfNoData = hBand.GetNoDataValue()
if dfNoData is not None:
if dfNoData != dfNoData:
print( " NoData Value=nan" )
else:
print( " NoData Value=%.18g" % dfNoData )
if hBand.GetOverviewCount() > 0:
line = " Overviews: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0 :
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%dx%d" % (hOverview.XSize, hOverview.YSize))
pszResampling = \
hOverview.GetMetadataItem( "RESAMPLING", "" )
if pszResampling is not None \
and len(pszResampling) >= 12 \
and EQUAL(pszResampling[0:12],"AVERAGE_BIT2"):
line = line + "*"
else:
line = line + "(null)"
print(line)
if bComputeChecksum:
line = " Overviews checksum: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
print(line)
if hBand.HasArbitraryOverviews():
print( " Overviews: arbitrary" )
nMaskFlags = hBand.GetMaskFlags()
if (nMaskFlags & (gdal.GMF_NODATA|gdal.GMF_ALL_VALID)) == 0:
hMaskBand = hBand.GetMaskBand()
line = " Mask Flags: "
if (nMaskFlags & gdal.GMF_PER_DATASET) != 0:
line = line + "PER_DATASET "
if (nMaskFlags & gdal.GMF_ALPHA) != 0:
line = line + "ALPHA "
if (nMaskFlags & gdal.GMF_NODATA) != 0:
line = line + "NODATA "
if (nMaskFlags & gdal.GMF_ALL_VALID) != 0:
line = line + "ALL_VALID "
print(line)
if hMaskBand is not None and \
hMaskBand.GetOverviewCount() > 0:
line = " Overviews of mask band: "
for iOverview in range(hMaskBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hMaskBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
if len(hBand.GetUnitType()) > 0:
print( " Unit Type: %s" % hBand.GetUnitType())
papszCategories = hBand.GetRasterCategoryNames()
if papszCategories is not None:
print( " Categories:" );
i = 0
for category in papszCategories:
print( " %3d: %s" % (i, category) )
i = i + 1
if hBand.GetScale() != 1.0 or hBand.GetOffset() != 0.0:
print( " Offset: %.15g, Scale:%.15g" % \
( hBand.GetOffset(), hBand.GetScale()))
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
hTable = hBand.GetRasterColorTable()
if hBand.GetRasterColorInterpretation() == gdal.GCI_PaletteIndex \
and hTable is not None:
print( " Color Table (%s with %d entries)" % (\
gdal.GetPaletteInterpretationName( \
hTable.GetPaletteInterpretation( )), \
hTable.GetCount() ))
if bShowColorTable:
for i in range(hTable.GetCount()):
sEntry = hTable.GetColorEntry(i)
print( " %3d: %d,%d,%d,%d" % ( \
i, \
sEntry[0],\
sEntry[1],\
sEntry[2],\
sEntry[3] ))
if bShowRAT:
hRAT = hBand.GetDefaultRAT()
instrList = pszFilename.split("_")
hBand = hDataset.GetRasterBand( 1 )
print gdal.GetDataTypeName(hBand.DataType)
if EQUAL(gdal.GetDataTypeName(hBand.DataType), "Float32"):
sample_bits = 32
sample_type = "Real"
sample_mask = "2#11111111111111111111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "Float64"):
sample_bits = 32
sample_type = "Real"
sample_mask = "2#11111111111111111111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "INT16"):
sample_bits = 16
sample_type = "SignedWord"
sample_mask = "2#1111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "UINT16"):
sample_bits = 16
sample_type = "UsignedWord"
sample_mask = "2#1111111111111111#"
elif EQUAL(gdal.GetDataTypeName(hBand.DataType), "Byte"):
sample_bits = 8
sample_type = "UnsignedByte"
sample_mask = "2#11111111#"
else:
print( " %s: Not supported pixel type. Please convert to 8, 16 Int, or 32 Float" % gdal.GetDataTypeName(hBand.DataType))
sys.exit(1)
f.write('Object = IsisCube\n')
f.write(' Object = Core\n')
f.write(' StartByte = 1\n')
f.write(' ^Core = %s\n' % (dst_cub))
f.write(' Format = BandSequential\n')
f.write('\n')
f.write(' Group = Dimensions\n')
f.write(' Samples = %d\n' % (hDataset.RasterXSize))
f.write(' Lines = %d\n' % hDataset.RasterYSize)
f.write(' Bands = %d\n' % hDataset.RasterCount)
f.write(' End_Group\n')
f.write('\n')
f.write(' Group = Pixels\n')
f.write(' Type = %s\n' % (sample_type))
f.write(' ByteOrder = Lsb\n')
if base is None:
f.write(' Base = %.10g\n' % ( hBand.GetOffset() ))
if EQUAL(sample_type, "REAL"):
if (hBand.GetOffset() <> 0):
print("Warning: a none 0 'base' was set but input is 32bit Float. ISIS will not use this value when type is REAL. Please use 'fx' to apply this base value: %.10g" % ( hBand.GetOffset() ))
else:
f.write(' Base = %.10g\n' % base )
if EQUAL(sample_type, "REAL"):
print("Warning: '-base' was set but input is 32bit Float. ISIS will not use this value when type is REAL. Please use 'fx' to apply this base value.")
if multiplier is None:
f.write(' Multiplier = %.10g\n' % ( hBand.GetScale() ))
if EQUAL(sample_type, "REAL"):
if (hBand.GetScale() <> 1):
print("Warning: a none 1 'multiplier' was set but input is 32bit Float. ISIS will not use this value when type is REAL. Please use 'fx' to apply this multiplier value: %.10g" % ( hBand.GetScale() ))
else:
f.write(' Multiplier = %.10g\n' % multiplier )
if EQUAL(sample_type, "REAL"):
print("Warning: '-multiplier' was set but input is 32bit Float. ISIS will not use this value when type is REAL. Please use 'fx' to apply this multiplier value.")
f.write(' End_Group\n')
f.write(' End_Object\n')
f.write('\n')
f.write(' Group = Archive\n')
f.write(' DataSetId = %s\n' % pszFilename.split(".")[0])
f.write(' ProducerInstitutionName = \"Astrogeology Science Center\"\n')
f.write(' ProducerId = Astrogeology\n')
f.write(' ProducerFullName = USGS\n')
if "_v" in pszFilename:
f.write(' ProductId = %s\n' % instrList[-1].split(".")[0].upper())
else:
f.write(' ProductId = n/a\n')
f.write(' ProductVersionId = n/a\n')
f.write(' InstrumentHostName = n/a\n')
f.write(' InstrumentName = n/a\n')
f.write(' InstrumentId = n/a\n')
f.write(' TargetName = %s\n' % target)
f.write(' MissionPhaseName = n/a\n')
f.write(' End_Group\n')
f.write('\n')
if target <> "n/a":
f.write(' Group = Mapping\n')
f.write(' ProjectionName = %s\n' % mapProjection)
if ((centLon < 0) and force360):
centLon = centLon + 360
f.write(' CenterLongitude = %.5f\n' % centLon)
f.write(' CenterLatitude = %.5f\n' % centLat)
if EQUAL(mapProjection,"TransverseMercator"):
f.write(' ScaleFactor = %6.5f\n' % TMscale)
f.write(' TargetName = %s\n' % target)
f.write(' EquatorialRadius = %.1f <meters>\n' % semiMajor)
f.write(' PolarRadius = %.1f <meters>\n' % semiMinor)
if EQUAL(mapProjection,"TransverseMercator"):
f.write(' LatitudeType = Planetographic\n')
else:
f.write(' LatitudeType = Planetocentric\n')
f.write(' LongitudeDirection = PositiveEast\n')
if (force360 or (lrx > 180)):
f.write(' LongitudeDomain = 360\n')
else:
f.write(' LongitudeDomain = 180\n')
f.write(' PixelResolution = %.8f <meters/pixel>\n' % mres )
f.write(' Scale = %.4f <pixel/degree>\n' % mapres )
if lry < uly:
f.write(' MinimumLatitude = %.8f\n' % lry)
f.write(' MaximumLatitude = %.8f\n' % uly)
else:
f.write(' MinimumLatitude = %.8f\n' % uly)
f.write(' MaximumLatitude = %.8f\n' % lry)
if (force360):
if (ulx < 0):
ulx = ulx + 360
if (lrx < 0):
lrx = lrx + 360
if lrx < ulx:
f.write(' MinimumLongitude = %.8f\n' % lrx)
f.write(' MaximumLongitude = %.8f\n' % ulx)
else:
f.write(' MinimumLongitude = %.8f\n' % ulx)
f.write(' MaximumLongitude = %.8f\n' % lrx)
f.write(' UpperLeftCornerX = %.6f <meters>\n' % ( UpperLeftCornerX ))
f.write(' UpperLeftCornerY = %.6f <meters>\n' % ( UpperLeftCornerY ))
f.write(' End_Group\n')
f.write('End_Object\n')
f.write('\n')
f.write('Object = Label\n')
f.write(' Bytes = 256\n')
f.write('End_Object\n')
f.write('\n')
f.write('Object = History\n')
f.write(' Name = IsisCube\n')
f.write(' StartByte = 1\n')
f.write(' Bytes = 0\n')
f.write(' ^History = %s\n' % dst_hst)
f.write('End_Object\n')
f.write('End\n')
f.close()
f_hst.close()
nt ('Complete')
return 0
def GDALInfoReportCorner( hDataset, hTransform, corner_name, x, y ):
    """Print one corner of *hDataset* in georeferenced coordinates.

    x, y are pixel/line coordinates.  When *hTransform* is given, a
    reprojected lon/lat rendering in DMS is appended.  Returns True when a
    georeferenced coordinate could be printed, False when the dataset has
    no geotransform (only the raw pixel position is printed then).
    """
    line = "%-11s " % corner_name
    # Pixel/line -> georeferenced X/Y via the affine geotransform.
    adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
    if adfGeoTransform is not None:
        dfGeoX = adfGeoTransform[0] + adfGeoTransform[1] * x \
            + adfGeoTransform[2] * y
        dfGeoY = adfGeoTransform[3] + adfGeoTransform[4] * x \
            + adfGeoTransform[5] * y
    else:
        # No geotransform: report the raw pixel/line position only.
        line = line + ("(%7.1f,%7.1f)" % (x, y ))
        print(line)
        return False
    # Values in lon/lat range get degree-style precision; larger values are
    # assumed to be projected meters and get fewer decimals.
    if abs(dfGeoX) < 181 and abs(dfGeoY) < 91:
        line = line + ( "(%12.7f,%12.7f) " % (dfGeoX, dfGeoY ))
    else:
        line = line + ( "(%12.3f,%12.3f) " % (dfGeoX, dfGeoY ))
    # Optionally reproject to geographic coordinates and append DMS text.
    if hTransform is not None:
        pnt = hTransform.TransformPoint(dfGeoX, dfGeoY, 0)
        if pnt is not None:
            line = line + ( "(%s," % gdal.DecToDMS( pnt[0], "Long", 2 ) )
            line = line + ( "%s)" % gdal.DecToDMS( pnt[1], "Lat", 2 ) )
    print(line)
    return True
def GDALGetLon( hDataset, hTransform, x, y ):
    """Return the longitude (or georeferenced X) of pixel (x, y).

    Falls back to 0.0 when the dataset has no geotransform, and to the
    untransformed georeferenced X when no coordinate transform is given
    or the reprojection fails.
    """
    gt = hDataset.GetGeoTransform(can_return_null=True)
    if gt is None:
        return 0.0
    geo_x = gt[0] + gt[1] * x + gt[2] * y
    geo_y = gt[3] + gt[4] * x + gt[5] * y
    if hTransform is not None:
        reprojected = hTransform.TransformPoint(geo_x, geo_y, 0)
        if reprojected is not None:
            return reprojected[0]
    return geo_x
def GDALGetLat( hDataset, hTransform, x, y ):
    """Return the latitude (or georeferenced Y) of pixel (x, y).

    Falls back to 0.0 when the dataset has no geotransform, and to the
    untransformed georeferenced Y when no coordinate transform is given
    or the reprojection fails.
    """
    gt = hDataset.GetGeoTransform(can_return_null=True)
    if gt is None:
        return 0.0
    geo_x = gt[0] + gt[1] * x + gt[2] * y
    geo_y = gt[3] + gt[4] * x + gt[5] * y
    if hTransform is not None:
        reprojected = hTransform.TransformPoint(geo_x, geo_y, 0)
        if reprojected is not None:
            return reprojected[1]
    return geo_y
if __name__ == '__main__':
    # GDAL encodes its version as an integer, e.g. 1800 for 1.8.0; the
    # script depends on Python-binding APIs introduced in that release.
    version_num = int(gdal.VersionInfo('VERSION_NUM'))
    if version_num < 1800:
        print('ERROR: Python bindings of GDAL 1.8.0 or later required')
        sys.exit(1)
    sys.exit(main(sys.argv))
| false | true |
f72f1bae0e6194d644c89e50750e438c176f29ce | 1,867 | py | Python | service/es_utils.py | LandRegistry/digital-register-elasticsearch-updater | 2e7b95d8d5eac70e9bba6612bed09bf58376e781 | [
"MIT"
] | null | null | null | service/es_utils.py | LandRegistry/digital-register-elasticsearch-updater | 2e7b95d8d5eac70e9bba6612bed09bf58376e781 | [
"MIT"
] | 12 | 2015-06-05T15:10:25.000Z | 2016-04-21T15:17:19.000Z | service/es_utils.py | LandRegistry/digital-register-elasticsearch-updater | 2e7b95d8d5eac70e9bba6612bed09bf58376e781 | [
"MIT"
] | 1 | 2021-04-11T06:03:41.000Z | 2021-04-11T06:03:41.000Z | from elasticsearch import Elasticsearch # type: ignore
from elasticsearch.client import IndicesClient # type: ignore
from elasticsearch.helpers import bulk # type: ignore
import logging
from config import CONFIG_DICT
LOGGER = logging.getLogger(__name__)
ELASTICSEARCH_NODES = [CONFIG_DICT['ELASTICSEARCH_URI']]
elasticsearch_client = Elasticsearch(ELASTICSEARCH_NODES)
indices_client = IndicesClient(elasticsearch_client)
def ensure_mapping_exists(index_name, doc_type, mapping):
    """Create *index_name* if it does not exist, then apply *mapping* for *doc_type*.

    NOTE(review): the index is created implicitly by indexing an empty
    document (which auto-creates the index with default settings) -- confirm
    this is intended rather than an explicit indices.create() call.
    """
    if index_name not in elasticsearch_client.indices.status()['indices']:
        LOGGER.info(
            "Index '{}' not found in elasticsearch. Creating...".format(index_name)
        )
        # Indexing any document auto-creates the target index.
        elasticsearch_client.index(index=index_name, doc_type=doc_type, body={})
    else:
        LOGGER.info("Index '{}' with doc type '{}' already exists".format(index_name, doc_type))
    LOGGER.info("Ensuring mapping exists for index '{}', doc type '{}'".format(
        index_name, doc_type
    ))
    # put_mapping is applied unconditionally; it is a no-op for an
    # already-identical mapping.
    indices_client.put_mapping(
        index=index_name, doc_type=doc_type, body=mapping,
    )
def execute_elasticsearch_actions(actions):
    """Send *actions* (bulk-API action dicts) to the cluster in one bulk call."""
    return bulk(elasticsearch_client, actions)
def search(query_dict, index_name, doc_type):
    """Run *query_dict* against the index and return the raw list of hits."""
    result = elasticsearch_client.search(
        index=index_name, doc_type=doc_type, body=query_dict
    )
    # Unwrap the response down to the hit documents themselves.
    return result['hits']['hits']
def get_upsert_action(index_name, doc_type, document, id):
    """Build a bulk-API action that updates document *id*, inserting it if absent."""
    action = {
        '_op_type': 'update',
        '_index': index_name,
        '_type': doc_type,
        '_id': id,
    }
    # 'doc_as_upsert' makes the partial update create the document when it
    # does not exist yet.
    action['doc'] = document
    action['doc_as_upsert'] = True
    return action
def get_delete_action(index_name, doc_type, id):
    """Build a bulk-API action that deletes document *id* from the index."""
    return dict(
        _op_type='delete',
        _index=index_name,
        _type=doc_type,
        _id=id,
    )
def get_cluster_info():
    """Return basic cluster information from the root endpoint."""
    return elasticsearch_client.info()
| 26.671429 | 96 | 0.681843 | from elasticsearch import Elasticsearch
from elasticsearch.client import IndicesClient
from elasticsearch.helpers import bulk
import logging
from config import CONFIG_DICT
LOGGER = logging.getLogger(__name__)
ELASTICSEARCH_NODES = [CONFIG_DICT['ELASTICSEARCH_URI']]
elasticsearch_client = Elasticsearch(ELASTICSEARCH_NODES)
indices_client = IndicesClient(elasticsearch_client)
def ensure_mapping_exists(index_name, doc_type, mapping):
    """Create *index_name* if it does not exist, then apply *mapping* for *doc_type*.

    NOTE(review): the index is created implicitly by indexing an empty
    document (which auto-creates the index with default settings) -- confirm
    this is intended rather than an explicit indices.create() call.
    """
    if index_name not in elasticsearch_client.indices.status()['indices']:
        LOGGER.info(
            "Index '{}' not found in elasticsearch. Creating...".format(index_name)
        )
        # Indexing any document auto-creates the target index.
        elasticsearch_client.index(index=index_name, doc_type=doc_type, body={})
    else:
        LOGGER.info("Index '{}' with doc type '{}' already exists".format(index_name, doc_type))
    LOGGER.info("Ensuring mapping exists for index '{}', doc type '{}'".format(
        index_name, doc_type
    ))
    # put_mapping is applied unconditionally; it is a no-op for an
    # already-identical mapping.
    indices_client.put_mapping(
        index=index_name, doc_type=doc_type, body=mapping,
    )
def execute_elasticsearch_actions(actions):
    """Send *actions* (bulk-API action dicts) to the cluster in one bulk call."""
    return bulk(elasticsearch_client, actions)
def search(query_dict, index_name, doc_type):
    """Run *query_dict* against the index and return the raw list of hits."""
    result = elasticsearch_client.search(
        index=index_name, doc_type=doc_type, body=query_dict
    )
    # Unwrap the response down to the hit documents themselves.
    return result['hits']['hits']
def get_upsert_action(index_name, doc_type, document, id):
    """Build a bulk-API action that updates document *id*, inserting it if absent."""
    action = {
        '_op_type': 'update',
        '_index': index_name,
        '_type': doc_type,
        '_id': id,
    }
    # 'doc_as_upsert' makes the partial update create the document when it
    # does not exist yet.
    action['doc'] = document
    action['doc_as_upsert'] = True
    return action
def get_delete_action(index_name, doc_type, id):
    """Build a bulk-API action that deletes document *id* from the index."""
    return dict(
        _op_type='delete',
        _index=index_name,
        _type=doc_type,
        _id=id,
    )
def get_cluster_info():
    """Return basic cluster information from the root endpoint."""
    return elasticsearch_client.info()
| true | true |
f72f1bee9e17f5e0644db620d7e3407dcb35a5dc | 716 | py | Python | src/pre_R1956001.py | atrettel/sheardata | db09d70ba464e24bdb2fb7729b6f3e905af68d72 | [
"MIT"
] | 2 | 2020-04-27T19:56:07.000Z | 2022-02-27T22:16:19.000Z | src/pre_R1956001.py | atrettel/sheardata | db09d70ba464e24bdb2fb7729b6f3e905af68d72 | [
"MIT"
] | null | null | null | src/pre_R1956001.py | atrettel/sheardata | db09d70ba464e24bdb2fb7729b6f3e905af68d72 | [
"MIT"
] | 1 | 2021-11-05T18:39:35.000Z | 2021-11-05T18:39:35.000Z | #!/usr/bin/env python3
# Copyright (C) 2020-2021 Andrew Trettel
#
# SPDX-License-Identifier: MIT
import csv
import math
import sqlite3
import sheardata as sd
import sys
# Open the shear-data SQLite database named on the command line and make
# SQLite enforce the schema's foreign-key constraints for the inserts below.
conn = sqlite3.connect( sys.argv[1] )
cursor = conn.cursor()
cursor.execute( "PRAGMA foreign_keys = ON;" )
# Reichardt's 1956 boundary-driven-flow experiment, first study of that year.
flow_class = sd.FC_BOUNDARY_DRIVEN_FLOW
year = 1956
study_number = 1
study_id = sd.add_study(
    cursor,
    flow_class_id=flow_class,
    year=year,
    study_number=study_number,
    study_type_id=sd.ST_EXPERIMENT,
)
# Both Reichardt publications are registered as primary sources.
sd.add_study_source( cursor, study_id, "ReichardtH+1956+deu+JOUR", sd.PRIMARY_SOURCE )
sd.add_study_source( cursor, study_id, "ReichardtH+1959+deu+RPRT", sd.PRIMARY_SOURCE )
conn.commit()
conn.close()
| 21.058824 | 86 | 0.736034 |
import csv
import math
import sqlite3
import sheardata as sd
import sys
# Open the shear-data SQLite database named on the command line and make
# SQLite enforce the schema's foreign-key constraints for the inserts below.
conn = sqlite3.connect( sys.argv[1] )
cursor = conn.cursor()
cursor.execute( "PRAGMA foreign_keys = ON;" )
# Reichardt's 1956 boundary-driven-flow experiment, first study of that year.
flow_class = sd.FC_BOUNDARY_DRIVEN_FLOW
year = 1956
study_number = 1
study_id = sd.add_study(
    cursor,
    flow_class_id=flow_class,
    year=year,
    study_number=study_number,
    study_type_id=sd.ST_EXPERIMENT,
)
# Both Reichardt publications are registered as primary sources.
sd.add_study_source( cursor, study_id, "ReichardtH+1956+deu+JOUR", sd.PRIMARY_SOURCE )
sd.add_study_source( cursor, study_id, "ReichardtH+1959+deu+RPRT", sd.PRIMARY_SOURCE )
conn.commit()
conn.close()
| true | true |
f72f1c4792e80f4b2c6d910802fa3060425bf488 | 8,356 | py | Python | pythonExamples/fileIngestModule.py | trac3me/autopsy | ff70cdd19bfad1b4966c5e25933035daba132535 | [
"Apache-2.0"
] | 1 | 2015-01-31T19:20:43.000Z | 2015-01-31T19:20:43.000Z | pythonExamples/fileIngestModule.py | trac3me/autopsy | ff70cdd19bfad1b4966c5e25933035daba132535 | [
"Apache-2.0"
] | null | null | null | pythonExamples/fileIngestModule.py | trac3me/autopsy | ff70cdd19bfad1b4966c5e25933035daba132535 | [
"Apache-2.0"
] | null | null | null | # Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Simple file-level ingest module for Autopsy.
# Search for TODO for the things that you need to change
# See http://sleuthkit.org/autopsy/docs/api-docs/latest/index.html for documentation
import jarray
import inspect
from java.lang import System
from java.util.logging import Level
from org.sleuthkit.datamodel import Score
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import TskData
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import FileIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.casemodule.services import Blackboard
from java.util import Arrays
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the anlaysis.
# TODO: Rename this to something more specific. Search and replace for it because it is used a few times
class SampleJythonFileIngestModuleFactory(IngestModuleFactoryAdapter):
    """Factory that describes the module to Autopsy and creates instances of it."""
    # TODO: give it a unique name.  Will be shown in module list, logs, etc.
    moduleName = "Sample file ingest Module"
    def getModuleDisplayName(self):
        """Name shown in the Autopsy module list."""
        return self.moduleName
    # TODO: Give it a description
    def getModuleDescription(self):
        return "Sample module that does X, Y, and Z."
    def getModuleVersionNumber(self):
        return "1.0"
    # Return true if module wants to get called for each file
    def isFileIngestModuleFactory(self):
        return True
    # can return null if isFileIngestModuleFactory returns false
    def createFileIngestModule(self, ingestOptions):
        # Autopsy creates one module instance per ingest thread.
        return SampleJythonFileIngestModule()
# File-level ingest module. One gets created per thread.
# TODO: Rename this to something more specific. Could just remove "Factory" from above name.
# Looks at the attributes of the passed in file.
class SampleJythonFileIngestModule(FileIngestModule):
_logger = Logger.getLogger(SampleJythonFileIngestModuleFactory.moduleName)
    def log(self, level, msg):
        """Log *msg* at *level*, tagging the record with the calling method's name."""
        self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)
# Where any setup and configuration is done
# 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
# See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
# TODO: Add any setup code that you need here.
    def startUp(self, context):
        """Per-thread setup.

        'context' is an org.sleuthkit.autopsy.ingest.IngestJobContext.
        Raise IngestModule.IngestModuleException to abort on setup failure.
        """
        # Per-thread counter of .txt files flagged by process().
        self.filesFound = 0
        # Throw an IngestModule.IngestModuleException exception if there was a problem setting up
        # raise IngestModuleException("Oh No!")
        pass
# Where the analysis is done. Each file will be passed into here.
# The 'file' object being passed in is of type org.sleuthkit.datamodel.AbstractFile.
# See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/latest/classorg_1_1sleuthkit_1_1datamodel_1_1_abstract_file.html
# TODO: Add your analysis code in here.
    def process(self, file):
        """Analyze one AbstractFile: flag .txt files on the blackboard, echo
        existing interesting-file attributes, and count the file's bytes."""
        # Skip non-files (unallocated/unused blocks, directories, etc.).
        if ((file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS) or
            (file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNUSED_BLOCKS) or
            (file.isFile() == False)):
            return IngestModule.ProcessResult.OK
        # Use blackboard class to index blackboard artifacts for keyword search
        blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()
        # For an example, we will flag files with .txt in the name and make a blackboard artifact.
        if file.getName().lower().endswith(".txt"):
            self.log(Level.INFO, "Found a text file: " + file.getName())
            self.filesFound+=1
            # Make an artifact on the blackboard.  TSK_INTERESTING_FILE_HIT is a generic type of
            # artifact.  Refer to the developer docs for other examples.
            attrs = Arrays.asList(BlackboardAttribute(BlackboardAttribute.Type.TSK_SET_NAME,
                                                      SampleJythonFileIngestModuleFactory.moduleName, "Text Files"))
            art = file.newAnalysisResult(BlackboardArtifact.Type.TSK_INTERESTING_FILE_HIT, Score.SCORE_LIKELY_NOTABLE,
                                         None, "Text Files", None, attrs).getAnalysisResult()
            try:
                # post the artifact for listeners of artifact events
                blackboard.postArtifact(art, SampleJythonFileIngestModuleFactory.moduleName)
            except Blackboard.BlackboardException as e:
                self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
        # For the example (this wouldn't be needed normally), we'll query the blackboard for data that was added
        # by other modules. We then iterate over its attributes.  We'll just print them, but you would probably
        # want to do something with them.
        artifactList = file.getArtifacts(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT)
        for artifact in artifactList:
            attributeList = artifact.getAttributes()
            for attrib in attributeList:
                self.log(Level.INFO, attrib.toString())
        # To further the example, this code will read the contents of the file and count the number of bytes
        inputStream = ReadContentInputStream(file)
        buffer = jarray.zeros(1024, "b")
        totLen = 0
        # NOTE: 'len' shadows the builtin here; kept as-is for this sample.
        len = inputStream.read(buffer)
        while (len != -1):
            totLen = totLen + len
            len = inputStream.read(buffer)
        return IngestModule.ProcessResult.OK
# Where any shutdown code is run and resources are freed.
# TODO: Add any shutdown code that you need here.
def shutDown(self):
# As a final part of this example, we'll send a message to the ingest inbox with the number of files found (in this thread)
message = IngestMessage.createMessage(
IngestMessage.MessageType.DATA, SampleJythonFileIngestModuleFactory.moduleName,
str(self.filesFound) + " files found")
ingestServices = IngestServices.getInstance().postMessage(message) | 49.443787 | 131 | 0.732049 |
import jarray
import inspect
from java.lang import System
from java.util.logging import Level
from org.sleuthkit.datamodel import Score
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import TskData
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import FileIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.casemodule.services import Blackboard
from java.util import Arrays
class SampleJythonFileIngestModuleFactory(IngestModuleFactoryAdapter):
    """Factory that describes the module to Autopsy and creates instances of it."""
    # Shown in the Autopsy module list, logs, etc.
    moduleName = "Sample file ingest Module"
    def getModuleDisplayName(self):
        """Name shown in the Autopsy module list."""
        return self.moduleName
    def getModuleDescription(self):
        return "Sample module that does X, Y, and Z."
    def getModuleVersionNumber(self):
        return "1.0"
    def isFileIngestModuleFactory(self):
        # True: Autopsy should call this module for each file.
        return True
    def createFileIngestModule(self, ingestOptions):
        # Autopsy creates one module instance per ingest thread.
        return SampleJythonFileIngestModule()
class SampleJythonFileIngestModule(FileIngestModule):
_logger = Logger.getLogger(SampleJythonFileIngestModuleFactory.moduleName)
def log(self, level, msg):
self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)
def startUp(self, context):
self.filesFound = 0
pass
def process(self, file):
if ((file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS) or
(file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNUSED_BLOCKS) or
(file.isFile() == False)):
return IngestModule.ProcessResult.OK
blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()
if file.getName().lower().endswith(".txt"):
self.log(Level.INFO, "Found a text file: " + file.getName())
self.filesFound+=1
attrs = Arrays.asList(BlackboardAttribute(BlackboardAttribute.Type.TSK_SET_NAME,
SampleJythonFileIngestModuleFactory.moduleName, "Text Files"))
art = file.newAnalysisResult(BlackboardArtifact.Type.TSK_INTERESTING_FILE_HIT, Score.SCORE_LIKELY_NOTABLE,
None, "Text Files", None, attrs).getAnalysisResult()
try:
blackboard.postArtifact(art, SampleJythonFileIngestModuleFactory.moduleName)
except Blackboard.BlackboardException as e:
self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
# want to do something with them.
artifactList = file.getArtifacts(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT)
for artifact in artifactList:
attributeList = artifact.getAttributes()
for attrib in attributeList:
self.log(Level.INFO, attrib.toString())
# To further the example, this code will read the contents of the file and count the number of bytes
inputStream = ReadContentInputStream(file)
buffer = jarray.zeros(1024, "b")
totLen = 0
len = inputStream.read(buffer)
while (len != -1):
totLen = totLen + len
len = inputStream.read(buffer)
return IngestModule.ProcessResult.OK
# Where any shutdown code is run and resources are freed.
# TODO: Add any shutdown code that you need here.
def shutDown(self):
# As a final part of this example, we'll send a message to the ingest inbox with the number of files found (in this thread)
message = IngestMessage.createMessage(
IngestMessage.MessageType.DATA, SampleJythonFileIngestModuleFactory.moduleName,
str(self.filesFound) + " files found")
ingestServices = IngestServices.getInstance().postMessage(message) | true | true |
f72f1c9c7960ef51f7a686696065cb2abe68dd75 | 144 | py | Python | django_async_redis/__init__.py | adamchainz/django-async-redis | e9a54ba885fa76e504ff18726f333264585da34a | [
"Apache-2.0"
] | 14 | 2020-10-06T18:29:07.000Z | 2021-10-02T05:11:14.000Z | django_async_redis/__init__.py | adamchainz/django-async-redis | e9a54ba885fa76e504ff18726f333264585da34a | [
"Apache-2.0"
] | 4 | 2020-10-06T18:38:09.000Z | 2021-08-24T20:38:54.000Z | django_async_redis/__init__.py | adamchainz/django-async-redis | e9a54ba885fa76e504ff18726f333264585da34a | [
"Apache-2.0"
] | 4 | 2020-10-07T07:13:21.000Z | 2022-01-27T21:28:30.000Z | """Top-level package for Django Async Redis."""
__author__ = """Andrew Chen Wang"""
__email__ = "acwangpython@gmail.com"
__version__ = "0.1.0"
| 24 | 47 | 0.701389 |
__author__ = """Andrew Chen Wang"""
__email__ = "acwangpython@gmail.com"
__version__ = "0.1.0"
| true | true |
f72f1e1b45423dbc134742585996a8848e0dc037 | 476 | py | Python | Dataset/Leetcode/train/12/549.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/12/549.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/12/549.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
roman_nums = {
1000: 'M',
900: 'CM',
500: 'D',
400: 'CD',
100: 'C',
90: 'XC',
50: 'L',
40: 'XL',
10: 'X',
9: 'IX',
5: 'V',
4: 'IV',
1: 'I'
}
def XXX(self, num: int) -> str:
for (roman_num, c) in self.roman_nums.items():
if num >= roman_num:
return c + self.XXX(num - roman_num)
return ""
| 19.04 | 54 | 0.355042 | class Solution:
roman_nums = {
1000: 'M',
900: 'CM',
500: 'D',
400: 'CD',
100: 'C',
90: 'XC',
50: 'L',
40: 'XL',
10: 'X',
9: 'IX',
5: 'V',
4: 'IV',
1: 'I'
}
def XXX(self, num: int) -> str:
for (roman_num, c) in self.roman_nums.items():
if num >= roman_num:
return c + self.XXX(num - roman_num)
return ""
| false | true |
f72f1e8b1425ad70062469566557c13ef9b23423 | 16,879 | py | Python | models/transformer.py | Honghe/AnchorDETR | fc3d45441241cd689b28878d3aa4b0bffb33a8b8 | [
"Apache-2.0"
] | null | null | null | models/transformer.py | Honghe/AnchorDETR | fc3d45441241cd689b28878d3aa4b0bffb33a8b8 | [
"Apache-2.0"
] | null | null | null | models/transformer.py | Honghe/AnchorDETR | fc3d45441241cd689b28878d3aa4b0bffb33a8b8 | [
"Apache-2.0"
] | null | null | null | # ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from util.misc import inverse_sigmoid
from models.row_column_decoupled_attention import MultiheadRCDA
class Transformer(nn.Module):
    """AnchorDETR transformer.

    Runs spatial (and, for multi-level features, level-mixing) encoder
    layers over the backbone feature maps, then decodes object queries --
    anchor points crossed with learned patterns -- into per-decoder-layer
    class logits and box predictions.
    """
    def __init__(self, d_model=256, nhead=8,
                 num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.,
                 activation="relu", num_feature_levels=3,num_query_position = 300,num_query_pattern=3,
                 spatial_prior="learned",attention_type="RCDA"):
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        self.attention_type = attention_type
        encoder_layer = TransformerEncoderLayerSpatial(d_model, dim_feedforward,
                                                       dropout, activation, nhead , attention_type)
        encoder_layer_level = TransformerEncoderLayerLevel(d_model, dim_feedforward,
                                                           dropout, activation, nhead)
        decoder_layer = TransformerDecoderLayer(d_model, dim_feedforward,
                                                dropout, activation, nhead,
                                                num_feature_levels, attention_type)
        # A single feature level has nothing to mix across levels, so every
        # encoder layer is spatial; otherwise half become level-mixing layers.
        if num_feature_levels == 1:
            self.num_encoder_layers_level = 0
        else:
            self.num_encoder_layers_level = num_encoder_layers // 2
        self.num_encoder_layers_spatial = num_encoder_layers - self.num_encoder_layers_level
        self.encoder_layers = _get_clones(encoder_layer, self.num_encoder_layers_spatial)
        self.encoder_layers_level = _get_clones(encoder_layer_level, self.num_encoder_layers_level)
        self.decoder_layers = _get_clones(decoder_layer, num_decoder_layers)
        self.spatial_prior=spatial_prior
        if num_feature_levels>1:
            self.level_embed = nn.Embedding(num_feature_levels, d_model)
        self.num_pattern = num_query_pattern
        self.pattern = nn.Embedding(self.num_pattern, d_model)
        self.num_position = num_query_position
        if self.spatial_prior == "learned":
            # Learned 2-D anchor coordinates in [0, 1] (initialized in
            # _reset_parameters); the "grid" prior builds them on the fly.
            self.position = nn.Embedding(self.num_position, 2)
        # Small MLPs mapping sine position encodings into the model space.
        self.adapt_pos2d = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.ReLU(),
            nn.Linear(d_model, d_model),
        )
        self.adapt_pos1d = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.ReLU(),
            nn.Linear(d_model, d_model),
        )
        self.num_layers = num_decoder_layers
        # NOTE(review): 91 (presumably the COCO category count) is hard-coded
        # here and again in _reset_parameters -- confirm before reusing the
        # model on other datasets.
        num_classes = 91
        self.class_embed = nn.Linear(d_model, num_classes)
        self.bbox_embed = MLP(d_model, d_model, 4, 3)
        self._reset_parameters()
    def _reset_parameters(self):
        """Initialize the prediction heads and share them across decoder layers."""
        num_pred = self.num_layers
        num_classes = 91
        # Focal-loss-style bias init so initial class probabilities ~ 0.01.
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.class_embed.bias.data = torch.ones(num_classes) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
        if self.spatial_prior == "learned":
            nn.init.uniform_(self.position.weight.data, 0, 1)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
        # The same head modules are reused by every decoder layer (shared
        # weights), wrapped in ModuleLists for per-layer indexing.
        self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
        self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
    def forward(self, srcs, masks):
        """Return (classes, coords), each stacked over decoder layers.

        srcs is (bs, levels, c, h, w); masks' last entry is the padding mask
        matching the (h, w) of srcs.
        """
        # prepare input for decoder
        bs, l, c, h, w = srcs.shape
        if self.spatial_prior == "learned":
            reference_points = self.position.weight.unsqueeze(0).repeat(bs, self.num_pattern, 1)
        elif self.spatial_prior == "grid":
            # Evenly spaced anchors at cell centers of a ~sqrt(N) x sqrt(N) grid.
            nx=ny=round(math.sqrt(self.num_position))
            self.num_position=nx*ny
            x = (torch.arange(nx) + 0.5) / nx
            y = (torch.arange(ny) + 0.5) / ny
            xy=torch.meshgrid(x,y)
            # NOTE(review): .cuda() hard-codes the device -- breaks CPU-only runs.
            reference_points=torch.cat([xy[0].reshape(-1)[...,None],xy[1].reshape(-1)[...,None]],-1).cuda()
            reference_points = reference_points.unsqueeze(0).repeat(bs, self.num_pattern, 1)
        else:
            raise ValueError(f'unknown {self.spatial_prior} spatial prior')
        # Queries: every pattern embedding paired with every anchor position.
        tgt = self.pattern.weight.reshape(1, self.num_pattern, 1, c).repeat(bs, 1, self.num_position, 1).reshape(
            bs, self.num_pattern * self.num_position, c)
        mask = masks[-1].unsqueeze(1).repeat(1,l,1,1).reshape(bs*l,h,w)
        pos_col, pos_row = mask2pos(mask)
        if self.attention_type=="RCDA":
            # Row/column-decoupled attention uses separate 1-D encodings.
            posemb_row = self.adapt_pos1d(pos2posemb1d(pos_row))
            posemb_col = self.adapt_pos1d(pos2posemb1d(pos_col))
            posemb_2d = None
        else:
            pos_2d = torch.cat([pos_row.unsqueeze(1).repeat(1, h, 1).unsqueeze(-1), pos_col.unsqueeze(2).repeat(1, 1, w).unsqueeze(-1)],dim=-1)
            posemb_2d = self.adapt_pos2d(pos2posemb2d(pos_2d))
            posemb_row = posemb_col = None
        # Encoder: spatial layers, with a level-mixing layer after each of
        # the first num_encoder_layers_level iterations.
        outputs = srcs.reshape(bs * l, c, h, w)
        for idx in range(len(self.encoder_layers)):
            outputs = self.encoder_layers[idx](outputs, mask, posemb_row, posemb_col,posemb_2d)
            if idx < self.num_encoder_layers_level:
                outputs = self.encoder_layers_level[idx](outputs, level_emb=self.level_embed.weight.unsqueeze(1).unsqueeze(0).repeat(bs,1,1,1).reshape(bs*l,1,c))
        srcs = outputs.reshape(bs, l, c, h, w)
        output = tgt
        outputs_classes = []
        outputs_coords = []
        for lid, layer in enumerate(self.decoder_layers):
            output = layer(output, reference_points, srcs, mask, adapt_pos2d=self.adapt_pos2d,
                           adapt_pos1d=self.adapt_pos1d, posemb_row=posemb_row, posemb_col=posemb_col,posemb_2d=posemb_2d)
            # Box head predicts offsets relative to the inverse-sigmoid anchor
            # points; sigmoid maps back to normalized [0, 1] coordinates.
            reference = inverse_sigmoid(reference_points)
            outputs_class = self.class_embed[lid](output)
            tmp = self.bbox_embed[lid](output)
            if reference.shape[-1] == 4:
                tmp += reference
            else:
                assert reference.shape[-1] == 2
                tmp[..., :2] += reference
            outputs_coord = tmp.sigmoid()
            outputs_classes.append(outputs_class[None,])
            outputs_coords.append(outputs_coord[None,])
        output = torch.cat(outputs_classes, dim=0), torch.cat(outputs_coords, dim=0)
        return output
class TransformerEncoderLayerSpatial(nn.Module):
    """Encoder layer that self-attends within each feature map's spatial extent."""
    def __init__(self,
                 d_model=256, d_ffn=1024,
                 dropout=0., activation="relu",
                 n_heads=8, attention_type="RCDA"):
        super().__init__()
        self.attention_type = attention_type
        if attention_type=="RCDA":
            attention_module=MultiheadRCDA
        elif attention_type == "nn.MultiheadAttention":
            attention_module=nn.MultiheadAttention
        else:
            raise ValueError(f'unknown {attention_type} attention_type')
        # self attention
        self.self_attn = attention_module(d_model, n_heads, dropout=dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        # ffn
        self.ffn = FFN(d_model, d_ffn, dropout, activation)
    @staticmethod
    def with_pos_embed(tensor, pos):
        # Additive position encoding; identity when no encoding is supplied.
        return tensor if pos is None else tensor + pos
    def forward(self, src, padding_mask=None, posemb_row=None, posemb_col=None,posemb_2d=None):
        """src: (bz, c, h, w) -> same shape after self-attention + FFN."""
        # self attention
        bz, c, h, w = src.shape
        src = src.permute(0, 2, 3, 1)
        if self.attention_type=="RCDA":
            # Row/column-decoupled attention: queries/keys carry separate
            # row and column position encodings.
            posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)
            posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)
            src2 = self.self_attn((src + posemb_row).reshape(bz, h * w, c), (src + posemb_col).reshape(bz, h * w, c),
                                  src + posemb_row, src + posemb_col,
                                  src, key_padding_mask=padding_mask)[0].transpose(0, 1).reshape(bz, h, w, c)
        else:
            # Standard dense attention over all h*w positions with a 2-D
            # position encoding.
            src2 = self.self_attn((src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),
                                  (src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),
                                  src.reshape(bz, h * w, c).transpose(0, 1))[0].transpose(0, 1).reshape(bz, h, w, c)
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        # ffn
        src = self.ffn(src)
        src = src.permute(0, 3, 1, 2)
        return src
class TransformerEncoderLayerLevel(nn.Module):
    """Encoder layer that mixes information across feature levels.

    Each spatial position attends over itself across the stacked levels,
    distinguished by an additive level embedding.
    """
    def __init__(self,
                 d_model=256, d_ffn=1024,
                 dropout=0., activation="relu",
                 n_heads=8):
        super().__init__()
        # self attention
        self.self_attn_level = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        # ffn
        self.ffn = FFN(d_model, d_ffn, dropout, activation)
    @staticmethod
    def with_pos_embed(tensor, pos):
        # Additive position encoding; identity when no encoding is supplied.
        return tensor if pos is None else tensor + pos
    def forward(self, src, level_emb=0):
        """src: (bz, c, h, w) -> same shape; level_emb tags each level's tokens."""
        # self attention
        bz, c, h, w = src.shape
        src = src.permute(0, 2, 3, 1)
        src2 = self.self_attn_level(src.reshape(bz, h * w, c) + level_emb, src.reshape(bz, h * w, c) + level_emb,
                                    src.reshape(bz, h * w, c))[0].reshape(bz, h, w, c)
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        # ffn
        src = self.ffn(src)
        src = src.permute(0, 3, 1, 2)
        return src
class TransformerDecoderLayer(nn.Module):
    """Decoder layer: query self-attention, then cross-attention into the
    encoder feature maps (RCDA or dense), then an FFN.

    With multiple feature levels, per-level cross-attention outputs are
    concatenated and fused by a linear layer.
    """
    def __init__(self, d_model=256, d_ffn=1024,
                 dropout=0., activation="relu", n_heads=8,
                 n_levels=3, attention_type="RCDA"):
        super().__init__()
        # (fixed: the original assigned self.attention_type twice)
        self.attention_type = attention_type
        if attention_type=="RCDA":
            attention_module=MultiheadRCDA
        elif attention_type == "nn.MultiheadAttention":
            attention_module=nn.MultiheadAttention
        else:
            raise ValueError(f'unknown {attention_type} attention_type')
        # cross attention
        self.cross_attn = attention_module(d_model, n_heads, dropout=dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        # self attention
        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)
        # level combination: fuse the per-level cross-attention outputs.
        if n_levels>1:
            self.level_fc = nn.Linear(d_model * n_levels, d_model)
        # ffn
        self.ffn = FFN(d_model, d_ffn, dropout, activation)
    @staticmethod
    def with_pos_embed(tensor, pos):
        # Additive position encoding; identity when no encoding is supplied.
        return tensor if pos is None else tensor + pos
    def forward(self, tgt, reference_points, srcs, src_padding_masks=None, adapt_pos2d=None,
                adapt_pos1d=None, posemb_row=None, posemb_col=None, posemb_2d=None):
        """tgt: (bz, n_queries, c); srcs: (bz, levels, c, h, w) -> updated tgt."""
        tgt_len = tgt.shape[1]
        # Query position encodings derived from the 2-D anchor points.
        query_pos = pos2posemb2d(reference_points.squeeze(2))
        query_pos = adapt_pos2d(query_pos)
        # self attention
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        bz, l, c, h, w = srcs.shape
        srcs = srcs.reshape(bz * l, c, h, w).permute(0, 2, 3, 1)
        if self.attention_type == "RCDA":
            # Decoupled 1-D position encodings for the x/y query coordinates
            # and for the feature-map rows/columns.
            query_pos_x = adapt_pos1d(pos2posemb1d(reference_points[..., 0]))
            query_pos_y = adapt_pos1d(pos2posemb1d(reference_points[..., 1]))
            posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)
            posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)
            src_row = src_col = srcs
            k_row = src_row + posemb_row
            k_col = src_col + posemb_col
            tgt2 = self.cross_attn((tgt + query_pos_x).repeat(l, 1, 1), (tgt + query_pos_y).repeat(l, 1, 1), k_row, k_col,
                                   srcs, key_padding_mask=src_padding_masks)[0].transpose(0, 1)
        else:
            # Dense cross-attention over all h*w positions of each level.
            tgt2 = self.cross_attn((tgt + query_pos).repeat(l, 1, 1).transpose(0, 1),
                                   (srcs + posemb_2d).reshape(bz * l, h * w, c).transpose(0,1),
                                   srcs.reshape(bz * l, h * w, c).transpose(0, 1))[0].transpose(0,1)
        if l > 1:
            # Concatenate the per-level outputs channel-wise and fuse them.
            tgt2 = self.level_fc(tgt2.reshape(bz, l, tgt_len, c).permute(0, 2, 3, 1).reshape(bz, tgt_len, c * l))
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # ffn
        tgt = self.ffn(tgt)
        return tgt
class FFN(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> dropout ->
    Linear, with a residual connection and LayerNorm on the output.
    """

    def __init__(self, d_model=256, d_ffn=1024, dropout=0., activation='relu'):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = _get_activation_fn(activation)
        self.dropout2 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout3 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)

    def forward(self, src):
        # Expand to d_ffn, apply the non-linearity, then project back.
        hidden = self.activation(self.linear1(src))
        projected = self.linear2(self.dropout2(hidden))
        # Residual add followed by normalization.
        return self.norm2(src + self.dropout3(projected))
class MLP(nn.Module):
    """Simple multi-layer perceptron: ``num_layers`` linear layers with ReLU
    between all but the last one.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # Chain of widths: input -> hidden (num_layers-1 times) -> output.
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(
            nn.Linear(d_in, d_out) for d_in, d_out in zip(dims[:-1], dims[1:])
        )

    def forward(self, x):
        last = self.num_layers - 1
        for idx, layer in enumerate(self.layers):
            x = layer(x)
            # No activation after the final layer.
            if idx < last:
                x = F.relu(x)
        return x
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
def build_transformer(args):
    """Construct a ``Transformer`` from a parsed argument namespace.

    Every hyper-parameter is taken from the corresponding ``args`` attribute;
    the activation is hard-coded to ReLU here.
    """
    return Transformer(
        d_model=args.hidden_dim,
        nhead=args.nheads,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        dim_feedforward=args.dim_feedforward,
        dropout=args.dropout,
        activation="relu",
        num_feature_levels=args.num_feature_levels,
        num_query_position=args.num_query_position,
        num_query_pattern=args.num_query_pattern,
        spatial_prior=args.spatial_prior,
        attention_type=args.attention_type,
    )
def pos2posemb2d(pos, num_pos_feats=128, temperature=10000):
    """Sinusoidal positional embedding for 2-D points.

    *pos* has shape ``(..., 2)`` with (x, y) coordinates (expected in [0, 1],
    scaled by 2*pi here). Returns shape ``(..., 2 * num_pos_feats)``: the y
    embedding concatenated before the x embedding, each interleaving
    sin/cos over ``num_pos_feats`` geometric frequencies.
    """
    scaled = pos * (2 * math.pi)
    freq = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
    freq = temperature ** (2 * (freq // 2) / num_pos_feats)

    def _interleave(angles):
        # [sin(a0), cos(a1), sin(a2), cos(a3), ...] along the last axis.
        return torch.stack(
            (angles[..., 0::2].sin(), angles[..., 1::2].cos()), dim=-1
        ).flatten(-2)

    emb_x = _interleave(scaled[..., 0, None] / freq)
    emb_y = _interleave(scaled[..., 1, None] / freq)
    return torch.cat((emb_y, emb_x), dim=-1)
def pos2posemb1d(pos, num_pos_feats=256, temperature=10000):
    """Sinusoidal positional embedding for scalar positions.

    *pos* has shape ``(...)`` (values expected in [0, 1], scaled by 2*pi).
    Returns shape ``(..., num_pos_feats)`` interleaving sin/cos over
    geometric frequencies.
    """
    angles = (pos * (2 * math.pi))[..., None]
    freq = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
    freq = temperature ** (2 * (freq // 2) / num_pos_feats)
    angles = angles / freq
    # Interleave sin of the even slots with cos of the odd slots.
    return torch.stack(
        (angles[..., 0::2].sin(), angles[..., 1::2].cos()), dim=-1
    ).flatten(-2)
def mask2pos(mask):
    """Derive normalized row/column center positions from a padding mask.

    *mask* is a boolean tensor of shape ``(batch, h, w)`` where True marks
    padding. Returns ``(y_embed, x_embed)`` of shapes ``(batch, h)`` and
    ``(batch, w)``: cell-center coordinates normalized by the number of
    valid rows/columns (sampled along the first column / first row).
    """
    valid = ~mask
    col_counts = valid[:, :, 0].cumsum(1, dtype=torch.float32)
    row_counts = valid[:, 0, :].cumsum(1, dtype=torch.float32)
    # (count - 0.5) / total places each coordinate at its cell center.
    pos_y = (col_counts - 0.5) / col_counts[:, -1:]
    pos_x = (row_counts - 0.5) / row_counts[:, -1:]
    return pos_y, pos_x
| 39.071759 | 161 | 0.598377 |
import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from util.misc import inverse_sigmoid
from models.row_column_decoupled_attention import MultiheadRCDA
class Transformer(nn.Module):
def __init__(self, d_model=256, nhead=8,
num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.,
activation="relu", num_feature_levels=3,num_query_position = 300,num_query_pattern=3,
spatial_prior="learned",attention_type="RCDA"):
super().__init__()
self.d_model = d_model
self.nhead = nhead
self.attention_type = attention_type
encoder_layer = TransformerEncoderLayerSpatial(d_model, dim_feedforward,
dropout, activation, nhead , attention_type)
encoder_layer_level = TransformerEncoderLayerLevel(d_model, dim_feedforward,
dropout, activation, nhead)
decoder_layer = TransformerDecoderLayer(d_model, dim_feedforward,
dropout, activation, nhead,
num_feature_levels, attention_type)
if num_feature_levels == 1:
self.num_encoder_layers_level = 0
else:
self.num_encoder_layers_level = num_encoder_layers // 2
self.num_encoder_layers_spatial = num_encoder_layers - self.num_encoder_layers_level
self.encoder_layers = _get_clones(encoder_layer, self.num_encoder_layers_spatial)
self.encoder_layers_level = _get_clones(encoder_layer_level, self.num_encoder_layers_level)
self.decoder_layers = _get_clones(decoder_layer, num_decoder_layers)
self.spatial_prior=spatial_prior
if num_feature_levels>1:
self.level_embed = nn.Embedding(num_feature_levels, d_model)
self.num_pattern = num_query_pattern
self.pattern = nn.Embedding(self.num_pattern, d_model)
self.num_position = num_query_position
if self.spatial_prior == "learned":
self.position = nn.Embedding(self.num_position, 2)
self.adapt_pos2d = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model),
)
self.adapt_pos1d = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model),
)
self.num_layers = num_decoder_layers
num_classes = 91
self.class_embed = nn.Linear(d_model, num_classes)
self.bbox_embed = MLP(d_model, d_model, 4, 3)
self._reset_parameters()
def _reset_parameters(self):
num_pred = self.num_layers
num_classes = 91
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
self.class_embed.bias.data = torch.ones(num_classes) * bias_value
nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
if self.spatial_prior == "learned":
nn.init.uniform_(self.position.weight.data, 0, 1)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
def forward(self, srcs, masks):
bs, l, c, h, w = srcs.shape
if self.spatial_prior == "learned":
reference_points = self.position.weight.unsqueeze(0).repeat(bs, self.num_pattern, 1)
elif self.spatial_prior == "grid":
nx=ny=round(math.sqrt(self.num_position))
self.num_position=nx*ny
x = (torch.arange(nx) + 0.5) / nx
y = (torch.arange(ny) + 0.5) / ny
xy=torch.meshgrid(x,y)
reference_points=torch.cat([xy[0].reshape(-1)[...,None],xy[1].reshape(-1)[...,None]],-1).cuda()
reference_points = reference_points.unsqueeze(0).repeat(bs, self.num_pattern, 1)
else:
raise ValueError(f'unknown {self.spatial_prior} spatial prior')
tgt = self.pattern.weight.reshape(1, self.num_pattern, 1, c).repeat(bs, 1, self.num_position, 1).reshape(
bs, self.num_pattern * self.num_position, c)
mask = masks[-1].unsqueeze(1).repeat(1,l,1,1).reshape(bs*l,h,w)
pos_col, pos_row = mask2pos(mask)
if self.attention_type=="RCDA":
posemb_row = self.adapt_pos1d(pos2posemb1d(pos_row))
posemb_col = self.adapt_pos1d(pos2posemb1d(pos_col))
posemb_2d = None
else:
pos_2d = torch.cat([pos_row.unsqueeze(1).repeat(1, h, 1).unsqueeze(-1), pos_col.unsqueeze(2).repeat(1, 1, w).unsqueeze(-1)],dim=-1)
posemb_2d = self.adapt_pos2d(pos2posemb2d(pos_2d))
posemb_row = posemb_col = None
outputs = srcs.reshape(bs * l, c, h, w)
for idx in range(len(self.encoder_layers)):
outputs = self.encoder_layers[idx](outputs, mask, posemb_row, posemb_col,posemb_2d)
if idx < self.num_encoder_layers_level:
outputs = self.encoder_layers_level[idx](outputs, level_emb=self.level_embed.weight.unsqueeze(1).unsqueeze(0).repeat(bs,1,1,1).reshape(bs*l,1,c))
srcs = outputs.reshape(bs, l, c, h, w)
output = tgt
outputs_classes = []
outputs_coords = []
for lid, layer in enumerate(self.decoder_layers):
output = layer(output, reference_points, srcs, mask, adapt_pos2d=self.adapt_pos2d,
adapt_pos1d=self.adapt_pos1d, posemb_row=posemb_row, posemb_col=posemb_col,posemb_2d=posemb_2d)
reference = inverse_sigmoid(reference_points)
outputs_class = self.class_embed[lid](output)
tmp = self.bbox_embed[lid](output)
if reference.shape[-1] == 4:
tmp += reference
else:
assert reference.shape[-1] == 2
tmp[..., :2] += reference
outputs_coord = tmp.sigmoid()
outputs_classes.append(outputs_class[None,])
outputs_coords.append(outputs_coord[None,])
output = torch.cat(outputs_classes, dim=0), torch.cat(outputs_coords, dim=0)
return output
class TransformerEncoderLayerSpatial(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0., activation="relu",
n_heads=8, attention_type="RCDA"):
super().__init__()
self.attention_type = attention_type
if attention_type=="RCDA":
attention_module=MultiheadRCDA
elif attention_type == "nn.MultiheadAttention":
attention_module=nn.MultiheadAttention
else:
raise ValueError(f'unknown {attention_type} attention_type')
self.self_attn = attention_module(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, src, padding_mask=None, posemb_row=None, posemb_col=None,posemb_2d=None):
bz, c, h, w = src.shape
src = src.permute(0, 2, 3, 1)
if self.attention_type=="RCDA":
posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)
posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)
src2 = self.self_attn((src + posemb_row).reshape(bz, h * w, c), (src + posemb_col).reshape(bz, h * w, c),
src + posemb_row, src + posemb_col,
src, key_padding_mask=padding_mask)[0].transpose(0, 1).reshape(bz, h, w, c)
else:
src2 = self.self_attn((src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),
(src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),
src.reshape(bz, h * w, c).transpose(0, 1))[0].transpose(0, 1).reshape(bz, h, w, c)
src = src + self.dropout1(src2)
src = self.norm1(src)
src = self.ffn(src)
src = src.permute(0, 3, 1, 2)
return src
class TransformerEncoderLayerLevel(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0., activation="relu",
n_heads=8):
super().__init__()
self.self_attn_level = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, src, level_emb=0):
bz, c, h, w = src.shape
src = src.permute(0, 2, 3, 1)
src2 = self.self_attn_level(src.reshape(bz, h * w, c) + level_emb, src.reshape(bz, h * w, c) + level_emb,
src.reshape(bz, h * w, c))[0].reshape(bz, h, w, c)
src = src + self.dropout1(src2)
src = self.norm1(src)
src = self.ffn(src)
src = src.permute(0, 3, 1, 2)
return src
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model=256, d_ffn=1024,
dropout=0., activation="relu", n_heads=8,
n_levels=3, attention_type="RCDA"):
super().__init__()
self.attention_type = attention_type
self.attention_type = attention_type
if attention_type=="RCDA":
attention_module=MultiheadRCDA
elif attention_type == "nn.MultiheadAttention":
attention_module=nn.MultiheadAttention
else:
raise ValueError(f'unknown {attention_type} attention_type')
self.cross_attn = attention_module(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
if n_levels>1:
self.level_fc = nn.Linear(d_model * n_levels, d_model)
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, tgt, reference_points, srcs, src_padding_masks=None, adapt_pos2d=None,
adapt_pos1d=None, posemb_row=None, posemb_col=None, posemb_2d=None):
tgt_len = tgt.shape[1]
query_pos = pos2posemb2d(reference_points.squeeze(2))
query_pos = adapt_pos2d(query_pos)
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
bz, l, c, h, w = srcs.shape
srcs = srcs.reshape(bz * l, c, h, w).permute(0, 2, 3, 1)
if self.attention_type == "RCDA":
query_pos_x = adapt_pos1d(pos2posemb1d(reference_points[..., 0]))
query_pos_y = adapt_pos1d(pos2posemb1d(reference_points[..., 1]))
posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)
posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)
src_row = src_col = srcs
k_row = src_row + posemb_row
k_col = src_col + posemb_col
tgt2 = self.cross_attn((tgt + query_pos_x).repeat(l, 1, 1), (tgt + query_pos_y).repeat(l, 1, 1), k_row, k_col,
srcs, key_padding_mask=src_padding_masks)[0].transpose(0, 1)
else:
tgt2 = self.cross_attn((tgt + query_pos).repeat(l, 1, 1).transpose(0, 1),
(srcs + posemb_2d).reshape(bz * l, h * w, c).transpose(0,1),
srcs.reshape(bz * l, h * w, c).transpose(0, 1))[0].transpose(0,1)
if l > 1:
tgt2 = self.level_fc(tgt2.reshape(bz, l, tgt_len, c).permute(0, 2, 3, 1).reshape(bz, tgt_len, c * l))
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt = self.ffn(tgt)
return tgt
class FFN(nn.Module):
def __init__(self, d_model=256, d_ffn=1024, dropout=0., activation='relu'):
super().__init__()
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
nhead=args.nheads,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
dim_feedforward=args.dim_feedforward,
dropout=args.dropout,
activation="relu",
num_feature_levels=args.num_feature_levels,
num_query_position=args.num_query_position,
num_query_pattern=args.num_query_pattern,
spatial_prior=args.spatial_prior,
attention_type=args.attention_type,
)
def pos2posemb2d(pos, num_pos_feats=128, temperature=10000):
scale = 2 * math.pi
pos = pos * scale
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
pos_x = pos[..., 0, None] / dim_t
pos_y = pos[..., 1, None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
posemb = torch.cat((pos_y, pos_x), dim=-1)
return posemb
def pos2posemb1d(pos, num_pos_feats=256, temperature=10000):
scale = 2 * math.pi
pos = pos * scale
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
pos_x = pos[..., None] / dim_t
posemb = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
return posemb
def mask2pos(mask):
not_mask = ~mask
y_embed = not_mask[:, :, 0].cumsum(1, dtype=torch.float32)
x_embed = not_mask[:, 0, :].cumsum(1, dtype=torch.float32)
y_embed = (y_embed - 0.5) / y_embed[:, -1:]
x_embed = (x_embed - 0.5) / x_embed[:, -1:]
return y_embed, x_embed
| true | true |
f72f1e928139d4f1f8e92e1e0291db259b10cc61 | 8,261 | py | Python | src/problem3.py | mcgeeml/06-Exam1Practice | 721f95eda65e0f5bd2ae541bc028e1d5dc9e6b47 | [
"MIT"
] | null | null | null | src/problem3.py | mcgeeml/06-Exam1Practice | 721f95eda65e0f5bd2ae541bc028e1d5dc9e6b47 | [
"MIT"
] | null | null | null | src/problem3.py | mcgeeml/06-Exam1Practice | 721f95eda65e0f5bd2ae541bc028e1d5dc9e6b47 | [
"MIT"
] | null | null | null | """
PRACTICE Exam 1, problem 3.
Authors: David Mutchler, Vibha Alangar, Valerie Galluzzi, Mark Hays,
Amanda Stouder, their colleagues and Myon McGee.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
########################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 1 question.
# 5 is a "typical" Test 1 question.
# 7 is a "hard" Test 1 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 1 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
########################################################################
def main():
    """ Calls the TEST functions in this module. """
    # Each run_test_* function opens its own RoseWindow(s) and waits for
    # mouse clicks, so the two test suites run strictly in sequence.
    run_test_problem3a()
    run_test_problem3b()
def run_test_problem3a():
    """Tests the problem3a function with five test cases.

    Expected values follow the thickness pattern 1, 3, 5, ... capped at 13:
    e.g. n=6 gives 1+3+5+7+9+11 = 36.
    """
    # Window 1:
    title = 'Problem 3a. Test 1: Start at (30, 30), 6 lines'
    window1 = rg.RoseWindow(350, 200, title)

    # Test 1 (it is on window 1):
    point = rg.Point(30, 30)
    expected = 36
    answer = problem3a(window1, point, 6)
    print()
    print('Test 1 expected:', expected)
    print('       actual: ', answer)

    window1.close_on_mouse_click()

    # Window 2:
    title = 'Problem 3a. Test 2: Start at (80, 10), 9 lines.'
    title += '  Test 3: Start at (30, 50), 3 lines.'
    window2 = rg.RoseWindow(550, 200, title)

    # Test 2 (it is on window 2):
    point = rg.Point(80, 10)
    expected = 75
    answer = problem3a(window2, point, 9)
    print()
    print('Test 2 expected:', expected)
    print('       actual: ', answer)

    # Test 3 (it is also on window 2):
    point = rg.Point(30, 50)
    expected = 9
    answer = problem3a(window2, point, 3)
    print()
    print('Test 3 expected:', expected)
    print('       actual: ', answer)

    window2.close_on_mouse_click()

    # Window 3:
    title = 'Problem 3a. Test 4: Start at (30, 30), 20 lines'
    window3 = rg.RoseWindow(450, 300, title)

    # Test 4 (it is on window 3):
    point = rg.Point(30, 30)
    expected = 218
    answer = problem3a(window3, point, 20)
    print()
    print('Test 4 expected:', expected)
    print('       actual: ', answer)

    # Test 5 (it is also on window 3): a single line has thickness 1.
    point = rg.Point(300, 40)
    expected = 1
    answer = problem3a(window3, point, 1)
    print()
    print('Test 5 expected:', expected)
    print('       actual: ', answer)

    window3.close_on_mouse_click()
def problem3a(window, point, n):
    """
    See problem3a_picture.pdf in this project for pictures
    that may help you better understand the following specification:

    What comes in:
      -- An rg.RoseWindow.
      -- An rg.Point.
      -- A nonnegative integer n.
    What goes out:
      -- Returns the sum of the thicknesses of the rg.Lines
           that are drawn as described in the Side effects (below).
    Side effects:
      Draws n rg.Lines on the given rg.RoseWindow,
      as follows:
        -- There are the given number (n) of rg.Lines.
        -- Each rg.Line is vertical and has length 50.
             (All units are pixels.)
        -- The top of the first (leftmost) rg.Line
             is at the given rg.Point.
        -- Each successive rg.Line is 20 pixels to the right
             and 10 pixels down from the previous rg.Line.
        -- The first rg.Line has thickness 1.
        -- Each successive rg.Line has thickness 2 greater than
             the rg.Line to its left, but no greater than 13.
             (So once a rg.Line has thickness 13,
             it and all the rg.Lines to its right have thickness 13.)

    Type hints:
      :type window: rg.RoseWindow
      :type point: rg.Point
    """
    # Implementation of the previously-unimplemented stub.
    # NOTE(review): uses the standard rosegraphics API (rg.Line with a
    # .thickness attribute, .attach_to(window.initial_canvas),
    # window.render()) -- confirm against this project's rosegraphics.py.
    total = 0
    thickness = 1
    x = point.x
    y = point.y
    for _ in range(n):
        line = rg.Line(rg.Point(x, y), rg.Point(x, y + 50))
        line.thickness = thickness
        line.attach_to(window.initial_canvas)
        total = total + thickness
        # Next line: 20 right, 10 down, thickness +2 capped at 13.
        x = x + 20
        y = y + 10
        thickness = min(thickness + 2, 13)
    window.render()
    return total
def run_test_problem3b():
    """ Tests the problem3b function. """
    # Expected values are the sums over m sets of lines, where set k has
    # 3 + 2k lines with thicknesses 1, 3, 5, ... capped at 13.
    # Test 1 is ALREADY DONE (here).
    expected = 158  # 9 + 25 + 49 + 75 over 4 sets
    answer = problem3b(4, rg.Point(100, 50))
    print()
    print('Test 1 expected:', expected)
    print('       actual: ', answer)

    # Test 2 is ALREADY DONE (here).
    expected = 539  # 9 + 25 + 49 + 75 + 101 + 127 + 153 over 7 sets
    answer = problem3b(7, rg.Point(30, 30))
    print()
    print('Test 2 expected:', expected)
    print('       actual: ', answer)
def problem3b(m, point1):
    """
    See problem3b_picture.pdf in this project for pictures
    that may help you better understand the following specification:

    What comes in:
      -- A positive integer m.
      -- An rg.Point.
    What goes out:
      -- Returns the sum of the thicknesses of ALL of the lines drawn
           (over all m sets of lines).
    Side effects:
      -- Constructs and displays an rg.RoseWindow
           that is 400 wide by 650 tall.
      -- Draws, on the rg.RoseWindow, m SETS of lines, where:
           -- Each SET of lines is drawn
                 *** by a call to  ** problem3a **. ***
           -- The first set has 3 lines that start at point1
                (the given point).
           -- The second set has 5 lines that start 60 pixels
                directly below point1.
           -- The third set has 7 lines that start 120 pixels
                directly below point1.
           -- The fourth set has 9 lines that start 180 pixels
                directly below point1.
           -- etc until m SETS of lines are drawn (where m is given).
           -- Each set of lines should have widths (thicknesses)
                per problem3a.
      -- Waits for the user to click the mouse (and displays an
           appropriate message prompting the user to do so),
           then closes the window.

    Type hints:
      :type m: int
      :type point1: rg.Point
    """
    # Implementation of the previously-unimplemented stub.
    window = rg.RoseWindow(400, 650, 'Problem 3b')
    total = 0
    for k in range(m):
        # Set k starts 60*k pixels below point1 and has 3 + 2k lines.
        start = rg.Point(point1.x, point1.y + 60 * k)
        total = total + problem3a(window, start, 3 + 2 * k)
    # close_on_mouse_click displays a "click to close" prompt by default.
    window.close_on_mouse_click()
    return total
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 37.211712 | 72 | 0.507808 |
import rosegraphics as rg
| true | true |
f72f1fb175ba5701b831b4c399b500717379e533 | 1,591 | py | Python | examples/01_plotting/plot_colormaps.py | SIMEXP/nilearn | 4f51aea58f38689ca32c2edd748528d521e6cfb0 | [
"BSD-2-Clause"
] | 827 | 2015-01-30T23:11:42.000Z | 2022-03-29T21:21:05.000Z | examples/01_plotting/plot_colormaps.py | SIMEXP/nilearn | 4f51aea58f38689ca32c2edd748528d521e6cfb0 | [
"BSD-2-Clause"
] | 2,845 | 2015-01-04T22:14:41.000Z | 2022-03-31T20:28:09.000Z | examples/01_plotting/plot_colormaps.py | SIMEXP/nilearn | 4f51aea58f38689ca32c2edd748528d521e6cfb0 | [
"BSD-2-Clause"
] | 484 | 2015-02-03T10:58:19.000Z | 2022-03-29T21:57:16.000Z | """
Matplotlib colormaps in Nilearn
================================
Visualize HCP connectome workbench color maps shipped with Nilearn
which can be used for plotting brain images on surface.
See :ref:`surface-plotting` for surface plotting details.
"""
import numpy as np
import matplotlib.pyplot as plt
from nilearn.plotting.cm import _cmap_d as nilearn_cmaps
from nilearn.plotting import show
###########################################################################
# Plot color maps
# ----------------
nmaps = len(nilearn_cmaps)
a = np.outer(np.arange(0, 1, 0.01), np.ones(10))
# Initialize the figure
plt.figure(figsize=(10, 4.2))
plt.subplots_adjust(top=0.4, bottom=0.05, left=0.01, right=0.99)
for index, cmap in enumerate(nilearn_cmaps):
plt.subplot(1, nmaps + 1, index + 1)
plt.imshow(a, cmap=nilearn_cmaps[cmap])
plt.axis('off')
plt.title(cmap, fontsize=10, va='bottom', rotation=90)
###########################################################################
# Plot matplotlib color maps
# --------------------------
plt.figure(figsize=(10, 5))
plt.subplots_adjust(top=0.8, bottom=0.05, left=0.01, right=0.99)
deprecated_cmaps = ['Vega10', 'Vega20', 'Vega20b', 'Vega20c', 'spectral']
m_cmaps = []
for m in plt.cm.datad:
if not m.endswith("_r") and m not in deprecated_cmaps:
m_cmaps.append(m)
m_cmaps.sort()
for index, cmap in enumerate(m_cmaps):
plt.subplot(1, len(m_cmaps) + 1, index + 1)
plt.imshow(a, cmap=plt.get_cmap(cmap), aspect='auto')
plt.axis('off')
plt.title(cmap, fontsize=10, va='bottom', rotation=90)
show()
| 30.596154 | 75 | 0.606537 | import numpy as np
import matplotlib.pyplot as plt
from nilearn.plotting.cm import _cmap_d as nilearn_cmaps
from nilearn.plotting import show
| true | true |
f72f2071f1e1301bd9f2a6ccbf675cb274979e21 | 684 | py | Python | setup.py | pmartin23/metapub | 7dc3f2321720191d461056deeaedf69cd1479157 | [
"Apache-2.0"
] | null | null | null | setup.py | pmartin23/metapub | 7dc3f2321720191d461056deeaedf69cd1479157 | [
"Apache-2.0"
] | 3 | 2019-11-14T23:36:14.000Z | 2020-11-05T20:42:50.000Z | setup.py | pmartin23/metapub | 7dc3f2321720191d461056deeaedf69cd1479157 | [
"Apache-2.0"
] | 1 | 2018-02-08T12:25:12.000Z | 2018-02-08T12:25:12.000Z | import glob, os
from setuptools import setup, find_packages
setup(
name = 'metapub',
version = '0.4.3.5',
description = 'Pubmed / NCBI / eutils interaction library, handling the metadata of pubmed papers.',
url = 'https://bitbucket.org/metapub/metapub',
author = 'Naomi Most',
maintainer = 'Naomi Most',
author_email = 'naomi@nthmost.com',
maintainer_email = 'naomi@nthmost.com',
license = 'Apache 2.0',
packages = find_packages(),
install_requires = [
'setuptools',
'lxml',
'requests',
'eutils',
'tabulate',
'cssselect',
'unidecode',
'six',
'tox',
],
)
| 24.428571 | 104 | 0.574561 | import glob, os
from setuptools import setup, find_packages
setup(
name = 'metapub',
version = '0.4.3.5',
description = 'Pubmed / NCBI / eutils interaction library, handling the metadata of pubmed papers.',
url = 'https://bitbucket.org/metapub/metapub',
author = 'Naomi Most',
maintainer = 'Naomi Most',
author_email = 'naomi@nthmost.com',
maintainer_email = 'naomi@nthmost.com',
license = 'Apache 2.0',
packages = find_packages(),
install_requires = [
'setuptools',
'lxml',
'requests',
'eutils',
'tabulate',
'cssselect',
'unidecode',
'six',
'tox',
],
)
| true | true |
f72f20b73a9f992e3e7d7b8d15bee88e20b845b2 | 13,740 | py | Python | rltoolkit/rl.py | raznem/sac_ppo | c18e9bd32a70fcc4bc413565c6b885d7560b8b5a | [
"MIT"
] | null | null | null | rltoolkit/rl.py | raznem/sac_ppo | c18e9bd32a70fcc4bc413565c6b885d7560b8b5a | [
"MIT"
] | null | null | null | rltoolkit/rl.py | raznem/sac_ppo | c18e9bd32a70fcc4bc413565c6b885d7560b8b5a | [
"MIT"
] | null | null | null | import logging
from pathlib import Path
from typing import Any, Optional, Tuple, Union
import gym
import torch
import pickle as pkl
from rltoolkit import config, utils
from rltoolkit.buffer import Memory
from rltoolkit.stats_logger import StatsLogger
from rltoolkit.tensorboard_logger import TensorboardWriter
logger = logging.getLogger(__name__)
class MetaLearner:
    """Base class with parameters common to RL algorithms and other
    interactions with a gym environment: device selection, environment
    construction, action/observation dimensions, and tensorboard plumbing.
    """

    def __init__(
        self,
        env_name: str,
        use_gpu: bool,
        debug_mode: bool = config.DEBUG_MODE,
        tensorboard_dir: Union[str, None] = config.TENSORBOARD_DIR,
        tensorboard_comment: str = config.TENSORBOARD_COMMENT,
    ):
        """Initialize the environment and common bookkeeping state.

        Fix vs. original: the original used an ``f\"\"\"...\"\"\"`` literal here;
        an f-string is never stored as ``__doc__`` and was re-formatted and
        discarded on every instantiation. This is now a plain docstring.

        Args:
            env_name (str): Name of the gym environment.
            use_gpu (bool): Use CUDA (falls back to CPU when unavailable).
            debug_mode (bool, optional): Log additional info.
                Defaults to ``config.DEBUG_MODE``.
            tensorboard_dir (Union[str, None], optional): Path to tensorboard
                logs. Defaults to ``config.TENSORBOARD_DIR``.
            tensorboard_comment (str, optional): Comment for tensorboard
                files. Defaults to ``config.TENSORBOARD_COMMENT``.
        """
        self.env_name = env_name
        if use_gpu and torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")

        self.env = gym.make(self.env_name)
        # Discrete action spaces expose .n; continuous (Box) expose .shape/.high.
        self.discrete = isinstance(self.env.action_space, gym.spaces.Discrete)
        self.ob_dim = self.env.observation_space.shape[0]
        if self.discrete:
            self.ac_dim = self.env.action_space.n
            self.ac_lim = None
        else:
            self.ac_dim = self.env.action_space.shape[0]
            self.ac_lim = torch.tensor(self.env.action_space.high, device=self.device)

        # Running observation statistics, updated from the replay buffer.
        self.obs_mean = torch.zeros(self.ob_dim, device=self.device)
        self.obs_std = torch.ones(self.ob_dim, device=self.device)

        self.iteration = 0  # used in tensorboard
        self.opt = torch.optim.Adam
        self.loss = {}
        self.debug_mode = debug_mode
        self.tensorboard_writer = None
        # Prefix user comments with "_" to keep tensorboard filenames tidy.
        self.tensorboard_comment = (
            "_" + tensorboard_comment if tensorboard_comment else ""
        )
        self.tensorboard_dir = tensorboard_dir

    def run_tensorboard_if_needed(self):
        """Lazily create the tensorboard writer on first use (no-op when
        ``tensorboard_dir`` is None or the writer already exists).

        NOTE(review): relies on ``self.filename`` and ``self.render`` being
        set by a subclass -- confirm all subclasses define them before
        logging starts.
        """
        if self.tensorboard_writer is None and (self.tensorboard_dir is not None):
            self.tensorboard_writer = TensorboardWriter(
                env_name=self.env_name,
                log_dir=self.tensorboard_dir,
                filename=self.filename,
                render=self.render,
            )

    def log_obs_mean_std_tensorboard(self):
        """
        Log mean and std of observations in the tensorboard.
        """
        self.run_tensorboard_if_needed()
        self.tensorboard_writer.log_obs_mean_std(
            self.iteration, self.obs_mean, self.obs_std
        )

    def update_obs_mean_std(self, buffer: Memory) -> Memory:
        """
        Update running average of mean and stds based on the buffer.

        Args:
            buffer (Memory)

        Returns:
            Memory: the same buffer, after its statistics were refreshed.
        """
        buffer.update_obs_mean_std()
        self.obs_mean = buffer.obs_mean
        self.obs_std = buffer.obs_std

        if self.debug_mode and self.tensorboard_dir is not None:
            self.log_obs_mean_std_tensorboard()
        return buffer
class RL(MetaLearner):
    """Base class for reinforcement-learning algorithms.

    Owns the training loop (``train``), checkpointing (``save``/``load``)
    and stats/tensorboard logging. Subclasses implement
    ``perform_iteration`` (one batch + update) and ``save_model``.
    """

    def __init__(
        self,
        env_name: str = config.ENV_NAME,
        gamma: float = config.GAMMA,
        stats_freq: int = config.STATS_FREQ,
        test_episodes: int = config.TEST_EPISODES,
        batch_size: int = config.BATCH_SIZE,
        iterations: int = config.ITERATIONS,
        max_frames: int = None,
        return_done: Union[int, None] = config.RETURN_DONE,
        log_dir: str = config.LOG_DIR,
        use_gpu: bool = config.USE_GPU,
        verbose: int = config.VERBOSE,
        render: bool = config.RENDER,
        *args,
        **kwargs,
    ):
        # NOTE: the original used f-string literals as "docstrings" throughout
        # this class; an f-string is not stored in __doc__ (flake8-bugbear
        # B021) and was re-evaluated and discarded on every call. Converted
        # to real docstrings.
        """Basic parent class for reinforcement learning algorithms.

        Args:
            env_name (str, optional): Name of the gym environment.
                Defaults to ``config.ENV_NAME``.
            gamma (float, optional): Discount factor.
                Defaults to ``config.GAMMA``.
            stats_freq (int, optional): Frequency of logging the progress.
                Defaults to ``config.STATS_FREQ``.
            test_episodes (int, optional): Episodes per policy evaluation.
                Defaults to ``config.TEST_EPISODES``.
            batch_size (int, optional): Number of frames used for one
                algorithm step (could be higher because batch collection
                stops when a rollout ends). Defaults to ``config.BATCH_SIZE``.
            iterations (int, optional): Number of algorithm iterations.
                Defaults to ``config.ITERATIONS``.
            max_frames (int, optional): Limit of frames for training.
                Defaults to None.
            return_done (Union[int, None], optional): Target return which
                stops training when reached. Defaults to ``config.RETURN_DONE``.
            log_dir (str, optional): Path for basic logs, including the
                final model. Defaults to ``config.LOG_DIR``.
            use_gpu (bool, optional): Use CUDA. Defaults to ``config.USE_GPU``.
            verbose (int, optional): Verbosity level.
                Defaults to ``config.VERBOSE``.
            render (bool, optional): Render rollouts to tensorboard.
                Defaults to ``config.RENDER``.
            *args, **kwargs: forwarded to ``MetaLearner`` (e.g. debug_mode,
                tensorboard_dir, tensorboard_comment).
        """
        super().__init__(env_name, use_gpu, *args, **kwargs)
        assert iterations > 0, f"Iteration has to be positive not {iterations}"
        if max_frames is not None:
            assert (
                max_frames <= iterations * batch_size
            ), "max_frames should be smaller or equal than iterations * batch_size"
        self.max_frames = max_frames
        self.gamma = gamma
        self.stats_freq = stats_freq
        self.test_episodes = test_episodes
        self.batch_size = batch_size
        self.iterations = iterations
        self.return_done = return_done
        if log_dir is not None:
            self.log_dir = Path(log_dir)
            self.log_dir.mkdir(parents=True, exist_ok=True)
        else:
            self.log_dir = log_dir
        self.verbose = verbose
        self.render = render
        # relies on gym's TimeLimit wrapper private attribute -- assumes the
        # registered env is time-limited (TODO confirm for custom envs)
        self.max_ep_len = self.env._max_episode_steps
        self.start_time = utils.get_time()
        self.hparams = {
            "hparams/gamma": self.gamma,
            "hparams/batch_size": self.batch_size,
            "hparams/type": utils.get_pretty_type_name(self),
        }
        self.shortnames = config.SHORTNAMES
        self.stats_logger = StatsLogger()

    def train(self, iterations=None):
        """Train the RL model.

        Args:
            iterations (int, optional): Number of additional training
                iterations. If None, runs up to ``self.iterations``;
                otherwise increases the global iteration budget by this
                value to run additional steps. Defaults to None.
        """
        self.run_tensorboard_if_needed()
        if iterations:
            self.iterations += iterations
        # Fix: guard against NameError below when the iteration budget is
        # already spent and the while-loop body never runs.
        buffer = None
        while self.iteration < self.iterations:
            buffer, time_diff = self.perform_iteration()
            self.stats_logger.time_list.append(time_diff)
            running_return = self.stats_logger.calc_running_return(buffer)
            if self.return_done is not None and running_return >= self.return_done:
                break
            if self.iteration % self.stats_freq == 0:
                self.logs_after_iteration(buffer)
            if self.log_dir is not None:
                self.stats_logger.dump_stats(self.log_path)
            self.iteration += 1  # also drives the logging bookkeeping
            if (
                self.max_frames is not None
                and self.max_frames < self.stats_logger.frames
            ):
                logger.info(f"Reached max_frames at {self.iteration} iteration")
                break
        if buffer is not None:
            self.logs_after_iteration(buffer, done=True)
        if self.log_dir is not None:
            self.save()

    def test(self, episodes=None):
        """Test the policy.

        Placeholder returning None; subclasses are expected to override it.

        Args:
            episodes (int): Number of episodes. Defaults to None.

        Returns:
            float: mean episode reward (None in this base implementation).
        """
        mean_reward = None
        return mean_reward

    @utils.measure_time
    def perform_iteration(self):
        """Collect one batch and update the model; subclass responsibility.

        Via the ``utils.measure_time`` decorator, callers receive a
        ``(buffer, time_diff)`` tuple (see ``train``).
        """
        raise NotImplementedError

    def save_model(self):
        """Persist the model itself; subclass responsibility."""
        raise NotImplementedError

    def check_path(self, path):
        """Return a usable checkpoint path, deriving one from logs if None.

        Raises:
            AttributeError: if no path was given and none can be derived.
        """
        if self.filename is None and path is None:
            raise AttributeError
        elif path is None:
            path = str(self.log_path) + ".pkl"
        return path

    def collect_params_dict(self):
        """Gather actor/critic weights and obs statistics for pickling."""
        params_dict = {}
        params_dict["actor"] = self.actor.state_dict()
        params_dict["critic"] = self.critic.state_dict()
        params_dict["obs_mean"] = self.obs_mean
        params_dict["obs_std"] = self.obs_std
        return params_dict

    def apply_params_dict(self, params_dict):
        """Restore actor/critic weights and obs statistics (see ``save``)."""
        self.actor.load_state_dict(params_dict["actor"])
        self.critic.load_state_dict(params_dict["critic"])
        self.obs_mean = params_dict["obs_mean"]
        self.obs_std = params_dict["obs_std"]

    def save(self, path: str = None):
        """Save the RL object's parameters with pickle.

        Args:
            path (str): Target path; derived from the log path when None.
        """
        path = self.check_path(path)
        with open(path, "wb") as f:
            params_dict = self.collect_params_dict()
            pkl.dump(params_dict, f)

    def load(self, path: str):
        """Load RL object
        Args:
            path (str): Path to saved RL object
        """
        path = self.check_path(path)
        with open(path, "rb") as f:
            params_dict = pkl.load(f)
        self.apply_params_dict(params_dict)

    @property
    def log_iteration(self):
        """Index of the current stats-logging window."""
        return self.iteration // self.stats_freq

    @property
    def filename(self):
        """Unique run filename: start time + hparams suffix + comment."""
        suffix = self.get_tensorboard_hparams_suffix()
        suffix += self.tensorboard_comment
        filename = self.start_time + suffix
        return filename

    @property
    def log_path(self):
        """Full path of this run's log file inside ``log_dir``."""
        log_path = Path(self.log_dir)
        log_path = log_path / self.filename
        return log_path

    def logs_after_iteration(self, buffer: Memory, done: bool = False):
        """Write per-iteration stats (console + tensorboard).

        Args:
            buffer (Memory): Buffer used for tensorboard.
            done (bool, optional): Finalize tensorboard logging because this
                is the last iteration. Defaults to False.
        """
        if self.test_episodes is not None:
            self.stats_logger.test_return = self.test()
        running_return = self.stats_logger.running_return
        if self.verbose:
            if done:
                self.stats_logger.task_done(self.iteration)
            else:
                self.stats_logger.log_stats(self.iteration)
        self.stats_logger.stats.append([self.iteration, running_return])
        self.stats_logger.reset_time_list()
        if self.tensorboard_writer is not None:
            self.add_tensorboard_logs(buffer, done)

    def add_tensorboard_logs(self, buffer: Memory, done: bool):
        """Push returns/actions/observations/loss for this iteration."""
        self.tensorboard_writer.log_running_return(
            self.iteration,
            self.stats_logger.frames,
            self.stats_logger.rollouts,
            self.stats_logger.running_return,
        )
        if self.test_episodes:
            self.tensorboard_writer.log_test_return(
                self.iteration,
                self.stats_logger.frames,
                self.stats_logger.rollouts,
                self.stats_logger.test_return,
            )
        # record a video episode every 5th logging window and on completion;
        # the returned (episode, rendering_time) tuple was unused -- dropped
        if (self.log_iteration % 5) == 0 or done:
            self.tensorboard_writer.record_episode(self, self.iteration, done)
        self.tensorboard_writer.log_returns(self.iteration, buffer)
        self.tensorboard_writer.log_actions(self.iteration, buffer)
        self.tensorboard_writer.log_observations(self.iteration, buffer)
        self.tensorboard_writer.log_loss(self.iteration, self.loss)

    def get_tensorboard_hparams_suffix(self):
        """Build a ``-key value`` suffix string from ``self.hparams``."""
        suffix = ""
        for key, val in self.hparams.items():
            # prefer the configured short name, else strip the "hparams/" prefix
            if key in self.shortnames.keys():
                key = self.shortnames[key]
            else:
                key = key.split("/")[1]
            if isinstance(val, float):
                val = f"{val:.2}"
            else:
                val = str(val)
            suffix += f"-{key}{val}"
        return suffix

    def _get_initial_obs_mean_std(
        self, obs_norm: Any
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Return initial obs mean/std when normalization is enabled.

        Returns:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]: zeros and
            ones on ``self.device`` when ``obs_norm`` is truthy, else
            ``(None, None)``.
        """
        if obs_norm:
            obs_mean = torch.zeros(self.ob_dim, device=self.device)
            obs_std = torch.ones(self.ob_dim, device=self.device)
        else:
            obs_mean = None
            obs_std = None
        return obs_mean, obs_std
| 35.412371 | 88 | 0.605968 | import logging
from pathlib import Path
from typing import Any, Optional, Tuple, Union
import gym
import torch
import pickle as pkl
from rltoolkit import config, utils
from rltoolkit.buffer import Memory
from rltoolkit.stats_logger import StatsLogger
from rltoolkit.tensorboard_logger import TensorboardWriter
logger = logging.getLogger(__name__)
class MetaLearner:
def __init__(
self,
env_name: str,
use_gpu: bool,
debug_mode: bool = config.DEBUG_MODE,
tensorboard_dir: Union[str, None] = config.TENSORBOARD_DIR,
tensorboard_comment: str = config.TENSORBOARD_COMMENT,
):
f"""Class with parameters common for RL and other interactions with environment
Args:
env_name (str): Name of the gym environment.
use_gpu (bool): Use CUDA.
debug_mode (bool, optional): Log additional info.
Defaults to { config.DEBUG_MODE }
tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.
Defaults to { config.TENSORBOARD_DIR }.
tensorboard_comment (str, optional): Comment for tensorboard files.
Defaults to { config.TENSORBOARD_COMMENT }.
"""
self.env_name = env_name
if use_gpu and torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
self.env = gym.make(self.env_name)
self.discrete = isinstance(self.env.action_space, gym.spaces.Discrete)
self.ob_dim = self.env.observation_space.shape[0]
if self.discrete:
self.ac_dim = self.env.action_space.n
self.ac_lim = None
else:
self.ac_dim = self.env.action_space.shape[0]
self.ac_lim = torch.tensor(self.env.action_space.high, device=self.device)
self.obs_mean = torch.zeros(self.ob_dim, device=self.device)
self.obs_std = torch.ones(self.ob_dim, device=self.device)
self.iteration = 0
self.opt = torch.optim.Adam
self.loss = {}
self.debug_mode = debug_mode
self.tensorboard_writer = None
self.tensorboard_comment = (
"_" + tensorboard_comment if tensorboard_comment else ""
)
self.tensorboard_dir = tensorboard_dir
def run_tensorboard_if_needed(self):
if self.tensorboard_writer is None and (self.tensorboard_dir is not None):
self.tensorboard_writer = TensorboardWriter(
env_name=self.env_name,
log_dir=self.tensorboard_dir,
filename=self.filename,
render=self.render,
)
def log_obs_mean_std_tensorboard(self):
self.run_tensorboard_if_needed()
self.tensorboard_writer.log_obs_mean_std(
self.iteration, self.obs_mean, self.obs_std
)
def update_obs_mean_std(self, buffer: Memory) -> Memory:
buffer.update_obs_mean_std()
self.obs_mean = buffer.obs_mean
self.obs_std = buffer.obs_std
if self.debug_mode and self.tensorboard_dir is not None:
self.log_obs_mean_std_tensorboard()
return buffer
class RL(MetaLearner):
def __init__(
self,
env_name: str = config.ENV_NAME,
gamma: float = config.GAMMA,
stats_freq: int = config.STATS_FREQ,
test_episodes: int = config.TEST_EPISODES,
batch_size: int = config.BATCH_SIZE,
iterations: int = config.ITERATIONS,
max_frames: int = None,
return_done: Union[int, None] = config.RETURN_DONE,
log_dir: str = config.LOG_DIR,
use_gpu: bool = config.USE_GPU,
verbose: int = config.VERBOSE,
render: bool = config.RENDER,
*args,
**kwargs,
):
f"""Basic parent class for reinforcement learning algorithms.
Args:
env_name (str, optional): Name of the gym environment.
Defaults to { config.ENV_NAME }.
gamma (float, optional): Discount factor. Defaults to { config.GAMMA }.
stats_freq (int, optional): Frequency of logging the progress.
Defaults to { config.STATS_FREQ }.
batch_size (int, optional): Number of frames used for one algorithm step
(could be higher because batch collection stops when rollout ends).
Defaults to { config.BATCH_SIZE }.
iterations (int, optional): Number of algorithms iterations.
Defaults to { config.ITERATIONS }.
max_frames (int, optional): Limit of frames for training. Defaults to
{ None }.
return_done (Union[int, None], optional): target return, which will stop
training if reached. Defaults to { config.RETURN_DONE }.
log_dir (str, optional): Path for basic logs which includes final model.
Defaults to { config.LOG_DIR }.
use_gpu (bool, optional): Use CUDA. Defaults to { config.USE_GPU }.
verbose (int, optional): Verbose level. Defaults to { config.VERBOSE }.
render (bool, optional): Render rollouts to tensorboard.
Defaults to { config.RENDER }.
debug_mode (bool, optional): Log additional info.
Defaults to { config.DEBUG_MODE }
tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.
Defaults to { config.TENSORBOARD_DIR }.
tensorboard_comment (str, optional): Comment for tensorboard files.
Defaults to { config.TENSORBOARD_COMMENT }.
"""
super().__init__(env_name, use_gpu, *args, **kwargs)
assert iterations > 0, f"Iteration has to be positive not {iterations}"
if max_frames is not None:
assert (
max_frames <= iterations * batch_size
), "max_frames should be smaller or equal than iterations * batch_size"
self.max_frames = max_frames
self.gamma = gamma
self.stats_freq = stats_freq
self.test_episodes = test_episodes
self.batch_size = batch_size
self.iterations = iterations
self.return_done = return_done
if log_dir is not None:
self.log_dir = Path(log_dir)
self.log_dir.mkdir(parents=True, exist_ok=True)
else:
self.log_dir = log_dir
self.verbose = verbose
self.render = render
self.max_ep_len = self.env._max_episode_steps
self.start_time = utils.get_time()
self.hparams = {
"hparams/gamma": self.gamma,
"hparams/batch_size": self.batch_size,
"hparams/type": utils.get_pretty_type_name(self),
}
self.shortnames = config.SHORTNAMES
self.stats_logger = StatsLogger()
def train(self, iterations=None):
f""" Train RL model
Args:
iterations ([type], optional): Number of additional training iterations.
If None performs number of iterations defined in self.iterations.
Otherwise increase global counter by this value to run additional steps.
Defaults to { None }.
"""
self.run_tensorboard_if_needed()
if iterations:
self.iterations += iterations
while self.iteration < self.iterations:
buffer, time_diff = self.perform_iteration()
self.stats_logger.time_list.append(time_diff)
running_return = self.stats_logger.calc_running_return(buffer)
if self.return_done is not None and running_return >= self.return_done:
break
if self.iteration % self.stats_freq == 0:
self.logs_after_iteration(buffer)
if self.log_dir is not None:
self.stats_logger.dump_stats(self.log_path)
self.iteration += 1
if (
self.max_frames is not None
and self.max_frames < self.stats_logger.frames
):
logger.info(f"Reached max_frames at {self.iteration} iteration")
break
self.logs_after_iteration(buffer, done=True)
if self.log_dir is not None:
self.save()
def test(self, episodes=None):
f"""Test policy
Args:
episodes (int): Number of episodes. Defaults to { None }.
Returns:
float: mean episode reward
"""
mean_reward = None
return mean_reward
@utils.measure_time
def perform_iteration(self):
raise NotImplementedError
def save_model(self):
raise NotImplementedError
def check_path(self, path):
if self.filename is None and path is None:
raise AttributeError
elif path is None:
path = str(self.log_path) + ".pkl"
return path
def collect_params_dict(self):
params_dict = {}
params_dict["actor"] = self.actor.state_dict()
params_dict["critic"] = self.critic.state_dict()
params_dict["obs_mean"] = self.obs_mean
params_dict["obs_std"] = self.obs_std
return params_dict
def apply_params_dict(self, params_dict):
self.actor.load_state_dict(params_dict["actor"])
self.critic.load_state_dict(params_dict["critic"])
self.obs_mean = params_dict["obs_mean"]
self.obs_std = params_dict["obs_std"]
def save(self, path: str = None):
f"""Save RL object
Args:
path (str): Path to save
"""
path = self.check_path(path)
with open(path, "wb") as f:
params_dict = self.collect_params_dict()
pkl.dump(params_dict, f)
def load(self, path: str):
path = self.check_path(path)
with open(path, "rb") as f:
params_dict = pkl.load(f)
self.apply_params_dict(params_dict)
@property
def log_iteration(self):
return self.iteration // self.stats_freq
@property
def filename(self):
suffix = self.get_tensorboard_hparams_suffix()
suffix += self.tensorboard_comment
filename = self.start_time + suffix
return filename
@property
def log_path(self):
log_path = Path(self.log_dir)
log_path = log_path / self.filename
return log_path
def logs_after_iteration(self, buffer: Memory, done: bool = False):
f"""Logs writer
Args:
buffer (Memory): Buffer used for tensorboard
done (bool, optional): Finalize tensorboard logging due to last iteration.
Defaults to { False }.
"""
if self.test_episodes is not None:
self.stats_logger.test_return = self.test()
running_return = self.stats_logger.running_return
if self.verbose:
if done:
self.stats_logger.task_done(self.iteration)
else:
self.stats_logger.log_stats(self.iteration)
self.stats_logger.stats.append([self.iteration, running_return])
self.stats_logger.reset_time_list()
if self.tensorboard_writer is not None:
self.add_tensorboard_logs(buffer, done)
def add_tensorboard_logs(self, buffer: Memory, done: bool):
self.tensorboard_writer.log_running_return(
self.iteration,
self.stats_logger.frames,
self.stats_logger.rollouts,
self.stats_logger.running_return,
)
if self.test_episodes:
self.tensorboard_writer.log_test_return(
self.iteration,
self.stats_logger.frames,
self.stats_logger.rollouts,
self.stats_logger.test_return,
)
if (self.log_iteration % 5) == 0 or done:
_, rendering_time = self.tensorboard_writer.record_episode(
self, self.iteration, done
)
self.tensorboard_writer.log_returns(self.iteration, buffer)
self.tensorboard_writer.log_actions(self.iteration, buffer)
self.tensorboard_writer.log_observations(self.iteration, buffer)
self.tensorboard_writer.log_loss(self.iteration, self.loss)
def get_tensorboard_hparams_suffix(self):
suffix = ""
for key, val in self.hparams.items():
if key in self.shortnames.keys():
key = self.shortnames[key]
else:
key = key.split("/")[1]
if isinstance(val, float):
val = f"{val:.2}"
else:
val = str(val)
suffix += f"-{key}{val}"
return suffix
def _get_initial_obs_mean_std(
self, obs_norm: Any
) -> Tuple[Optional[torch.tensor], Optional[torch.tensor]]:
f"""
Check if observations are normalized and if so return initial mean and std,
None otherwise.
Returns:
Tuple[Optional[torch.tensor], Optional[torch.tensor]]: obs mean and std
"""
if obs_norm:
obs_mean = torch.zeros(self.ob_dim, device=self.device)
obs_std = torch.ones(self.ob_dim, device=self.device)
else:
obs_mean = None
obs_std = None
return obs_mean, obs_std
| true | true |
f72f22453b31c307baa4b3ff658060219127ff43 | 2,989 | py | Python | main_window.py | isu-enterprise/dacha | 5f8444e156fbed102fd7e25f9f3766538434659e | [
"Apache-2.0"
] | null | null | null | main_window.py | isu-enterprise/dacha | 5f8444e156fbed102fd7e25f9f3766538434659e | [
"Apache-2.0"
] | null | null | null | main_window.py | isu-enterprise/dacha | 5f8444e156fbed102fd7e25f9f3766538434659e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/main-window.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) UI scaffold for the application's main window.

    NOTE(review): generated from ui/main-window.ui (see file header);
    hand edits here are lost on regeneration -- change the .ui file instead.
    """

    def setupUi(self, MainWindow):
        """Build widgets, menus, toolbar and actions onto *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(673, 558)
        # empty central widget placeholder
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        MainWindow.setCentralWidget(self.centralwidget)
        # menu bar with File and Documents menus
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 673, 22))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuDocuments = QtWidgets.QMenu(self.menubar)
        self.menuDocuments.setObjectName("menuDocuments")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.toolBar = QtWidgets.QToolBar(MainWindow)
        self.toolBar.setObjectName("toolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # actions: Something (disabled placeholder), Quit, Money
        self.actionSomething = QtWidgets.QAction(MainWindow)
        self.actionSomething.setEnabled(False)
        self.actionSomething.setObjectName("actionSomething")
        self.actionQuit = QtWidgets.QAction(MainWindow)
        self.actionQuit.setCheckable(False)
        self.actionQuit.setObjectName("actionQuit")
        self.actionMoney = QtWidgets.QAction(MainWindow)
        self.actionMoney.setObjectName("actionMoney")
        self.menuFile.addAction(self.actionSomething)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionQuit)
        self.menuDocuments.addAction(self.actionMoney)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuDocuments.menuAction())
        self.retranslateUi(MainWindow)
        # auto-connect slots named on_<object>_<signal> by convention
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (translation entry point)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuDocuments.setTitle(_translate("MainWindow", "Documents"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.actionSomething.setText(_translate("MainWindow", "Something"))
        self.actionQuit.setText(_translate("MainWindow", "Quit"))
        self.actionMoney.setText(_translate("MainWindow", "Money"))
if __name__ == "__main__":
    # Manual smoke test: show the generated window standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| 42.7 | 75 | 0.71094 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(673, 558)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 673, 22))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuDocuments = QtWidgets.QMenu(self.menubar)
self.menuDocuments.setObjectName("menuDocuments")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionSomething = QtWidgets.QAction(MainWindow)
self.actionSomething.setEnabled(False)
self.actionSomething.setObjectName("actionSomething")
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setCheckable(False)
self.actionQuit.setObjectName("actionQuit")
self.actionMoney = QtWidgets.QAction(MainWindow)
self.actionMoney.setObjectName("actionMoney")
self.menuFile.addAction(self.actionSomething)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuDocuments.addAction(self.actionMoney)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuDocuments.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuDocuments.setTitle(_translate("MainWindow", "Documents"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
self.actionSomething.setText(_translate("MainWindow", "Something"))
self.actionQuit.setText(_translate("MainWindow", "Quit"))
self.actionMoney.setText(_translate("MainWindow", "Money"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| true | true |
f72f226a2107b3a3b8e9d4e622c36bce5ef81935 | 13,892 | py | Python | rfcs/0010-eaglesong/eaglesong.py | malinoisls01/npx-create-react-app-react-i18n-app-cd-react-i18n-app | ab7d16b9afae8cd9523f0f3afb46dbca83cd42e5 | [
"MIT"
] | 256 | 2018-11-28T04:00:20.000Z | 2022-03-14T13:46:51.000Z | rfcs/0010-eaglesong/eaglesong.py | malinoisls01/npx-create-react-app-react-i18n-app-cd-react-i18n-app | ab7d16b9afae8cd9523f0f3afb46dbca83cd42e5 | [
"MIT"
] | 122 | 2018-11-28T05:20:37.000Z | 2022-03-18T02:31:50.000Z | rfcs/0010-eaglesong/eaglesong.py | malinoisls01/npx-create-react-app-react-i18n-app-cd-react-i18n-app | ab7d16b9afae8cd9523f0f3afb46dbca83cd42e5 | [
"MIT"
] | 155 | 2018-11-28T04:52:13.000Z | 2022-03-28T23:21:12.000Z | def PrintState( state ):
s = ""
for i in range(0, 16):
s += "0x%08x" % state[i]
s += " "
print(s)
def EaglesongPermutation( state ):
    """Apply the full 43-round Eaglesong permutation to *state*.

    Args:
        state: list of 16 32-bit words.

    Returns:
        The permuted list of 16 32-bit words.
    """
    NUM_ROUNDS = 43
    for round_index in range(NUM_ROUNDS):
        state = EaglesongRound(state, round_index)
    return state
# Eaglesong round constants, hoisted to module level: the original rebuilt
# all three tables (16x16 bit matrix, rotation coefficients and 688 injection
# constants) inside the function, i.e. on every one of the 43 round calls per
# permutation. Building them once at import time is behavior-identical.
_BITMATRIX = [[1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1],
              [0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1],
              [0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1],
              [0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1],
              [1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0],
              [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1],
              [1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0],
              [1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
              [0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1],
              [0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1],
              [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1],
              [0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],
              [1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],
              [0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0],
              [0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0],
              [1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1]]

# per-word rotation amounts for the circulant-multiplication layer
_COEFFICIENTS = [[0, 2, 4], [0, 13, 22], [0, 4, 19], [0, 3, 14], [0, 27, 31], [0, 3, 8], [0, 17, 26], [0, 3, 12], [0, 18, 22], [0, 12, 18], [0, 4, 7], [0, 4, 31], [0, 12, 27], [0, 7, 17], [0, 7, 8], [0, 1, 13]]

# 43 rounds x 16 words = 688 injection constants
_INJECTION_CONSTANTS = [
    0x6e9e40ae, 0x71927c02, 0x9a13d3b1, 0xdaec32ad, 0x3d8951cf, 0xe1c9fe9a, 0xb806b54c, 0xacbbf417,
    0xd3622b3b, 0xa082762a, 0x9edcf1c0, 0xa9bada77, 0x7f91e46c, 0xcb0f6e4f, 0x265d9241, 0xb7bdeab0,
    0x6260c9e6, 0xff50dd2a, 0x9036aa71, 0xce161879, 0xd1307cdf, 0x89e456df, 0xf83133e2, 0x65f55c3d,
    0x94871b01, 0xb5d204cd, 0x583a3264, 0x5e165957, 0x4cbda964, 0x675fca47, 0xf4a3033e, 0x2a417322,
    0x3b61432f, 0x7f5532f2, 0xb609973b, 0x1a795239, 0x31b477c9, 0xd2949d28, 0x78969712, 0x0eb87b6e,
    0x7e11d22d, 0xccee88bd, 0xeed07eb8, 0xe5563a81, 0xe7cb6bcf, 0x25de953e, 0x4d05653a, 0x0b831557,
    0x94b9cd77, 0x13f01579, 0x794b4a4a, 0x67e7c7dc, 0xc456d8d4, 0x59689c9b, 0x668456d7, 0x22d2a2e1,
    0x38b3a828, 0x0315ac3c, 0x438d681e, 0xab7109c5, 0x97ee19a8, 0xde062b2e, 0x2c76c47b, 0x0084456f,
    0x908f0fd3, 0xa646551f, 0x3e826725, 0xd521788e, 0x9f01c2b0, 0x93180cdc, 0x92ea1df8, 0x431a9aae,
    0x7c2ea356, 0xda33ad03, 0x46926893, 0x66bde7d7, 0xb501cc75, 0x1f6e8a41, 0x685250f4, 0x3bb1f318,
    0xaf238c04, 0x974ed2ec, 0x5b159e49, 0xd526f8bf, 0x12085626, 0x3e2432a9, 0x6bd20c48, 0x1f1d59da,
    0x18ab1068, 0x80f83cf8, 0x2c8c11c0, 0x7d548035, 0x0ff675c3, 0xfed160bf, 0x74bbbb24, 0xd98e006b,
    0xdeaa47eb, 0x05f2179e, 0x437b0b71, 0xa7c95f8f, 0x00a99d3b, 0x3fc3c444, 0x72686f8e, 0x00fd01a9,
    0xdedc0787, 0xc6af7626, 0x7012fe76, 0xf2a5f7ce, 0x9a7b2eda, 0x5e57fcf2, 0x4da0d4ad, 0x5c63b155,
    0x34117375, 0xd4134c11, 0x2ea77435, 0x5278b6de, 0xab522c4c, 0xbc8fc702, 0xc94a09e4, 0xebb93a9e,
    0x91ecb65e, 0x4c52ecc6, 0x8703bb52, 0xcb2d60aa, 0x30a0538a, 0x1514f10b, 0x157f6329, 0x3429dc3d,
    0x5db73eb2, 0xa7a1a969, 0x7286bd24, 0x0df6881e, 0x3785ba5f, 0xcd04623a, 0x02758170, 0xd827f556,
    0x99d95191, 0x84457eb1, 0x58a7fb22, 0xd2967c5f, 0x4f0c33f6, 0x4a02099a, 0xe0904821, 0x94124036,
    0x496a031b, 0x780b69c4, 0xcf1a4927, 0x87a119b8, 0xcdfaf4f8, 0x4cf9cd0f, 0x27c96a84, 0x6d11117e,
    0x7f8cf847, 0x74ceede5, 0xc88905e6, 0x60215841, 0x7172875a, 0x736e993a, 0x010aa53c, 0x43d53c2b,
    0xf0d91a93, 0x0d983b56, 0xf816663c, 0xe5d13363, 0x0a61737c, 0x09d51150, 0x83a5ac2f, 0x3e884905,
    0x7b01aeb5, 0x600a6ea7, 0xb7678f7b, 0x72b38977, 0x068018f2, 0xce6ae45b, 0x29188aa8, 0xe5a0b1e9,
    0xc04c2b86, 0x8bd14d75, 0x648781f3, 0xdbae1e0a, 0xddcdd8ae, 0xab4d81a3, 0x446baaba, 0x1cc0c19d,
    0x17be4f90, 0x82c0e65d, 0x676f9c95, 0x5c708db2, 0x6fd4c867, 0xa5106ef0, 0x19dde49d, 0x78182f95,
    0xd089cd81, 0xa32e98fe, 0xbe306c82, 0x6cd83d8c, 0x037f1bde, 0x0b15722d, 0xeddc1e22, 0x93c76559,
    0x8a2f571b, 0x92cc81b4, 0x021b7477, 0x67523904, 0xc95dbccc, 0xac17ee9d, 0x944e46bc, 0x0781867e,
    0xc854dd9d, 0x26e2c30c, 0x858c0416, 0x6d397708, 0xebe29c58, 0xc80ced86, 0xd496b4ab, 0xbe45e6f5,
    0x10d24706, 0xacf8187a, 0x96f523cb, 0x2227e143, 0x78c36564, 0x4643adc2, 0x4729d97a, 0xcff93e0d,
    0x25484bbd, 0x91c6798e, 0x95f773f4, 0x44204675, 0x2eda57ba, 0x06d313ef, 0xeeaa4466, 0x2dfa7530,
    0xa8af0c9b, 0x39f1535e, 0x0cc2b7bd, 0x38a76c0e, 0x4f41071d, 0xcdaf2475, 0x49a6eff8, 0x01621748,
    0x36ebacab, 0xbd6d9a29, 0x44d1cd65, 0x40815dfd, 0x55fa5a1a, 0x87cce9e9, 0xae559b45, 0xd76b4c26,
    0x637d60ad, 0xde29f5f9, 0x97491cbb, 0xfb350040, 0xffe7f997, 0x201c9dcd, 0xe61320e9, 0xa90987a3,
    0xe24afa83, 0x61c1e6fc, 0xcc87ff62, 0xf1c9d8fa, 0x4fd04546, 0x90ecc76e, 0x46e456b9, 0x305dceb8,
    0xf627e68c, 0x2d286815, 0xc705bbfd, 0x101b6df3, 0x892dae62, 0xd5b7fb44, 0xea1d5c94, 0x5332e3cb,
    0xf856f88a, 0xb341b0e9, 0x28408d9d, 0x5421bc17, 0xeb9af9bc, 0x602371c5, 0x67985a91, 0xd774907f,
    0x7c4d697d, 0x9370b0b8, 0x6ff5cebb, 0x7d465744, 0x674ceac0, 0xea9102fc, 0x0de94784, 0xc793de69,
    0xfe599bb1, 0xc6ad952f, 0x6d6ca9c3, 0x928c3f91, 0xf9022f05, 0x24a164dc, 0xe5e98cd3, 0x7649efdb,
    0x6df3bcdb, 0x5d1e9ff1, 0x17f5d010, 0xe2686ea1, 0x6eac77fe, 0x7bb5c585, 0x88d90cbb, 0x18689163,
    0x67c9efa5, 0xc0b76d9b, 0x960efbab, 0xbd872807, 0x70f4c474, 0x56c29d20, 0xd1541d15, 0x88137033,
    0xe3f02b3e, 0xb6d9b28d, 0x53a077ba, 0xeedcd29e, 0xa50a6c1d, 0x12c2801e, 0x52ba335b, 0x35984614,
    0xe2599aa8, 0xaf94ed1d, 0xd90d4767, 0x202c7d07, 0x77bec4f4, 0xfa71bc80, 0xfc5c8b76, 0x8d0fbbfc,
    0xda366dc6, 0x8b32a0c7, 0x1b36f7fc, 0x6642dcbc, 0x6fe7e724, 0x8b5fa782, 0xc4227404, 0x3a7d1da7,
    0x517ed658, 0x8a18df6d, 0x3e5c9b23, 0x1fbd51ef, 0x1470601d, 0x3400389c, 0x676b065d, 0x8864ad80,
    0xea6f1a9c, 0x2db484e1, 0x608785f0, 0x8dd384af, 0x69d26699, 0x409c4e16, 0x77f9986a, 0x7f491266,
    0x883ea6cf, 0xeaa06072, 0xfa2e5db5, 0x352594b4, 0x9156bb89, 0xa2fbbbfb, 0xac3989c7, 0x6e2422b1,
    0x581f3560, 0x1009a9b5, 0x7e5ad9cd, 0xa9fc0a6e, 0x43e5998e, 0x7f8778f9, 0xf038f8e1, 0x5415c2e8,
    0x6499b731, 0xb82389ae, 0x05d4d819, 0x0f06440e, 0xf1735aa0, 0x986430ee, 0x47ec952c, 0xbf149cc5,
    0xb3cb2cb6, 0x3f41e8c2, 0x271ac51b, 0x48ac5ded, 0xf76a0469, 0x717bba4d, 0x4f5c90d6, 0x3b74f756,
    0x1824110a, 0xa4fd43e3, 0x1eb0507c, 0xa9375c08, 0x157c59a7, 0x0cad8f51, 0xd66031a0, 0xabb5343f,
    0xe533fa43, 0x1996e2bb, 0xd7953a71, 0xd2529b94, 0x58f0fa07, 0x4c9b1877, 0x057e990d, 0x8bfe19c4,
    0xa8e2c0c9, 0x99fcaada, 0x69d2aaca, 0xdc1c4642, 0xf4d22307, 0x7fe27e8c, 0x1366aa07, 0x1594e637,
    0xce1066bf, 0xdb922552, 0x9930b52a, 0xaeaa9a3e, 0x31ff7eb4, 0x5e1f945a, 0x150ac49c, 0x0ccdac2d,
    0xd8a8a217, 0xb82ea6e5, 0xd6a74659, 0x67b7e3e6, 0x836eef4a, 0xb6f90074, 0x7fa3ea4b, 0xcb038123,
    0xbf069f55, 0x1fa83fc4, 0xd6ebdb23, 0x16f0a137, 0x19a7110d, 0x5ff3b55f, 0xfb633868, 0xb466f845,
    0xbce0c198, 0x88404296, 0xddbdd88b, 0x7fc52546, 0x63a553f8, 0xa728405a, 0x378a2bce, 0x6862e570,
    0xefb77e7d, 0xc611625e, 0x32515c15, 0x6984b765, 0xe8405976, 0x9ba386fd, 0xd4eed4d9, 0xf8fe0309,
    0x0ce54601, 0xbaf879c2, 0xd8524057, 0x1d8c1d7a, 0x72c0a3a9, 0x5a1ffbde, 0x82f33a45, 0x5143f446,
    0x29c7e182, 0xe536c32f, 0x5a6f245b, 0x44272adb, 0xcb701d9c, 0xf76137ec, 0x0841f145, 0xe7042ecc,
    0xf1277dd7, 0x745cf92c, 0xa8fe65fe, 0xd3e2d7cf, 0x54c513ef, 0x6079bc2d, 0xb66336b0, 0x101e383b,
    0xbcd75753, 0x25be238a, 0x56a6f0be, 0xeeffcc17, 0x5ea31f3d, 0x0ae772f5, 0xf76de3de, 0x1bbecdad,
    0xc9107d43, 0xf7e38dce, 0x618358cd, 0x5c833f04, 0xf6975906, 0xde4177e5, 0x67d314dc, 0xb4760f3e,
    0x56ce5888, 0x0e8345a8, 0xbff6b1bf, 0x78dfb112, 0xf1709c1e, 0x7bb8ed8b, 0x902402b9, 0xdaa64ae0,
    0x46b71d89, 0x7eee035f, 0xbe376509, 0x99648f3a, 0x0863ea1f, 0x49ad8887, 0x79bdecc5, 0x3c10b568,
    0x5f2e4bae, 0x04ef20ab, 0x72f8ce7b, 0x521e1ebe, 0x14525535, 0x2e8af95b, 0x9094ccfd, 0xbcf36713,
    0xc73953ef, 0xd4b91474, 0x6554ec2d, 0xe3885c96, 0x03dc73b7, 0x931688a9, 0xcbbef182, 0x2b77cfc9,
    0x632a32bd, 0xd2115dcc, 0x1ae5533d, 0x32684e13, 0x4cc5a004, 0x13321bde, 0x62cbd38d, 0x78383a3b,
    0xd00686f1, 0x9f601ee7, 0x7eaf23de, 0x3110c492, 0x9c351209, 0x7eb89d52, 0x6d566eac, 0xc2efd226,
    0x32e9fac5, 0x52227274, 0x09f84725, 0xb8d0b605, 0x72291f02, 0x71b5c34b, 0x3dbfcbb8, 0x04a02263,
    0x55ba597f, 0xd4e4037d, 0xc813e1be, 0xffddeefa, 0xc3c058f3, 0x87010f2e, 0x1dfcf55f, 0xc694eeeb,
    0xa9c01a74, 0x98c2fc6b, 0xe57e1428, 0xdd265a71, 0x836b956d, 0x7e46ab1a, 0x5835d541, 0x50b32505,
    0xe640913c, 0xbb486079, 0xfe496263, 0x113c5b69, 0x93cd6620, 0x5efe823b, 0x2d657b40, 0xb46dfc6c,
    0x57710c69, 0xfe9fadeb, 0xb5f8728a, 0xe3224170, 0xca28b751, 0xfdabae56, 0x5ab12c3c, 0xa697c457,
    0xd28fa2b7, 0x056579f2, 0x9fd9d810, 0xe3557478, 0xd88d89ab, 0xa72a9422, 0x6d47abd0, 0x405bcbd9,
    0x6f83ebaf, 0x13caec76, 0xfceb9ee2, 0x2e922df7, 0xce9856df, 0xc05e9322, 0x2772c854, 0xb67f2a32,
    0x6d1af28d, 0x3a78cf77, 0xdff411e4, 0x61c74ca9, 0xed8b842e, 0x72880845, 0x6e857085, 0xc6404932,
    0xee37f6bc, 0x27116f48, 0x5e9ec45a, 0x8ea2a51f, 0xa5573db7, 0xa746d036, 0x486b4768, 0x5b438f3b,
    0x18c54a5c, 0x64fcf08e, 0xe993cdc1, 0x35c1ead3, 0x9de07de7, 0x321b841c, 0x87423c5e, 0x071aa0f6,
    0x962eb75b, 0xbb06bdd2, 0xdcdb5363, 0x389752f2, 0x83d9cc88, 0xd014adc6, 0xc71121bb, 0x2372f938,
    0xcaff2650, 0x62be8951, 0x56dccaff, 0xac4084c0, 0x09712e95, 0x1d3c288f, 0x1b085744, 0xe1d3cfef,
    0x5c9a812e, 0x6611fd59, 0x85e46044, 0x1981d885, 0x5a4c903f, 0x43f30d4b, 0x7d1d601b, 0xdd3c3391,
    0x030ec65e, 0xc12878cd, 0x72e795fe, 0xd0c76abd, 0x1ec085db, 0x7cbb61fa, 0x93e8dd1e, 0x8582eb06,
    0x73563144, 0x049d4e7e, 0x5fd5aefe, 0x7b842a00, 0x75ced665, 0xbb32d458, 0x4e83bba7, 0x8f15151f,
    0x7795a125, 0xf0842455, 0x499af99d, 0x565cc7fa, 0xa3b1278d, 0x3f27ce74, 0x96ca058e, 0x8a497443,
    0xa6fb8cae, 0xc115aa21, 0x17504923, 0xe4932402, 0xaea886c2, 0x8eb79af5, 0xebd5ea6b, 0xc7980d3b,
    0x71369315, 0x796e6a66, 0x3a7ec708, 0xb05175c8, 0xe02b74e7, 0xeb377ad3, 0x6c8c1f54, 0xb980c374,
    0x59aee281, 0x449cb799, 0xe01f5605, 0xed0e085e, 0xc9a1a3b4, 0xaac481b1, 0xc935c39c, 0xb7d8ce7f]


def EaglesongRound( state, index ):
    """Apply one round of the Eaglesong permutation.

    Args:
        state: list of 16 32-bit words (ints in [0, 2**32)); not mutated.
        index: round number in [0, 43); selects the injection constants.

    Returns:
        A new list of 16 32-bit words.
    """
    # bit-matrix layer: output word j is the XOR of the input words selected
    # by column j of the GF(2) matrix (multiplying by a 0/1 entry then XORing)
    new = [0 for i in range(0, 16)]
    for j in range(0, 16):
        for k in range(0, 16):
            new[j] = new[j] ^ (state[k] * _BITMATRIX[k][j])
            new[j] = new[j] & 0xffffffff # truncate to 32 bits, if necessary
    state = new

    # circulant multiplication: XOR of three left-rotations of each word
    # (shift-left XOR shift-right pairs, masked to 32 bits below)
    for i in range(0, 16):
        acc = 0
        for j in range(0, 3):
            acc = acc ^ (state[i] << _COEFFICIENTS[i][j]) ^ (state[i] >> (32 - _COEFFICIENTS[i][j]))
        state[i] = acc & 0xffffffff # truncate to 32 bits, if necessary

    # constants injection: XOR this round's 16 constants into the state
    for i in range(0, 16):
        state[i] = state[i] ^ _INJECTION_CONSTANTS[index * 16 + i]

    # add / rotate / add on each adjacent pair of words
    for i in range(0, 8):
        state[2*i] = (state[2*i] + state[2*i+1]) & 0xffffffff # truncate to 32 bits, if necessary
        state[2*i] = (state[2*i] >> 24) ^ ((state[2*i] << 8) & 0xffffffff) # rotate bytes
        state[2*i+1] = (state[2*i+1] >> 8) ^ ((state[2*i+1] << 24) & 0xffffffff) # rotate bytes
        state[2*i+1] = (state[2*i] + state[2*i+1]) & 0xffffffff # truncate to 32 bits, if necessary

    return state
def EaglesongSponge( input_bytes, num_output_bytes, delimiter ):
    """Sponge construction over the Eaglesong permutation.

    Absorbs ``input_bytes`` (terminated by the single ``delimiter`` byte)
    into a 16-word state, 256 bits per permutation call, then squeezes out
    ``num_output_bytes`` bytes of digest.
    """
    # sponge parameters, in bits; both must be multiples of 32
    capacity = 256
    rate = 256
    state = [0] * 16
    # absorbing phase: one permutation per rate-sized chunk, including the
    # chunk that carries the delimiter byte just past the end of the input
    n_blocks = ((len(input_bytes) + 1) * 8 + rate - 1) // rate
    for block in range(n_blocks):
        for word in range(rate // 32):
            acc = 0
            for byte in range(4):
                pos = block * rate // 8 + word * 4 + byte
                if pos < len(input_bytes):
                    acc = (acc << 8) ^ input_bytes[pos]
                elif pos == len(input_bytes):
                    acc = (acc << 8) ^ delimiter
            state[word] = state[word] ^ acc
        state = EaglesongPermutation(state)
    # squeezing phase: emit the state little-endian-per-word, permuting
    # between rate-sized output chunks
    output_bytes = [0] * num_output_bytes
    for block in range(num_output_bytes // (rate // 8)):
        for word in range(rate // 32):
            for byte in range(4):
                output_bytes[block * rate // 8 + word * 4 + byte] = (state[word] >> (8 * byte)) & 0xff
        state = EaglesongPermutation(state)
    return output_bytes
def EaglesongHash( input_bytes ):
    """Eaglesong hash: run the sponge in hashing mode (delimiter 0x06)
    and take a 32-byte (256-bit) digest."""
    data = bytearray(input_bytes)
    return EaglesongSponge(data, 32, 0x06)
| 75.5 | 213 | 0.628707 | def PrintState( state ):
s = ""
for i in range(0, 16):
s += "0x%08x" % state[i]
s += " "
print(s)
def EaglesongPermutation( state ):
N = 43
for i in range(0, N):
state = EaglesongRound(state, i)
return state
def EaglesongRound( state, index ):
bitmatrix = [[1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1],
[0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1],
[1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0],
[1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1]]
coefficients = [[0, 2, 4], [0, 13, 22], [0, 4, 19], [0, 3, 14], [0, 27, 31], [0, 3, 8], [0, 17, 26], [0, 3, 12], [0, 18, 22], [0, 12, 18], [0, 4, 7], [0, 4, 31], [0, 12, 27], [0, 7, 17], [0, 7, 8], [0, 1, 13]]
injection_constants = [ 0x6e9e40ae , 0x71927c02 , 0x9a13d3b1 , 0xdaec32ad , 0x3d8951cf , 0xe1c9fe9a , 0xb806b54c , 0xacbbf417 ,
0xd3622b3b , 0xa082762a , 0x9edcf1c0 , 0xa9bada77 , 0x7f91e46c , 0xcb0f6e4f , 0x265d9241 , 0xb7bdeab0 ,
0x6260c9e6 , 0xff50dd2a , 0x9036aa71 , 0xce161879 , 0xd1307cdf , 0x89e456df , 0xf83133e2 , 0x65f55c3d ,
0x94871b01 , 0xb5d204cd , 0x583a3264 , 0x5e165957 , 0x4cbda964 , 0x675fca47 , 0xf4a3033e , 0x2a417322 ,
0x3b61432f , 0x7f5532f2 , 0xb609973b , 0x1a795239 , 0x31b477c9 , 0xd2949d28 , 0x78969712 , 0x0eb87b6e ,
0x7e11d22d , 0xccee88bd , 0xeed07eb8 , 0xe5563a81 , 0xe7cb6bcf , 0x25de953e , 0x4d05653a , 0x0b831557 ,
0x94b9cd77 , 0x13f01579 , 0x794b4a4a , 0x67e7c7dc , 0xc456d8d4 , 0x59689c9b , 0x668456d7 , 0x22d2a2e1 ,
0x38b3a828 , 0x0315ac3c , 0x438d681e , 0xab7109c5 , 0x97ee19a8 , 0xde062b2e , 0x2c76c47b , 0x0084456f ,
0x908f0fd3 , 0xa646551f , 0x3e826725 , 0xd521788e , 0x9f01c2b0 , 0x93180cdc , 0x92ea1df8 , 0x431a9aae ,
0x7c2ea356 , 0xda33ad03 , 0x46926893 , 0x66bde7d7 , 0xb501cc75 , 0x1f6e8a41 , 0x685250f4 , 0x3bb1f318 ,
0xaf238c04 , 0x974ed2ec , 0x5b159e49 , 0xd526f8bf , 0x12085626 , 0x3e2432a9 , 0x6bd20c48 , 0x1f1d59da ,
0x18ab1068 , 0x80f83cf8 , 0x2c8c11c0 , 0x7d548035 , 0x0ff675c3 , 0xfed160bf , 0x74bbbb24 , 0xd98e006b ,
0xdeaa47eb , 0x05f2179e , 0x437b0b71 , 0xa7c95f8f , 0x00a99d3b , 0x3fc3c444 , 0x72686f8e , 0x00fd01a9 ,
0xdedc0787 , 0xc6af7626 , 0x7012fe76 , 0xf2a5f7ce , 0x9a7b2eda , 0x5e57fcf2 , 0x4da0d4ad , 0x5c63b155 ,
0x34117375 , 0xd4134c11 , 0x2ea77435 , 0x5278b6de , 0xab522c4c , 0xbc8fc702 , 0xc94a09e4 , 0xebb93a9e ,
0x91ecb65e , 0x4c52ecc6 , 0x8703bb52 , 0xcb2d60aa , 0x30a0538a , 0x1514f10b , 0x157f6329 , 0x3429dc3d ,
0x5db73eb2 , 0xa7a1a969 , 0x7286bd24 , 0x0df6881e , 0x3785ba5f , 0xcd04623a , 0x02758170 , 0xd827f556 ,
0x99d95191 , 0x84457eb1 , 0x58a7fb22 , 0xd2967c5f , 0x4f0c33f6 , 0x4a02099a , 0xe0904821 , 0x94124036 ,
0x496a031b , 0x780b69c4 , 0xcf1a4927 , 0x87a119b8 , 0xcdfaf4f8 , 0x4cf9cd0f , 0x27c96a84 , 0x6d11117e ,
0x7f8cf847 , 0x74ceede5 , 0xc88905e6 , 0x60215841 , 0x7172875a , 0x736e993a , 0x010aa53c , 0x43d53c2b ,
0xf0d91a93 , 0x0d983b56 , 0xf816663c , 0xe5d13363 , 0x0a61737c , 0x09d51150 , 0x83a5ac2f , 0x3e884905 ,
0x7b01aeb5 , 0x600a6ea7 , 0xb7678f7b , 0x72b38977 , 0x068018f2 , 0xce6ae45b , 0x29188aa8 , 0xe5a0b1e9 ,
0xc04c2b86 , 0x8bd14d75 , 0x648781f3 , 0xdbae1e0a , 0xddcdd8ae , 0xab4d81a3 , 0x446baaba , 0x1cc0c19d ,
0x17be4f90 , 0x82c0e65d , 0x676f9c95 , 0x5c708db2 , 0x6fd4c867 , 0xa5106ef0 , 0x19dde49d , 0x78182f95 ,
0xd089cd81 , 0xa32e98fe , 0xbe306c82 , 0x6cd83d8c , 0x037f1bde , 0x0b15722d , 0xeddc1e22 , 0x93c76559 ,
0x8a2f571b , 0x92cc81b4 , 0x021b7477 , 0x67523904 , 0xc95dbccc , 0xac17ee9d , 0x944e46bc , 0x0781867e ,
0xc854dd9d , 0x26e2c30c , 0x858c0416 , 0x6d397708 , 0xebe29c58 , 0xc80ced86 , 0xd496b4ab , 0xbe45e6f5 ,
0x10d24706 , 0xacf8187a , 0x96f523cb , 0x2227e143 , 0x78c36564 , 0x4643adc2 , 0x4729d97a , 0xcff93e0d ,
0x25484bbd , 0x91c6798e , 0x95f773f4 , 0x44204675 , 0x2eda57ba , 0x06d313ef , 0xeeaa4466 , 0x2dfa7530 ,
0xa8af0c9b , 0x39f1535e , 0x0cc2b7bd , 0x38a76c0e , 0x4f41071d , 0xcdaf2475 , 0x49a6eff8 , 0x01621748 ,
0x36ebacab , 0xbd6d9a29 , 0x44d1cd65 , 0x40815dfd , 0x55fa5a1a , 0x87cce9e9 , 0xae559b45 , 0xd76b4c26 ,
0x637d60ad , 0xde29f5f9 , 0x97491cbb , 0xfb350040 , 0xffe7f997 , 0x201c9dcd , 0xe61320e9 , 0xa90987a3 ,
0xe24afa83 , 0x61c1e6fc , 0xcc87ff62 , 0xf1c9d8fa , 0x4fd04546 , 0x90ecc76e , 0x46e456b9 , 0x305dceb8 ,
0xf627e68c , 0x2d286815 , 0xc705bbfd , 0x101b6df3 , 0x892dae62 , 0xd5b7fb44 , 0xea1d5c94 , 0x5332e3cb ,
0xf856f88a , 0xb341b0e9 , 0x28408d9d , 0x5421bc17 , 0xeb9af9bc , 0x602371c5 , 0x67985a91 , 0xd774907f ,
0x7c4d697d , 0x9370b0b8 , 0x6ff5cebb , 0x7d465744 , 0x674ceac0 , 0xea9102fc , 0x0de94784 , 0xc793de69 ,
0xfe599bb1 , 0xc6ad952f , 0x6d6ca9c3 , 0x928c3f91 , 0xf9022f05 , 0x24a164dc , 0xe5e98cd3 , 0x7649efdb ,
0x6df3bcdb , 0x5d1e9ff1 , 0x17f5d010 , 0xe2686ea1 , 0x6eac77fe , 0x7bb5c585 , 0x88d90cbb , 0x18689163 ,
0x67c9efa5 , 0xc0b76d9b , 0x960efbab , 0xbd872807 , 0x70f4c474 , 0x56c29d20 , 0xd1541d15 , 0x88137033 ,
0xe3f02b3e , 0xb6d9b28d , 0x53a077ba , 0xeedcd29e , 0xa50a6c1d , 0x12c2801e , 0x52ba335b , 0x35984614 ,
0xe2599aa8 , 0xaf94ed1d , 0xd90d4767 , 0x202c7d07 , 0x77bec4f4 , 0xfa71bc80 , 0xfc5c8b76 , 0x8d0fbbfc ,
0xda366dc6 , 0x8b32a0c7 , 0x1b36f7fc , 0x6642dcbc , 0x6fe7e724 , 0x8b5fa782 , 0xc4227404 , 0x3a7d1da7 ,
0x517ed658 , 0x8a18df6d , 0x3e5c9b23 , 0x1fbd51ef , 0x1470601d , 0x3400389c , 0x676b065d , 0x8864ad80 ,
0xea6f1a9c , 0x2db484e1 , 0x608785f0 , 0x8dd384af , 0x69d26699 , 0x409c4e16 , 0x77f9986a , 0x7f491266 ,
0x883ea6cf , 0xeaa06072 , 0xfa2e5db5 , 0x352594b4 , 0x9156bb89 , 0xa2fbbbfb , 0xac3989c7 , 0x6e2422b1 ,
0x581f3560 , 0x1009a9b5 , 0x7e5ad9cd , 0xa9fc0a6e , 0x43e5998e , 0x7f8778f9 , 0xf038f8e1 , 0x5415c2e8 ,
0x6499b731 , 0xb82389ae , 0x05d4d819 , 0x0f06440e , 0xf1735aa0 , 0x986430ee , 0x47ec952c , 0xbf149cc5 ,
0xb3cb2cb6 , 0x3f41e8c2 , 0x271ac51b , 0x48ac5ded , 0xf76a0469 , 0x717bba4d , 0x4f5c90d6 , 0x3b74f756 ,
0x1824110a , 0xa4fd43e3 , 0x1eb0507c , 0xa9375c08 , 0x157c59a7 , 0x0cad8f51 , 0xd66031a0 , 0xabb5343f ,
0xe533fa43 , 0x1996e2bb , 0xd7953a71 , 0xd2529b94 , 0x58f0fa07 , 0x4c9b1877 , 0x057e990d , 0x8bfe19c4 ,
0xa8e2c0c9 , 0x99fcaada , 0x69d2aaca , 0xdc1c4642 , 0xf4d22307 , 0x7fe27e8c , 0x1366aa07 , 0x1594e637 ,
0xce1066bf , 0xdb922552 , 0x9930b52a , 0xaeaa9a3e , 0x31ff7eb4 , 0x5e1f945a , 0x150ac49c , 0x0ccdac2d ,
0xd8a8a217 , 0xb82ea6e5 , 0xd6a74659 , 0x67b7e3e6 , 0x836eef4a , 0xb6f90074 , 0x7fa3ea4b , 0xcb038123 ,
0xbf069f55 , 0x1fa83fc4 , 0xd6ebdb23 , 0x16f0a137 , 0x19a7110d , 0x5ff3b55f , 0xfb633868 , 0xb466f845 ,
0xbce0c198 , 0x88404296 , 0xddbdd88b , 0x7fc52546 , 0x63a553f8 , 0xa728405a , 0x378a2bce , 0x6862e570 ,
0xefb77e7d , 0xc611625e , 0x32515c15 , 0x6984b765 , 0xe8405976 , 0x9ba386fd , 0xd4eed4d9 , 0xf8fe0309 ,
0x0ce54601 , 0xbaf879c2 , 0xd8524057 , 0x1d8c1d7a , 0x72c0a3a9 , 0x5a1ffbde , 0x82f33a45 , 0x5143f446 ,
0x29c7e182 , 0xe536c32f , 0x5a6f245b , 0x44272adb , 0xcb701d9c , 0xf76137ec , 0x0841f145 , 0xe7042ecc ,
0xf1277dd7 , 0x745cf92c , 0xa8fe65fe , 0xd3e2d7cf , 0x54c513ef , 0x6079bc2d , 0xb66336b0 , 0x101e383b ,
0xbcd75753 , 0x25be238a , 0x56a6f0be , 0xeeffcc17 , 0x5ea31f3d , 0x0ae772f5 , 0xf76de3de , 0x1bbecdad ,
0xc9107d43 , 0xf7e38dce , 0x618358cd , 0x5c833f04 , 0xf6975906 , 0xde4177e5 , 0x67d314dc , 0xb4760f3e ,
0x56ce5888 , 0x0e8345a8 , 0xbff6b1bf , 0x78dfb112 , 0xf1709c1e , 0x7bb8ed8b , 0x902402b9 , 0xdaa64ae0 ,
0x46b71d89 , 0x7eee035f , 0xbe376509 , 0x99648f3a , 0x0863ea1f , 0x49ad8887 , 0x79bdecc5 , 0x3c10b568 ,
0x5f2e4bae , 0x04ef20ab , 0x72f8ce7b , 0x521e1ebe , 0x14525535 , 0x2e8af95b , 0x9094ccfd , 0xbcf36713 ,
0xc73953ef , 0xd4b91474 , 0x6554ec2d , 0xe3885c96 , 0x03dc73b7 , 0x931688a9 , 0xcbbef182 , 0x2b77cfc9 ,
0x632a32bd , 0xd2115dcc , 0x1ae5533d , 0x32684e13 , 0x4cc5a004 , 0x13321bde , 0x62cbd38d , 0x78383a3b ,
0xd00686f1 , 0x9f601ee7 , 0x7eaf23de , 0x3110c492 , 0x9c351209 , 0x7eb89d52 , 0x6d566eac , 0xc2efd226 ,
0x32e9fac5 , 0x52227274 , 0x09f84725 , 0xb8d0b605 , 0x72291f02 , 0x71b5c34b , 0x3dbfcbb8 , 0x04a02263 ,
0x55ba597f , 0xd4e4037d , 0xc813e1be , 0xffddeefa , 0xc3c058f3 , 0x87010f2e , 0x1dfcf55f , 0xc694eeeb ,
0xa9c01a74 , 0x98c2fc6b , 0xe57e1428 , 0xdd265a71 , 0x836b956d , 0x7e46ab1a , 0x5835d541 , 0x50b32505 ,
0xe640913c , 0xbb486079 , 0xfe496263 , 0x113c5b69 , 0x93cd6620 , 0x5efe823b , 0x2d657b40 , 0xb46dfc6c ,
0x57710c69 , 0xfe9fadeb , 0xb5f8728a , 0xe3224170 , 0xca28b751 , 0xfdabae56 , 0x5ab12c3c , 0xa697c457 ,
0xd28fa2b7 , 0x056579f2 , 0x9fd9d810 , 0xe3557478 , 0xd88d89ab , 0xa72a9422 , 0x6d47abd0 , 0x405bcbd9 ,
0x6f83ebaf , 0x13caec76 , 0xfceb9ee2 , 0x2e922df7 , 0xce9856df , 0xc05e9322 , 0x2772c854 , 0xb67f2a32 ,
0x6d1af28d , 0x3a78cf77 , 0xdff411e4 , 0x61c74ca9 , 0xed8b842e , 0x72880845 , 0x6e857085 , 0xc6404932 ,
0xee37f6bc , 0x27116f48 , 0x5e9ec45a , 0x8ea2a51f , 0xa5573db7 , 0xa746d036 , 0x486b4768 , 0x5b438f3b ,
0x18c54a5c , 0x64fcf08e , 0xe993cdc1 , 0x35c1ead3 , 0x9de07de7 , 0x321b841c , 0x87423c5e , 0x071aa0f6 ,
0x962eb75b , 0xbb06bdd2 , 0xdcdb5363 , 0x389752f2 , 0x83d9cc88 , 0xd014adc6 , 0xc71121bb , 0x2372f938 ,
0xcaff2650 , 0x62be8951 , 0x56dccaff , 0xac4084c0 , 0x09712e95 , 0x1d3c288f , 0x1b085744 , 0xe1d3cfef ,
0x5c9a812e , 0x6611fd59 , 0x85e46044 , 0x1981d885 , 0x5a4c903f , 0x43f30d4b , 0x7d1d601b , 0xdd3c3391 ,
0x030ec65e , 0xc12878cd , 0x72e795fe , 0xd0c76abd , 0x1ec085db , 0x7cbb61fa , 0x93e8dd1e , 0x8582eb06 ,
0x73563144 , 0x049d4e7e , 0x5fd5aefe , 0x7b842a00 , 0x75ced665 , 0xbb32d458 , 0x4e83bba7 , 0x8f15151f ,
0x7795a125 , 0xf0842455 , 0x499af99d , 0x565cc7fa , 0xa3b1278d , 0x3f27ce74 , 0x96ca058e , 0x8a497443 ,
0xa6fb8cae , 0xc115aa21 , 0x17504923 , 0xe4932402 , 0xaea886c2 , 0x8eb79af5 , 0xebd5ea6b , 0xc7980d3b ,
0x71369315 , 0x796e6a66 , 0x3a7ec708 , 0xb05175c8 , 0xe02b74e7 , 0xeb377ad3 , 0x6c8c1f54 , 0xb980c374 ,
0x59aee281 , 0x449cb799 , 0xe01f5605 , 0xed0e085e , 0xc9a1a3b4 , 0xaac481b1 , 0xc935c39c , 0xb7d8ce7f ]
new = [0 for i in range(0,16)]
for j in range(0, 16):
for k in range(0, 16):
new[j] = new[j] ^ (state[k] * bitmatrix[k][j])
new[j] = new[j] & 0xffffffff
state = new
for i in range(0, 16):
acc = 0
for j in range(0, 3):
acc = acc ^ (state[i] << coefficients[i][j]) ^ (state[i] >> (32-coefficients[i][j]))
state[i] = acc & 0xffffffff
for i in range(0, 16):
state[i] = state[i] ^ injection_constants[index*16 + i]
for i in range(0, 8):
state[2*i] = (state[2*i] + state[2*i+1]) & 0xffffffff
state[2*i] = (state[2*i] >> 24) ^ ((state[2*i] << 8) & 0xffffffff)
state[2*i+1] = (state[2*i+1] >> 8) ^ ((state[2*i+1] << 24) & 0xffffffff)
state[2*i+1] = (state[2*i] + state[2*i+1]) & 0xffffffff
return state
def EaglesongSponge( input_bytes, num_output_bytes, delimiter ):
capacity = 256
rate = 256
state = [0 for i in range(0, 16)]
for i in range(0, ((len(input_bytes)+1)*8+rate-1) // rate):
for j in range(0, rate//32):
integer = 0
for k in range(0, 4):
if i*rate//8 + j*4 + k < len(input_bytes):
integer = (integer << 8) ^ input_bytes[i*rate//8 + j*4 + k]
elif i*rate//8 + j*4 + k == len(input_bytes):
integer = (integer << 8) ^ delimiter
state[j] = state[j] ^ integer
state = EaglesongPermutation(state)
output_bytes = [0] * num_output_bytes
for i in range(0, num_output_bytes//(rate//8)):
for j in range(0, rate//32):
for k in range(0, 4):
output_bytes[i*rate//8 + j*4 + k] = (state[j] >> (8*k)) & 0xff
state = EaglesongPermutation(state)
return output_bytes
def EaglesongHash( input_bytes ):
return EaglesongSponge(bytearray(input_bytes), 32, 0x06)
| true | true |
f72f22a0fe94fdce3a0eac34cb4c8736d4df683c | 19,363 | py | Python | pynndescent/utils.py | samggreenberg/pynndescent | f97bc2fe01e4e59c5dad20ed23b9cb47e8182b6c | [
"BSD-2-Clause"
] | null | null | null | pynndescent/utils.py | samggreenberg/pynndescent | f97bc2fe01e4e59c5dad20ed23b9cb47e8182b6c | [
"BSD-2-Clause"
] | null | null | null | pynndescent/utils.py | samggreenberg/pynndescent | f97bc2fe01e4e59c5dad20ed23b9cb47e8182b6c | [
"BSD-2-Clause"
] | null | null | null | # Author: Leland McInnes <leland.mcinnes@gmail.com>
#
# License: BSD 2 clause
import time
import numba
from numba.core import types
import numba.experimental.structref as structref
import numpy as np
@numba.njit("void(i8[:], i8)", cache=True)
def seed(rng_state, seed):
    """Initialise every word of ``rng_state`` from the integer ``seed``,
    offset by 0xFFFF before broadcasting."""
    for i in range(rng_state.shape[0]):
        rng_state[i] = seed + 0xFFFF
@numba.njit("i4(i8[:])", cache=True)
def tau_rand_int(state):
    """A fast (pseudo)-random number generator.

    The three-word state is advanced with shift/xor/mask steps and the
    words are combined by xor; the update pattern matches a combined
    Tausworthe-style generator.  ``state`` is mutated in place.

    Parameters
    ----------
    state: array of int64, shape (3,)
        The internal state of the rng

    Returns
    -------
    A (pseudo)-random int32 value
    """
    # Each word update: mask off low bits, shift left, truncate to 32 bits,
    # then xor in a right-shifted mix of the old word.
    state[0] = (((state[0] & 4294967294) << 12) & 0xFFFFFFFF) ^ (
        (((state[0] << 13) & 0xFFFFFFFF) ^ state[0]) >> 19
    )
    state[1] = (((state[1] & 4294967288) << 4) & 0xFFFFFFFF) ^ (
        (((state[1] << 2) & 0xFFFFFFFF) ^ state[1]) >> 25
    )
    state[2] = (((state[2] & 4294967280) << 17) & 0xFFFFFFFF) ^ (
        (((state[2] << 3) & 0xFFFFFFFF) ^ state[2]) >> 11
    )
    # combine the three words into the output value
    return state[0] ^ state[1] ^ state[2]
@numba.njit("f4(i8[:])", cache=True)
def tau_rand(state):
    """Draw a (pseudo)-random float32 in the interval [0, 1].

    Parameters
    ----------
    state: array of int64, shape (3,)
        The internal state of the rng; mutated in place.

    Returns
    -------
    A (pseudo)-random float32 in the interval [0, 1]
    """
    # scale the raw int32 draw by the max positive int32 and fold negatives
    return abs(float(tau_rand_int(state)) / 0x7FFFFFFF)
@numba.njit(
    [
        "f4(f4[::1])",
        numba.types.float32(
            numba.types.Array(numba.types.float32, 1, "C", readonly=True)
        ),
    ],
    locals={
        "dim": numba.types.intp,
        "i": numba.types.uint32,
        # "result": numba.types.float32, # This provides speed, but causes errors in corner cases
    },
    fastmath=True,
    cache=True,
)
def norm(vec):
    """Compute the (standard l2) norm of a vector.

    Parameters
    ----------
    vec: array of shape (dim,)
        A one-dimensional contiguous float32 vector (see the compiled
        signatures above; a readonly view is also accepted).

    Returns
    -------
    The l2 norm of vec.
    """
    # accumulate the sum of squares, then take the square root
    result = 0.0
    dim = vec.shape[0]
    for i in range(dim):
        result += vec[i] * vec[i]
    return np.sqrt(result)
@numba.njit(cache=True)
def rejection_sample(n_samples, pool_size, rng_state):
    """Draw ``n_samples`` distinct integers from ``range(pool_size)``.

    Uniqueness is enforced by rejection: a candidate that collides with an
    earlier draw is discarded and redrawn.

    Parameters
    ----------
    n_samples: int
        The number of random samples to select from the pool
    pool_size: int
        The size of the total pool of candidates to sample from
    rng_state: array of int64, shape (3,)
        Internal state of the random number generator

    Returns
    -------
    sample: array of shape(n_samples,)
        The ``n_samples`` randomly selected elements from the pool.
    """
    sample = np.empty(n_samples, dtype=np.int64)
    for i in range(n_samples):
        while True:
            candidate = tau_rand_int(rng_state) % pool_size
            # linear scan over the draws made so far to detect a collision
            duplicate = False
            for k in range(i):
                if sample[k] == candidate:
                    duplicate = True
                    break
            if not duplicate:
                break
        sample[i] = candidate
    return sample
@structref.register
class HeapType(types.StructRef):
    # Numba structref type class backing Heap; the default field handling
    # is sufficient, so no body is needed.
    pass
class Heap(structref.StructRefProxy):
    # Python-side proxy for the numba Heap structref.  Structref fields are
    # only reachable from nopython code, so each property routes through a
    # small @njit accessor defined at module level.
    @property
    def indices(self):
        # neighbor index array
        return Heap_get_indices(self)
    @property
    def distances(self):
        # neighbor distance array
        return Heap_get_distances(self)
    @property
    def flags(self):
        # per-neighbor "new candidate" flag array
        return Heap_get_flags(self)
@numba.njit(cache=True)
def Heap_get_flags(self):
    # jit-compiled accessor so the structref field is reachable from Python
    return self.flags
@numba.njit(cache=True)
def Heap_get_distances(self):
    # jit-compiled accessor so the structref field is reachable from Python
    return self.distances
@numba.njit(cache=True)
def Heap_get_indices(self):
    # jit-compiled accessor so the structref field is reachable from Python
    return self.indices
structref.define_proxy(Heap, HeapType, ["indices", "distances", "flags"])
# Heap = namedtuple("Heap", ("indices", "distances", "flags"))
@numba.njit(cache=True)
def make_heap(n_points, size):
    """Constructor for the numba enabled heap objects. The heaps are used
    for approximate nearest neighbor search, maintaining a list of potential
    neighbors sorted by their distance. We also flag if potential neighbors
    are newly added to the list or not.

    Parameters
    ----------
    n_points: int
        The number of graph_data points to track in the heap.
    size: int
        The number of items to keep on the heap for each graph_data point.

    Returns
    -------
    heap: A (indices, distances, flags) triple of arrays, each of shape
        (n_points, size), suitable for passing to other numba enabled
        heap functions.
    """
    # -1 marks an empty neighbor slot
    indices = np.full((int(n_points), int(size)), -1, dtype=np.int32)
    # np.inf rather than the legacy np.infty alias: the alias was removed in
    # NumPy 2.0, and the rest of this module already uses np.inf
    distances = np.full((int(n_points), int(size)), np.inf, dtype=np.float32)
    flags = np.zeros((int(n_points), int(size)), dtype=np.uint8)
    result = (indices, distances, flags)
    return result
@numba.njit(cache=True)
def siftdown(heap1, heap2, elt):
    """Restore the max-heap property below position ``elt``.

    ``heap1`` holds the weights defining the heap order and ``heap2`` holds
    the companion elements; entries of both are swapped in lockstep.
    """
    size = heap1.shape[0]
    while True:
        left = 2 * elt + 1
        if left >= size:
            break
        right = left + 1
        # find the largest among elt and its (up to two) children
        largest = elt
        if heap1[largest] < heap1[left]:
            largest = left
        if right < size and heap1[largest] < heap1[right]:
            largest = right
        if largest == elt:
            break
        heap1[elt], heap1[largest] = heap1[largest], heap1[elt]
        heap2[elt], heap2[largest] = heap2[largest], heap2[elt]
        elt = largest
@numba.njit(parallel=True, cache=False)
def deheap_sort(indices, distances):
    """Reorder each row of a heap pair by increasing distance, in place.

    This is the second half of heapsort: the first half is unnecessary
    because the rows are already max-heaps.

    Parameters
    ----------
    indices : array of shape (n_samples, n_neighbors)
        The graph indices to sort by distance.
    distances : array of shape (n_samples, n_neighbors)
        The corresponding edge distance.

    Returns
    -------
    indices, distances: arrays of shape (n_samples, n_neighbors)
        The indices and distances sorted by increasing distance.
    """
    n_rows = indices.shape[0]
    n_cols = indices.shape[1]
    for row in numba.prange(n_rows):
        # repeatedly move the current max (root) to the shrinking tail,
        # then sift the displaced element back down into the prefix
        for tail in range(n_cols - 1, 0, -1):
            indices[row, 0], indices[row, tail] = indices[row, tail], indices[row, 0]
            distances[row, 0], distances[row, tail] = (
                distances[row, tail],
                distances[row, 0],
            )
            siftdown(distances[row, :tail], indices[row, :tail], 0)
    return indices, distances
# @numba.njit()
# def smallest_flagged(heap, row):
# """Search the heap for the smallest element that is
# still flagged.
#
# Parameters
# ----------
# heap: array of shape (3, n_samples, n_neighbors)
# The heaps to search
#
# row: int
# Which of the heaps to search
#
# Returns
# -------
# index: int
# The index of the smallest flagged element
# of the ``row``th heap, or -1 if no flagged
# elements remain in the heap.
# """
# ind = heap[0][row]
# dist = heap[1][row]
# flag = heap[2][row]
#
# min_dist = np.inf
# result_index = -1
#
# for i in range(ind.shape[0]):
# if flag[i] == 1 and dist[i] < min_dist:
# min_dist = dist[i]
# result_index = i
#
# if result_index >= 0:
# flag[result_index] = 0.0
# return int(ind[result_index])
# else:
# return -1
@numba.njit(parallel=True, locals={"idx": numba.types.int64}, cache=False)
def new_build_candidates(current_graph, max_candidates, rng_state, n_threads):
    """Build a heap of candidate neighbors for nearest neighbor descent. For
    each vertex the candidate neighbors are any current neighbors, and any
    vertices that have the vertex as one of their nearest neighbors.

    Parameters
    ----------
    current_graph: heap
        The current state of the graph for nearest neighbor descent.
    max_candidates: int
        The maximum number of new candidate neighbors.
    rng_state: array of int64, shape (3,)
        The internal state of the rng
    n_threads: int
        Number of shards for the parallel build; writes to row r are only
        performed by the thread with r % n_threads == n, avoiding races.

    Returns
    -------
    candidate_neighbors: A heap with an array of (randomly sorted) candidate
    neighbors for each vertex in the graph.
    """
    current_indices = current_graph[0]
    current_flags = current_graph[2]
    n_vertices = current_indices.shape[0]
    n_neighbors = current_indices.shape[1]
    # candidate heaps keyed by random priority so that, when full, they hold
    # a random subsample of the candidates; -1 marks an empty slot
    new_candidate_indices = np.full((n_vertices, max_candidates), -1, dtype=np.int32)
    new_candidate_priority = np.full(
        (n_vertices, max_candidates), np.inf, dtype=np.float32
    )
    old_candidate_indices = np.full((n_vertices, max_candidates), -1, dtype=np.int32)
    old_candidate_priority = np.full(
        (n_vertices, max_candidates), np.inf, dtype=np.float32
    )
    for n in numba.prange(n_threads):
        # per-thread rng state so threads draw independent priorities
        local_rng_state = rng_state + n
        for i in range(n_vertices):
            for j in range(n_neighbors):
                idx = current_indices[i, j]
                isn = current_flags[i, j]
                if idx < 0:
                    continue
                d = tau_rand(local_rng_state)
                # "new" neighbors (flag set) go in the new-candidate heaps,
                # others in the old-candidate heaps; each edge is recorded in
                # both directions, but only by the thread owning the row
                if isn:
                    if i % n_threads == n:
                        checked_heap_push(
                            new_candidate_priority[i], new_candidate_indices[i], d, idx
                        )
                    if idx % n_threads == n:
                        checked_heap_push(
                            new_candidate_priority[idx],
                            new_candidate_indices[idx],
                            d,
                            i,
                        )
                else:
                    if i % n_threads == n:
                        checked_heap_push(
                            old_candidate_priority[i], old_candidate_indices[i], d, idx
                        )
                    if idx % n_threads == n:
                        checked_heap_push(
                            old_candidate_priority[idx],
                            old_candidate_indices[idx],
                            d,
                            i,
                        )
    indices = current_graph[0]
    flags = current_graph[2]
    # clear the "new" flag on neighbors that made it into the new-candidate
    # sample: they will be processed this round and need not be revisited
    for i in numba.prange(n_vertices):
        for j in range(n_neighbors):
            idx = indices[i, j]
            for k in range(max_candidates):
                if new_candidate_indices[i, k] == idx:
                    flags[i, j] = 0
                    break
    return new_candidate_indices, old_candidate_indices
@numba.njit("b1(u1[::1],i4)", cache=True)
def has_been_visited(table, candidate):
    """Test ``candidate``'s bit in the packed bitfield ``table``
    (one bit per candidate, eight candidates per byte)."""
    return table[candidate >> 3] & (1 << (candidate & 7))
@numba.njit("void(u1[::1],i4)", cache=True)
def mark_visited(table, candidate):
    """Set ``candidate``'s bit in the packed bitfield ``table``
    (one bit per candidate, eight candidates per byte)."""
    table[candidate >> 3] |= 1 << (candidate & 7)
@numba.njit(
    "i4(f4[::1],i4[::1],f4,i4)",
    fastmath=True,
    locals={
        "size": numba.types.intp,
        "i": numba.types.uint16,
        "ic1": numba.types.uint16,
        "ic2": numba.types.uint16,
        "i_swap": numba.types.uint16,
    },
    cache=True,
)
def simple_heap_push(priorities, indices, p, n):
    """Push the pair (p, n) onto a fixed-size max-heap pair if ``p`` beats
    the current root priority.

    ``priorities`` is a max-heap of distances and ``indices`` holds the
    matching neighbor ids; the root is overwritten and the new pair is
    sifted down to its proper position.

    Returns 1 if the pair was inserted, 0 otherwise.
    """
    if p >= priorities[0]:
        return 0
    size = priorities.shape[0]
    # overwrite the root with the new pair
    priorities[0] = p
    indices[0] = n
    # sift down: promote the larger child until the heap invariant holds
    i = 0
    while True:
        ic1 = 2 * i + 1
        ic2 = ic1 + 1
        if ic1 >= size:
            break
        # pick the larger child; ties go to the left child
        if ic2 < size and priorities[ic2] > priorities[ic1]:
            i_swap = ic2
        else:
            i_swap = ic1
        if priorities[i_swap] > p:
            priorities[i] = priorities[i_swap]
            indices[i] = indices[i_swap]
            i = i_swap
        else:
            break
    priorities[i] = p
    indices[i] = n
    return 1
@numba.njit(
    "i4(f4[::1],i4[::1],f4,i4)",
    fastmath=True,
    locals={
        "size": numba.types.intp,
        "i": numba.types.uint16,
        "ic1": numba.types.uint16,
        "ic2": numba.types.uint16,
        "i_swap": numba.types.uint16,
    },
    cache=True,
)
def checked_heap_push(priorities, indices, p, n):
    """Push (p, n) onto a fixed-size max-heap pair, rejecting duplicates.

    Behaves like ``simple_heap_push`` but first scans ``indices`` and
    refuses the push if ``n`` is already present.

    Returns 1 if the pair was inserted, 0 otherwise.
    """
    if p >= priorities[0]:
        return 0
    size = priorities.shape[0]
    # reject if n is already somewhere in the heap
    for i in range(size):
        if n == indices[i]:
            return 0
    # overwrite the root with the new pair
    priorities[0] = p
    indices[0] = n
    # sift down: promote the larger child until the heap invariant holds
    i = 0
    while True:
        ic1 = 2 * i + 1
        ic2 = ic1 + 1
        if ic1 >= size:
            break
        # pick the larger child; ties go to the left child
        if ic2 < size and priorities[ic2] > priorities[ic1]:
            i_swap = ic2
        else:
            i_swap = ic1
        if priorities[i_swap] > p:
            priorities[i] = priorities[i_swap]
            indices[i] = indices[i_swap]
            i = i_swap
        else:
            break
    priorities[i] = p
    indices[i] = n
    return 1
@numba.njit(
    "i4(f4[::1],i4[::1],u1[::1],f4,i4,u1)",
    fastmath=True,
    locals={
        "size": numba.types.intp,
        "i": numba.types.uint16,
        "ic1": numba.types.uint16,
        "ic2": numba.types.uint16,
        "i_swap": numba.types.uint16,
    },
    cache=True,
)
def checked_flagged_heap_push(priorities, indices, flags, p, n, f):
    """Push (p, n, f) onto a fixed-size max-heap triple, rejecting duplicates.

    Like ``checked_heap_push`` but also carries a per-entry flag ``f``
    (the "new candidate" marker) through the sift-down.

    Returns 1 if the triple was inserted, 0 otherwise.
    """
    if p >= priorities[0]:
        return 0
    size = priorities.shape[0]
    # reject if n is already somewhere in the heap
    for i in range(size):
        if n == indices[i]:
            return 0
    # overwrite the root with the new triple
    priorities[0] = p
    indices[0] = n
    flags[0] = f
    # sift down: promote the larger child until the heap invariant holds
    i = 0
    while True:
        ic1 = 2 * i + 1
        ic2 = ic1 + 1
        if ic1 >= size:
            break
        # pick the larger child; ties go to the left child
        if ic2 < size and priorities[ic2] > priorities[ic1]:
            i_swap = ic2
        else:
            i_swap = ic1
        if priorities[i_swap] > p:
            priorities[i] = priorities[i_swap]
            indices[i] = indices[i_swap]
            flags[i] = flags[i_swap]
            i = i_swap
        else:
            break
    priorities[i] = p
    indices[i] = n
    flags[i] = f
    return 1
@numba.njit(
    parallel=True,
    locals={
        "p": numba.int32,
        "q": numba.int32,
        "d": numba.float32,
        "added": numba.uint8,
        "n": numba.uint32,
        "i": numba.uint32,
        "j": numba.uint32,
    },
    cache=False,
)
def apply_graph_updates_low_memory(current_graph, updates, n_threads):
    """Apply batched (p, q, d) edge updates to the current graph heap.

    Each update is pushed in both directions (q into p's heap and p into
    q's heap).  Writes to row r are only performed by the prange thread
    with r % n_threads == n, so no locking is required.  Returns the
    number of heap entries that actually changed.
    """
    n_changes = 0
    priorities = current_graph[1]
    indices = current_graph[0]
    flags = current_graph[2]
    # n_threads = numba.get_num_threads()
    for n in numba.prange(n_threads):
        # every thread scans all updates but only writes its own shard
        for i in range(len(updates)):
            for j in range(len(updates[i])):
                p, q, d = updates[i][j]
                if p == -1 or q == -1:
                    continue
                if p % n_threads == n:
                    added = checked_flagged_heap_push(
                        priorities[p], indices[p], flags[p], d, q, 1
                    )
                    n_changes += added
                if q % n_threads == n:
                    added = checked_flagged_heap_push(
                        priorities[q], indices[q], flags[q], d, p, 1
                    )
                    n_changes += added
    return n_changes
@numba.njit(locals={"p": numba.types.int64, "q": numba.types.int64}, cache=True)
def apply_graph_updates_high_memory(current_graph, updates, in_graph):
    """Apply batched (p, q, d) edge updates to the current graph heap,
    using per-vertex membership sets to skip already-present edges.

    Parameters
    ----------
    current_graph: (indices, distances, flags) heap triple.
    updates: list of lists of (p, q, d) tuples; p or q of -1 marks a
        placeholder to be skipped.
    in_graph: list of sets; in_graph[v] holds the neighbor ids currently
        in v's heap, and is kept in sync with the pushes made here.

    Returns
    -------
    n_changes: int
        The number of heap entries that actually changed.
    """
    n_changes = 0
    for i in range(len(updates)):
        for j in range(len(updates[i])):
            p, q, d = updates[i][j]
            if p == -1 or q == -1:
                continue
            if q in in_graph[p] and p in in_graph[q]:
                continue
            elif q in in_graph[p]:
                pass
            else:
                # push q into p's heap
                added = checked_flagged_heap_push(
                    current_graph[1][p],
                    current_graph[0][p],
                    current_graph[2][p],
                    d,
                    q,
                    1,
                )
                if added > 0:
                    in_graph[p].add(q)
                    n_changes += added
            if p == q or p in in_graph[q]:
                pass
            else:
                # BUG FIX: push p into q's heap.  Previously this branch
                # pushed q into p's heap a second time while recording
                # in_graph[q].add(p), so the reverse edge was never added
                # and the membership set disagreed with the heap contents.
                # This now mirrors apply_graph_updates_low_memory.
                added = checked_flagged_heap_push(
                    current_graph[1][q],
                    current_graph[0][q],
                    current_graph[2][q],
                    d,
                    p,
                    1,
                )
                if added > 0:
                    in_graph[q].add(p)
                    n_changes += added
    return n_changes
@numba.njit(cache=True)
def initalize_heap_from_graph_indices(heap, graph_indices, data, metric):
    """Populate ``heap`` with distances computed from a dense neighbor-index
    matrix; negative entries in ``graph_indices`` mark missing neighbors.

    (The misspelled name is retained for backward compatibility.)
    """
    n_rows = graph_indices.shape[0]
    n_cols = graph_indices.shape[1]
    for row in range(n_rows):
        for col in range(n_cols):
            neighbor = graph_indices[row, col]
            if neighbor < 0:
                continue  # empty neighbor slot
            d = metric(data[row], data[neighbor])
            checked_flagged_heap_push(
                heap[1][row], heap[0][row], heap[2][row], d, neighbor, 1
            )
    return heap
@numba.njit(parallel=True, cache=False)
def sparse_initalize_heap_from_graph_indices(
    heap, graph_indices, data_indptr, data_indices, data_vals, metric
):
    """Populate ``heap`` with distances from a dense neighbor-index matrix
    over sparse (CSR) data.

    Parameters
    ----------
    heap: (indices, distances, flags) triple as produced by ``make_heap``.
    graph_indices: array of shape (n_points, n_neighbors)
        Neighbor indices; negative entries mark missing neighbors.
    data_indptr, data_indices, data_vals:
        CSR components of the sparse data matrix.
    metric: callable(ind1, data1, ind2, data2) -> float
        Sparse distance function.

    Returns
    -------
    heap, populated in place.
    """
    for i in numba.prange(graph_indices.shape[0]):
        # row i's CSR slices are loop-invariant: hoisted out of the inner
        # loop instead of being re-sliced for every neighbor
        ind1 = data_indices[data_indptr[i] : data_indptr[i + 1]]
        data1 = data_vals[data_indptr[i] : data_indptr[i + 1]]
        for idx in range(graph_indices.shape[1]):
            j = graph_indices[i, idx]
            if j < 0:
                # skip empty neighbor slots, consistent with the dense
                # initalize_heap_from_graph_indices
                continue
            ind2 = data_indices[data_indptr[j] : data_indptr[j + 1]]
            data2 = data_vals[data_indptr[j] : data_indptr[j + 1]]
            d = metric(ind1, data1, ind2, data2)
            checked_flagged_heap_push(heap[1][i], heap[0][i], heap[2][i], d, j, 1)
    return heap
# Generates a timestamp for use in logging messages when verbose=True
def ts():
    """Return the current local time as a human-readable string."""
    # time.ctime() with no argument formats the current time, identically
    # to time.ctime(time.time())
    return time.ctime()
| 27.157083 | 97 | 0.549553 |
import time
import numba
from numba.core import types
import numba.experimental.structref as structref
import numpy as np
@numba.njit("void(i8[:], i8)", cache=True)
def seed(rng_state, seed):
rng_state.fill(seed + 0xFFFF)
@numba.njit("i4(i8[:])", cache=True)
def tau_rand_int(state):
state[0] = (((state[0] & 4294967294) << 12) & 0xFFFFFFFF) ^ (
(((state[0] << 13) & 0xFFFFFFFF) ^ state[0]) >> 19
)
state[1] = (((state[1] & 4294967288) << 4) & 0xFFFFFFFF) ^ (
(((state[1] << 2) & 0xFFFFFFFF) ^ state[1]) >> 25
)
state[2] = (((state[2] & 4294967280) << 17) & 0xFFFFFFFF) ^ (
(((state[2] << 3) & 0xFFFFFFFF) ^ state[2]) >> 11
)
return state[0] ^ state[1] ^ state[2]
@numba.njit("f4(i8[:])", cache=True)
def tau_rand(state):
integer = tau_rand_int(state)
return abs(float(integer) / 0x7FFFFFFF)
@numba.njit(
[
"f4(f4[::1])",
numba.types.float32(
numba.types.Array(numba.types.float32, 1, "C", readonly=True)
),
],
locals={
"dim": numba.types.intp,
"i": numba.types.uint32,
c):
result = 0.0
dim = vec.shape[0]
for i in range(dim):
result += vec[i] * vec[i]
return np.sqrt(result)
@numba.njit(cache=True)
def rejection_sample(n_samples, pool_size, rng_state):
result = np.empty(n_samples, dtype=np.int64)
for i in range(n_samples):
reject_sample = True
j = 0
while reject_sample:
j = tau_rand_int(rng_state) % pool_size
for k in range(i):
if j == result[k]:
break
else:
reject_sample = False
result[i] = j
return result
@structref.register
class HeapType(types.StructRef):
pass
class Heap(structref.StructRefProxy):
@property
def indices(self):
return Heap_get_indices(self)
@property
def distances(self):
return Heap_get_distances(self)
@property
def flags(self):
return Heap_get_flags(self)
@numba.njit(cache=True)
def Heap_get_flags(self):
return self.flags
@numba.njit(cache=True)
def Heap_get_distances(self):
return self.distances
@numba.njit(cache=True)
def Heap_get_indices(self):
return self.indices
structref.define_proxy(Heap, HeapType, ["indices", "distances", "flags"])
@numba.njit(cache=True)
def make_heap(n_points, size):
indices = np.full((int(n_points), int(size)), -1, dtype=np.int32)
distances = np.full((int(n_points), int(size)), np.infty, dtype=np.float32)
flags = np.zeros((int(n_points), int(size)), dtype=np.uint8)
result = (indices, distances, flags)
return result
@numba.njit(cache=True)
def siftdown(heap1, heap2, elt):
while elt * 2 + 1 < heap1.shape[0]:
left_child = elt * 2 + 1
right_child = left_child + 1
swap = elt
if heap1[swap] < heap1[left_child]:
swap = left_child
if right_child < heap1.shape[0] and heap1[swap] < heap1[right_child]:
swap = right_child
if swap == elt:
break
else:
heap1[elt], heap1[swap] = heap1[swap], heap1[elt]
heap2[elt], heap2[swap] = heap2[swap], heap2[elt]
elt = swap
@numba.njit(parallel=True, cache=False)
def deheap_sort(indices, distances):
for i in numba.prange(indices.shape[0]):
for j in range(indices.shape[1] - 1, 0, -1):
indices[i, 0], indices[i, j] = indices[i, j], indices[i, 0]
distances[i, 0], distances[i, j] = distances[i, j], distances[i, 0]
siftdown(distances[i, :j], indices[i, :j], 0)
return indices, distances
# still flagged.
#
# Parameters
# ----------
# heap: array of shape (3, n_samples, n_neighbors)
# The heaps to search
#
# row: int
# Which of the heaps to search
#
# Returns
# -------
# index: int
# The index of the smallest flagged element
# of the ``row``th heap, or -1 if no flagged
# elements remain in the heap.
# """
@numba.njit(parallel=True, locals={"idx": numba.types.int64}, cache=False)
def new_build_candidates(current_graph, max_candidates, rng_state, n_threads):
    """Build per-vertex "new" and "old" candidate lists for NN-descent.

    Each neighbor (i, idx) is routed into the *new* candidate heaps when
    its flag is set, else into the *old* heaps, in both directions
    (i -> idx and idx -> i).  Priorities are random draws (via
    ``tau_rand``), so each vertex keeps a random sample of at most
    ``max_candidates`` candidates.  Each thread ``n`` only writes heap
    rows ``r`` with ``r % n_threads == n``, which keeps the parallel
    loop race-free.  Finally, flags of neighbors that made it into the
    new candidate list are cleared.
    """
    current_indices = current_graph[0]
    current_flags = current_graph[2]
    n_vertices = current_indices.shape[0]
    n_neighbors = current_indices.shape[1]
    new_candidate_indices = np.full((n_vertices, max_candidates), -1, dtype=np.int32)
    new_candidate_priority = np.full(
        (n_vertices, max_candidates), np.inf, dtype=np.float32
    )
    old_candidate_indices = np.full((n_vertices, max_candidates), -1, dtype=np.int32)
    old_candidate_priority = np.full(
        (n_vertices, max_candidates), np.inf, dtype=np.float32
    )
    for n in numba.prange(n_threads):
        # Per-thread RNG state so draws are independent across threads.
        local_rng_state = rng_state + n
        for i in range(n_vertices):
            for j in range(n_neighbors):
                idx = current_indices[i, j]
                isn = current_flags[i, j]
                if idx < 0:
                    # Empty heap slot.
                    continue
                d = tau_rand(local_rng_state)
                if isn:
                    if i % n_threads == n:
                        checked_heap_push(
                            new_candidate_priority[i], new_candidate_indices[i], d, idx
                        )
                    if idx % n_threads == n:
                        checked_heap_push(
                            new_candidate_priority[idx],
                            new_candidate_indices[idx],
                            d,
                            i,
                        )
                else:
                    if i % n_threads == n:
                        checked_heap_push(
                            old_candidate_priority[i], old_candidate_indices[i], d, idx
                        )
                    if idx % n_threads == n:
                        checked_heap_push(
                            old_candidate_priority[idx],
                            old_candidate_indices[idx],
                            d,
                            i,
                        )
    indices = current_graph[0]
    flags = current_graph[2]
    # Clear the "new" flag of any neighbor that was sampled as a new candidate.
    for i in numba.prange(n_vertices):
        for j in range(n_neighbors):
            idx = indices[i, j]
            for k in range(max_candidates):
                if new_candidate_indices[i, k] == idx:
                    flags[i, j] = 0
                    break
    return new_candidate_indices, old_candidate_indices
@numba.njit("b1(u1[::1],i4)", cache=True)
def has_been_visited(table, candidate):
    """Test bit ``candidate`` of a packed bit table (8 candidates per byte)."""
    loc = candidate >> 3
    mask = 1 << (candidate & 7)
    return table[loc] & mask
@numba.njit("void(u1[::1],i4)", cache=True)
def mark_visited(table, candidate):
    """Set bit ``candidate`` of a packed bit table (8 candidates per byte)."""
    loc = candidate >> 3
    mask = 1 << (candidate & 7)
    table[loc] |= mask
    return
@numba.njit(
    "i4(f4[::1],i4[::1],f4,i4)",
    fastmath=True,
    locals={
        "size": numba.types.intp,
        "i": numba.types.uint16,
        "ic1": numba.types.uint16,
        "ic2": numba.types.uint16,
        "i_swap": numba.types.uint16,
    },
    cache=True,
)
def simple_heap_push(priorities, indices, p, n):
    """Push value ``n`` with priority ``p`` onto a fixed-size max-heap.

    ``priorities[0]`` is the current worst (largest) priority; a new
    element is accepted only if ``p`` beats it.  The root is replaced
    and then sifted down to restore the heap invariant, keeping
    ``indices`` in lockstep.  Returns 1 if the element was inserted,
    0 otherwise.  No duplicate check (see ``checked_heap_push``).
    """
    if p >= priorities[0]:
        return 0
    size = priorities.shape[0]
    # Replace the root, then sift the new value down.
    priorities[0] = p
    indices[0] = n
    i = 0
    while True:
        ic1 = 2 * i + 1
        ic2 = ic1 + 1
        if ic1 >= size:
            break
        elif ic2 >= size:
            if priorities[ic1] > p:
                i_swap = ic1
            else:
                break
        elif priorities[ic1] >= priorities[ic2]:
            if p < priorities[ic1]:
                i_swap = ic1
            else:
                break
        else:
            if p < priorities[ic2]:
                i_swap = ic2
            else:
                break
        priorities[i] = priorities[i_swap]
        indices[i] = indices[i_swap]
        i = i_swap
    priorities[i] = p
    indices[i] = n
    return 1
@numba.njit(
    "i4(f4[::1],i4[::1],f4,i4)",
    fastmath=True,
    locals={
        "size": numba.types.intp,
        "i": numba.types.uint16,
        "ic1": numba.types.uint16,
        "ic2": numba.types.uint16,
        "i_swap": numba.types.uint16,
    },
    cache=True,
)
def checked_heap_push(priorities, indices, p, n):
    """Like ``simple_heap_push`` but rejects duplicates.

    Performs a linear scan of ``indices`` first and returns 0 when ``n``
    is already present; otherwise replaces the root (current worst
    priority) and sifts down.  Returns 1 on insertion, 0 otherwise.
    """
    if p >= priorities[0]:
        return 0
    size = priorities.shape[0]
    # Reject elements already in the heap.
    for i in range(size):
        if n == indices[i]:
            return 0
    priorities[0] = p
    indices[0] = n
    i = 0
    while True:
        ic1 = 2 * i + 1
        ic2 = ic1 + 1
        if ic1 >= size:
            break
        elif ic2 >= size:
            if priorities[ic1] > p:
                i_swap = ic1
            else:
                break
        elif priorities[ic1] >= priorities[ic2]:
            if p < priorities[ic1]:
                i_swap = ic1
            else:
                break
        else:
            if p < priorities[ic2]:
                i_swap = ic2
            else:
                break
        priorities[i] = priorities[i_swap]
        indices[i] = indices[i_swap]
        i = i_swap
    priorities[i] = p
    indices[i] = n
    return 1
@numba.njit(
    "i4(f4[::1],i4[::1],u1[::1],f4,i4,u1)",
    fastmath=True,
    locals={
        "size": numba.types.intp,
        "i": numba.types.uint16,
        "ic1": numba.types.uint16,
        "ic2": numba.types.uint16,
        "i_swap": numba.types.uint16,
    },
    cache=True,
)
def checked_flagged_heap_push(priorities, indices, flags, p, n, f):
    """Duplicate-checked heap push that also maintains a flags array.

    Same contract as ``checked_heap_push``, with a third parallel array
    ``flags``: the flag byte ``f`` travels with the inserted element
    through every sift-down swap.  Returns 1 on insertion, 0 otherwise.
    """
    if p >= priorities[0]:
        return 0
    size = priorities.shape[0]
    # Reject elements already in the heap.
    for i in range(size):
        if n == indices[i]:
            return 0
    priorities[0] = p
    indices[0] = n
    flags[0] = f
    i = 0
    while True:
        ic1 = 2 * i + 1
        ic2 = ic1 + 1
        if ic1 >= size:
            break
        elif ic2 >= size:
            if priorities[ic1] > p:
                i_swap = ic1
            else:
                break
        elif priorities[ic1] >= priorities[ic2]:
            if p < priorities[ic1]:
                i_swap = ic1
            else:
                break
        else:
            if p < priorities[ic2]:
                i_swap = ic2
            else:
                break
        priorities[i] = priorities[i_swap]
        indices[i] = indices[i_swap]
        flags[i] = flags[i_swap]
        i = i_swap
    priorities[i] = p
    indices[i] = n
    flags[i] = f
    return 1
@numba.njit(
    parallel=True,
    locals={
        "p": numba.int32,
        "q": numba.int32,
        "d": numba.float32,
        "added": numba.uint8,
        "n": numba.uint32,
        "i": numba.uint32,
        "j": numba.uint32,
    },
    cache=False,
)
def apply_graph_updates_low_memory(current_graph, updates, n_threads):
    """Apply ``(p, q, d)`` edge updates to the neighbor graph in parallel.

    Each candidate edge is pushed in both directions (q into p's heap,
    p into q's heap).  Thread ``n`` only writes heap rows ``r`` with
    ``r % n_threads == n``, so the prange loop is race-free without any
    membership bookkeeping.  Returns the number of heap insertions
    (``n_changes`` is a prange reduction).
    """
    n_changes = 0
    priorities = current_graph[1]
    indices = current_graph[0]
    flags = current_graph[2]
    for n in numba.prange(n_threads):
        for i in range(len(updates)):
            for j in range(len(updates[i])):
                p, q, d = updates[i][j]
                if p == -1 or q == -1:
                    # Sentinel entry: skip.
                    continue
                if p % n_threads == n:
                    added = checked_flagged_heap_push(
                        priorities[p], indices[p], flags[p], d, q, 1
                    )
                    n_changes += added
                if q % n_threads == n:
                    added = checked_flagged_heap_push(
                        priorities[q], indices[q], flags[q], d, p, 1
                    )
                    n_changes += added
    return n_changes
@numba.njit(locals={"p": numba.types.int64, "q": numba.types.int64}, cache=True)
def apply_graph_updates_high_memory(current_graph, updates, in_graph):
    """Apply ``(p, q, d)`` edge updates using per-row membership sets.

    ``in_graph[r]`` tracks the neighbor ids currently held in heap row
    ``r`` so already-present edges are skipped without touching the
    heaps.  Each edge is considered in both directions.  Returns the
    number of heap insertions performed.
    """
    n_changes = 0
    for i in range(len(updates)):
        for j in range(len(updates[i])):
            p, q, d = updates[i][j]
            if p == -1 or q == -1:
                # Sentinel entry: skip.
                continue
            if q in in_graph[p] and p in in_graph[q]:
                # Edge already present in both directions.
                continue
            elif q in in_graph[p]:
                pass
            else:
                added = checked_flagged_heap_push(
                    current_graph[1][p],
                    current_graph[0][p],
                    current_graph[2][p],
                    d,
                    q,
                    1,
                )
                if added > 0:
                    in_graph[p].add(q)
                    n_changes += added
            if p == q or p in in_graph[q]:
                pass
            else:
                # BUG FIX: push p into q's heap. The original pushed q into
                # p's heap a second time (row p again) while recording p in
                # in_graph[q], so the reverse edge was never inserted.
                added = checked_flagged_heap_push(
                    current_graph[1][q],
                    current_graph[0][q],
                    current_graph[2][q],
                    d,
                    p,
                    1,
                )
                if added > 0:
                    in_graph[q].add(p)
                    n_changes += added
    return n_changes
@numba.njit(cache=True)
def initalize_heap_from_graph_indices(heap, graph_indices, data, metric):
    """Seed a heap from an existing neighbor-index array (dense data).

    For each valid (non-negative) neighbor id, computes the metric
    distance between the two data rows and pushes it with flag 1.
    (Name typo "initalize" is preserved: it is the public API name.)
    """
    for i in range(graph_indices.shape[0]):
        for idx in range(graph_indices.shape[1]):
            j = graph_indices[i, idx]
            if j >= 0:
                d = metric(data[i], data[j])
                checked_flagged_heap_push(heap[1][i], heap[0][i], heap[2][i], d, j, 1)
    return heap
@numba.njit(parallel=True, cache=False)
def sparse_initalize_heap_from_graph_indices(
    heap, graph_indices, data_indptr, data_indices, data_vals, metric
):
    """Seed a heap from a neighbor-index array over CSR sparse data.

    For each valid neighbor id, slices the CSR rows of both points out
    of ``data_indptr``/``data_indices``/``data_vals``, evaluates the
    sparse metric and pushes the distance with flag 1.  Rows are
    processed in parallel; each iteration writes only heap row ``i``.
    """
    for i in numba.prange(graph_indices.shape[0]):
        for idx in range(graph_indices.shape[1]):
            j = graph_indices[i, idx]
            # ROBUSTNESS FIX: skip empty (-1) slots, as the dense
            # initializer does; a negative j would slice with a bogus
            # CSR offset (data_indptr[-1]).
            if j >= 0:
                ind1 = data_indices[data_indptr[i] : data_indptr[i + 1]]
                data1 = data_vals[data_indptr[i] : data_indptr[i + 1]]
                ind2 = data_indices[data_indptr[j] : data_indptr[j + 1]]
                data2 = data_vals[data_indptr[j] : data_indptr[j + 1]]
                d = metric(ind1, data1, ind2, data2)
                checked_flagged_heap_push(heap[1][i], heap[0][i], heap[2][i], d, j, 1)
    return heap
def ts():
    """Return the current local time as a human-readable timestamp string."""
    # time.ctime() with no argument formats the current time, which is
    # exactly what time.ctime(time.time()) does.
    return time.ctime()
| true | true |
f72f232db2207147d84c9324167fdd40d2261e65 | 548 | py | Python | backend/manage.py | Web-Multi-Media/HttpStreamingServer | c062840b1323f6a5b35a1d7d26542a0fd6e77b0e | [
"MIT"
] | 5 | 2019-12-25T16:28:26.000Z | 2022-03-22T11:05:16.000Z | backend/manage.py | Web-Multi-Media/HttpStreamingServer | c062840b1323f6a5b35a1d7d26542a0fd6e77b0e | [
"MIT"
] | 26 | 2019-11-14T16:03:26.000Z | 2022-02-10T11:56:04.000Z | backend/manage.py | Web-Multi-Media/HttpStreamingServer | c062840b1323f6a5b35a1d7d26542a0fd6e77b0e | [
"MIT"
] | 2 | 2020-09-01T09:25:14.000Z | 2020-10-09T12:06:35.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings module before importing
    # anything from django.* (Django reads it at import time).
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StreamingServer.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
| 32.235294 | 79 | 0.689781 |
import os
import sys
if __name__ == '__main__':
    # Django management entry point: select the settings module, then
    # hand argv to Django's command dispatcher.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StreamingServer.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| true | true |
f72f24046e5fa65b5ee35e45bd348bb5ccc79c50 | 861 | py | Python | ooobuild/dyn/chart/chart_axis_y_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/chart/chart_axis_y_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/chart/chart_axis_y_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.chart
# Dynamic-module shim: re-export the generated service class from the
# ``lo`` package so it is importable from this namespace.
from ...lo.chart.chart_axis_y_supplier import ChartAxisYSupplier as ChartAxisYSupplier
__all__ = ['ChartAxisYSupplier']
| 33.115385 | 86 | 0.763066 |
# Re-export shim for the generated ChartAxisYSupplier service class.
from ...lo.chart.chart_axis_y_supplier import ChartAxisYSupplier as ChartAxisYSupplier
__all__ = ['ChartAxisYSupplier']
| true | true |
f72f2566c3018af4ef5d78db70a1751fce0ba12b | 4,040 | py | Python | fugue_notebook/__init__.py | gityow/fugue | e975625b33766d8b9dc64c6954871569b59367ec | [
"Apache-2.0"
] | null | null | null | fugue_notebook/__init__.py | gityow/fugue | e975625b33766d8b9dc64c6954871569b59367ec | [
"Apache-2.0"
] | null | null | null | fugue_notebook/__init__.py | gityow/fugue | e975625b33766d8b9dc64c6954871569b59367ec | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from typing import Any
from fugue_version import __version__
from IPython import get_ipython
from IPython.display import Javascript
from fugue_notebook.env import NotebookSetup, _setup_fugue_notebook
_HIGHLIGHT_JS = r"""
require(["codemirror/lib/codemirror"]);
function set(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var fugue_keywords = "fill hash rand even presort persist broadcast params process output outtransform rowcount concurrency prepartition zip print title save append parquet csv json single checkpoint weak strong deterministic yield connect sample seed take sub callback dataframe file";
CodeMirror.defineMIME("text/x-fsql", {
name: "sql",
keywords: set(fugue_keywords + " add after all alter analyze and anti archive array as asc at between bucket buckets by cache cascade case cast change clear cluster clustered codegen collection column columns comment commit compact compactions compute concatenate cost create cross cube current current_date current_timestamp database databases data dbproperties defined delete delimited deny desc describe dfs directories distinct distribute drop else end escaped except exchange exists explain export extended external false fields fileformat first following for format formatted from full function functions global grant group grouping having if ignore import in index indexes inner inpath inputformat insert intersect interval into is items join keys last lateral lazy left like limit lines list load local location lock locks logical macro map minus msck natural no not null nulls of on optimize option options or order out outer outputformat over overwrite partition partitioned partitions percent preceding principals purge range recordreader recordwriter recover reduce refresh regexp rename repair replace reset restrict revoke right rlike role roles rollback rollup row rows schema schemas select semi separated serde serdeproperties set sets show skewed sort sorted start statistics stored stratify struct table tables tablesample tblproperties temp temporary terminated then to touch transaction transactions transform true truncate unarchive unbounded uncache union unlock unset use using values view when where window with"),
builtin: set("date datetime tinyint smallint int bigint boolean float double string binary timestamp decimal array map struct uniontype delimited serde sequencefile textfile rcfile inputformat outputformat"),
atoms: set("false true null"),
operatorChars: /^[*\/+\-%<>!=~&|^]/,
dateSQL: set("time"),
support: set("ODBCdotTable doubleQuote zerolessFloat")
});
CodeMirror.modeInfo.push( {
name: "Fugue SQL",
mime: "text/x-fsql",
mode: "sql"
} );
require(['notebook/js/codecell'], function(codecell) {
codecell.CodeCell.options_default.highlight_modes['magic_text/x-fsql'] = {'reg':[/%%fsql/]} ;
Jupyter.notebook.events.on('kernel_ready.Kernel', function(){
Jupyter.notebook.get_cells().map(function(cell){
if (cell.cell_type == 'code'){ cell.auto_highlight(); } }) ;
});
});
"""
def load_ipython_extension(ip: Any) -> None:
    """Entrypoint for IPython ``%load_ext``: installs Fugue's notebook hooks
    with default setup (``NotebookSetup`` left as None)."""
    _setup_fugue_notebook(ip, None)
def _jupyter_nbextension_paths():
"""Entrypoint for Jupyter extension"""
return [
{
"section": "notebook",
"src": "nbextension",
"dest": "fugue_notebook",
"require": "fugue_notebook/main",
}
]
def setup(notebook_setup: Any = None, is_lab: bool = False) -> Any:
    """Setup the notebook environment inside notebook without
    installing the jupyter extension or loading ipython extension

    :param notebook_setup: ``None`` or an instance of
      :class:`~.fugue_notebook.env.NotebookSetup`, defaults to None
    :param is_lab: True when running under JupyterLab; the classic-notebook
      highlighting JS is only returned when this is False
    """
    ip = get_ipython()
    _setup_fugue_notebook(ip, notebook_setup)
    if not is_lab:
        # Classic notebook only: emit the %%fsql highlighting snippet.
        return Javascript(_HIGHLIGHT_JS)
| 56.111111 | 1,543 | 0.746287 |
from typing import Any
from fugue_version import __version__
from IPython import get_ipython
from IPython.display import Javascript
from fugue_notebook.env import NotebookSetup, _setup_fugue_notebook
_HIGHLIGHT_JS = r"""
require(["codemirror/lib/codemirror"]);
function set(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var fugue_keywords = "fill hash rand even presort persist broadcast params process output outtransform rowcount concurrency prepartition zip print title save append parquet csv json single checkpoint weak strong deterministic yield connect sample seed take sub callback dataframe file";
CodeMirror.defineMIME("text/x-fsql", {
name: "sql",
keywords: set(fugue_keywords + " add after all alter analyze and anti archive array as asc at between bucket buckets by cache cascade case cast change clear cluster clustered codegen collection column columns comment commit compact compactions compute concatenate cost create cross cube current current_date current_timestamp database databases data dbproperties defined delete delimited deny desc describe dfs directories distinct distribute drop else end escaped except exchange exists explain export extended external false fields fileformat first following for format formatted from full function functions global grant group grouping having if ignore import in index indexes inner inpath inputformat insert intersect interval into is items join keys last lateral lazy left like limit lines list load local location lock locks logical macro map minus msck natural no not null nulls of on optimize option options or order out outer outputformat over overwrite partition partitioned partitions percent preceding principals purge range recordreader recordwriter recover reduce refresh regexp rename repair replace reset restrict revoke right rlike role roles rollback rollup row rows schema schemas select semi separated serde serdeproperties set sets show skewed sort sorted start statistics stored stratify struct table tables tablesample tblproperties temp temporary terminated then to touch transaction transactions transform true truncate unarchive unbounded uncache union unlock unset use using values view when where window with"),
builtin: set("date datetime tinyint smallint int bigint boolean float double string binary timestamp decimal array map struct uniontype delimited serde sequencefile textfile rcfile inputformat outputformat"),
atoms: set("false true null"),
operatorChars: /^[*\/+\-%<>!=~&|^]/,
dateSQL: set("time"),
support: set("ODBCdotTable doubleQuote zerolessFloat")
});
CodeMirror.modeInfo.push( {
name: "Fugue SQL",
mime: "text/x-fsql",
mode: "sql"
} );
require(['notebook/js/codecell'], function(codecell) {
codecell.CodeCell.options_default.highlight_modes['magic_text/x-fsql'] = {'reg':[/%%fsql/]} ;
Jupyter.notebook.events.on('kernel_ready.Kernel', function(){
Jupyter.notebook.get_cells().map(function(cell){
if (cell.cell_type == 'code'){ cell.auto_highlight(); } }) ;
});
});
"""
def load_ipython_extension(ip: Any) -> None:
    # IPython %load_ext entry point: install Fugue's notebook hooks.
    _setup_fugue_notebook(ip, None)
def _jupyter_nbextension_paths():
    # Jupyter nbextension registration: where the extension's JS lives
    # and the require.js module to load in classic notebooks.
    return [
        {
            "section": "notebook",
            "src": "nbextension",
            "dest": "fugue_notebook",
            "require": "fugue_notebook/main",
        }
    ]
def setup(notebook_setup: Any = None, is_lab: bool = False) -> Any:
    # Manual in-notebook setup (no extension install); returns the
    # highlighting JS only for classic notebooks (not JupyterLab).
    ip = get_ipython()
    _setup_fugue_notebook(ip, notebook_setup)
    if not is_lab:
        return Javascript(_HIGHLIGHT_JS)
| true | true |
f72f25b50dcbfd6ed34ab185f0b12887078ac367 | 1,814 | py | Python | prnet/utils/render_app.py | RonnyLV/PRNet | 0c2ded7042ceee2b2f9bba02bc19d91d4c3993c5 | [
"MIT"
] | null | null | null | prnet/utils/render_app.py | RonnyLV/PRNet | 0c2ded7042ceee2b2f9bba02bc19d91d4c3993c5 | [
"MIT"
] | null | null | null | prnet/utils/render_app.py | RonnyLV/PRNet | 0c2ded7042ceee2b2f9bba02bc19d91d4c3993c5 | [
"MIT"
] | null | null | null | import numpy as np
from prnet.utils.render import vis_of_vertices, render_texture
from scipy import ndimage
def get_visibility(vertices, triangles, h, w):
    """Compute per-vertex visibility for a mesh rendered into an h x w image.

    A vertex is visible if the rasteriser (``vis_of_vertices``) marks it
    directly, or if it shares a triangle with a visible vertex: the loop
    below OR-dilates visibility across triangles twice.

    Args:
        vertices: (n, 3) vertex array (transposed before rasterisation).
        triangles: (n_tri, 3) triangle index array.
        h, w: target image height and width.

    Returns:
        float32 array of length n: 1.0 for visible vertices, 0.0 otherwise.
    """
    triangles = triangles.T
    vertices_vis = vis_of_vertices(vertices.T, triangles, h, w)
    vertices_vis = vertices_vis.astype(bool)
    for k in range(2):
        # A triangle counts as visible when any of its three vertices is;
        # mark all vertices of such triangles visible.
        tri_vis = vertices_vis[triangles[0, :]] | vertices_vis[triangles[1, :]] | vertices_vis[triangles[2, :]]
        ind = triangles[:, tri_vis]
        vertices_vis[ind] = True
    # (Removed a commented-out AND-based variant of the loop above.)
    vertices_vis = vertices_vis.astype(np.float32)  # 1 for visible, 0 for non-visible
    return vertices_vis
def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
    """Render a UV-space mask of visible mesh regions.

    Rasterises the per-vertex visibility values into UV space via
    ``render_texture``, thresholds to a boolean mask, then cleans it up
    with morphological closing and repeated 4x4 erosions (shrinking the
    mask away from unreliable boundary pixels).

    Returns a (resolution, resolution) float32 mask of 0s and 1s.
    """
    triangles = triangles.T
    vertices_vis = vertices_vis.astype(np.float32)
    uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles, resolution, resolution, 1)
    uv_mask = np.squeeze(uv_mask > 0)
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = uv_mask.astype(np.float32)
    return np.squeeze(uv_mask)
def get_depth_image(vertices, triangles, h, w, isShow = False):
    """Render an (h, w) depth image from mesh vertex z-values.

    Args:
        vertices: (n, 3) vertex array; the z column supplies depth.
        triangles: (n_tri, 3) triangle index array.
        h, w: output image height and width.
        isShow: if True, normalise depth by its maximum for display.

    Returns:
        (h, w) depth image (squeezed from a 1-channel rendered texture).
    """
    z = vertices[:, 2:]
    if isShow:
        # Use the array maximum; the builtin max() on a 2D ndarray relies
        # on row-wise comparisons and is fragile and slow.
        z = z / np.max(z)
    depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)
    return np.squeeze(depth_image)
from prnet.utils.render import vis_of_vertices, render_texture
from scipy import ndimage
def get_visibility(vertices, triangles, h, w):
    # Per-vertex visibility: rasterise, then OR-dilate visibility across
    # triangles twice (a vertex becomes visible if any co-triangle vertex is).
    triangles = triangles.T
    vertices_vis = vis_of_vertices(vertices.T, triangles, h, w)
    vertices_vis = vertices_vis.astype(bool)
    for k in range(2):
        tri_vis = vertices_vis[triangles[0,:]] | vertices_vis[triangles[1,:]] | vertices_vis[triangles[2,:]]
        ind = triangles[:, tri_vis]
        vertices_vis[ind] = True
    vertices_vis = vertices_vis.astype(np.float32)  # 1 for visible, 0 for non-visible
    return vertices_vis
def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
    # UV-space visibility mask: rasterise visibility into UV space,
    # threshold, then morphologically close and erode to clean edges.
    triangles = triangles.T
    vertices_vis = vertices_vis.astype(np.float32)
    uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles, resolution, resolution, 1)
    uv_mask = np.squeeze(uv_mask > 0)
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
    uv_mask = uv_mask.astype(np.float32)
    return np.squeeze(uv_mask)
def get_depth_image(vertices, triangles, h, w, isShow = False):
    # Render vertex z-values to an (h, w) depth image; optionally
    # normalise by the maximum depth for display.
    z = vertices[:, 2:]
    if isShow:
        z = z/max(z)
    depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)
    return np.squeeze(depth_image)
f72f26220b3eace9e7640d67550958c5c1e52ae2 | 969 | py | Python | mysite/urls.py | taylorculver/Django_Project_Improvement | a03eb076eb170c0ec74d9edac515f826cf2ee30e | [
"Unlicense"
] | null | null | null | mysite/urls.py | taylorculver/Django_Project_Improvement | a03eb076eb170c0ec74d9edac515f826cf2ee30e | [
"Unlicense"
] | null | null | null | mysite/urls.py | taylorculver/Django_Project_Improvement | a03eb076eb170c0ec74d9edac515f826cf2ee30e | [
"Unlicense"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
urlpatterns = [
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Everything else is routed to the "menu" app's URLconf.
    url(r'', include('menu.urls')),
]
if settings.DEBUG:
    # Development only: prepend django-debug-toolbar routes.
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
| 31.258065 | 77 | 0.69453 | from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
urlpatterns = [
    # Admin site, then a catch-all route into the "menu" app.
    url(r'^admin/', include(admin.site.urls)),
    url(r'', include('menu.urls')),
]
if settings.DEBUG:
    # Development only: prepend django-debug-toolbar routes.
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
| true | true |
f72f28a8cc0117ce6a4927c59ca5ac49a7281a79 | 7,388 | py | Python | cyder/base/mixins.py | zeeman/cyder | 10d347e77554628e4c7af478d0f57b6c14a608ef | [
"BSD-3-Clause"
] | null | null | null | cyder/base/mixins.py | zeeman/cyder | 10d347e77554628e4c7af478d0f57b6c14a608ef | [
"BSD-3-Clause"
] | null | null | null | cyder/base/mixins.py | zeeman/cyder | 10d347e77554628e4c7af478d0f57b6c14a608ef | [
"BSD-3-Clause"
] | null | null | null | import os
import fcntl
from string import Template
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.loading import get_model
from django.forms import ModelChoiceField, HiddenInput
from django import forms
from cyder.base.utils import filter_by_ctnr
class DisplayMixin(object):
    """Mixin that renders a DNS record as a BIND zone-file line."""
    # Knobs: column-width justification values substituted into
    # ``self.template`` (a string.Template) before formatting.
    justs = {
        'pk_just': 10,
        'rhs_just': 1,
        'ttl_just': 6,
        'rdtype_just': 7,
        'rdclass_just': 3,
        'prio_just': 2,
        'lhs_just': 61,
        'extra_just': 1
    }
    def bind_render_record(self, pk=False, custom=None):
        """Format this record using its BIND template.

        ``custom`` maps template keys to values that override the
        instance's attributes.  NOTE: sets ``self.ttl = 3600`` when the
        record has no TTL (mutates the instance); since ``kwargs`` is
        the live instance ``__dict__`` (``vars(self)``), that new ttl is
        visible at format time.
        """
        kwargs = vars(self)
        if custom:
            for key, value in custom.items():
                kwargs[key] = value
        template = Template(self.template).substitute(**self.justs)
        bind_name = self.fqdn + "."
        if not self.ttl:
            self.ttl = 3600
        return template.format(bind_name=bind_name, rdtype=self.rdtype,
                               rdclass='IN', **kwargs)
class ObjectUrlMixin(object):
    """
    This is a mixin that adds important url methods to a model. This
    class uses the ``_meta.db_table`` instance variable of an object to
    calculate URLs. Because of this, you must use the app label of your
    class when declaring urls in your urls.py.
    """
    @classmethod
    def get_list_url(cls):
        """
        Return the 'list' url of an object. Class method since don't
        need specific instance of object.
        """
        return reverse(cls._meta.db_table)

    @classmethod
    def get_create_url(cls):
        """Return the create url of the type of object (to be posted to)."""
        return cls.get_list_url()

    def _pk_url_or_empty(self, url_name):
        """Reverse ``url_name`` with this object's pk, or return '' when no
        matching URL pattern is registered (shared by the two lookups
        below, replacing duplicated try/except blocks)."""
        try:
            return reverse(url_name, args=[self.pk])
        except NoReverseMatch:
            return ''

    def get_update_url(self):
        """Return the update url of an object."""
        return reverse(self._meta.db_table + '-update', args=[self.pk])

    def get_delete_url(self):
        """Return the delete url of an object."""
        return reverse('delete')

    def get_detail_url(self):
        """Return the detail url of an object."""
        return self._pk_url_or_empty(self._meta.db_table + '-detail')

    def get_table_update_url(self):
        """Return the editableGrid update url of an object."""
        return self._pk_url_or_empty(self._meta.db_table + '-table-update')

    def details(self):
        """
        Return base details with generic postback URL for editable tables.
        """
        return {'url': self.get_table_update_url()}
class UsabilityFormMixin(object):
    """Form mixin with helpers that tailor fields to the requesting user:
    required-field labels, alphabetised querysets, container filtering and
    auto-selected system/container fields."""
    def append_required_all(self):
        # Append '*' to the label of every required field (deriving a
        # label from the field name when none is set).
        for fieldname, field in self.fields.items():
            if self.fields[fieldname].required is True:
                if self.fields[fieldname].label is None:
                    fname = fieldname.replace('_', ' ')
                    self.fields[fieldname].label = fname.capitalize() + '*'
                else:
                    self.fields[fieldname].label += '*'
    def alphabetize_all(self):
        # Order every queryset-backed field by its model's sort_fields.
        for fieldname, field in self.fields.items():
            if hasattr(field, 'queryset'):
                self.fields[fieldname].queryset = field.queryset.order_by(
                    *field.queryset.model.sort_fields)
    def filter_by_ctnr_all(self, request):
        """Restrict every queryset-backed field to the session's container.

        Ctnr fields are limited to the user's containers (minus the
        built-in pks 1 and 2, plus any preset initial); other fields go
        through ``filter_by_ctnr``.  Single-result querysets become the
        field's initial value.
        """
        from cyder.core.ctnr.models import Ctnr
        # NOTE(review): Domain appears unused in this method -- confirm
        # whether the import is needed for a side effect before removing.
        from cyder.cydns.domain.models import Domain
        ctnr = request.session['ctnr']
        for fieldname, field in self.fields.items():
            if not hasattr(field, 'queryset'):
                continue
            queryset = self.fields[fieldname].queryset
            if queryset.model is Ctnr:
                ctnrs = set(c.pk for c in request.session['ctnrs'])
                for pk in [1, 2]:
                    if pk in ctnrs:
                        ctnrs.remove(pk)
                if self.fields[fieldname].initial:
                    ctnrs.add(self.fields[fieldname].initial.pk)
                queryset = queryset.filter(pk__in=ctnrs)
            else:
                queryset = filter_by_ctnr(ctnr=ctnr,
                                          objects=field.queryset).distinct()
            if queryset.count() == 1:
                self.fields[fieldname].initial = queryset[0]
            self.fields[fieldname].queryset = queryset
    def autoselect_system(self):
        # When an initial system is supplied, lock the field to it via a
        # hidden single-choice field; otherwise drop the field entirely.
        System = get_model('cyder', 'system')
        if 'system' in self.initial:
            self.fields['system'] = ModelChoiceField(
                widget=HiddenInput(),
                empty_label='',
                queryset=System.objects.filter(pk=int(self.initial['system'])))
        elif 'system' in self.fields:
            del(self.fields['system'])
    def autoselect_ctnr(self, request):
        # Outside the "global" container, preselect and hide the ctnr field.
        if 'ctnr' not in self.fields:
            return
        ctnr = request.session['ctnr']
        if ctnr.name != "global":
            if 'ctnr' not in self.initial:
                self.fields['ctnr'].initial = request.session['ctnr']
            self.fields['ctnr'].widget = HiddenInput()
    def make_usable(self, request):
        # Apply all of the adjustments above in one call.
        self.autoselect_system()
        self.autoselect_ctnr(request)
        if 'ctnr' in request.session:
            self.filter_by_ctnr_all(request)
        self.alphabetize_all()
        self.append_required_all()
class MutexMixin(object):
    """File-lock (fcntl.flock) mutex mixin with a companion PID file.

    Expects the host class to provide ``lock_file``, ``pid_file``,
    ``log_debug`` and ``error``.  NOTE: this is Python-2 era code --
    ``exc_value[0]`` indexing of IOError below only works on Python 2
    (as does the ``unicode`` builtin).
    """
    def __enter__(self):
        self.lock()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.unlock()
    def lock(self):
        """Acquire the lock file (non-blocking) and record our PID."""
        if not os.path.exists(os.path.dirname(self.lock_file)):
            os.makedirs(os.path.dirname(self.lock_file))
        self.log_debug("Attempting to lock {0}..."
                       .format(self.lock_file))
        self.lock_fd = open(self.lock_file, 'w')
        try:
            fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as exc_value:
            self.lock_fd.close()
            # IOError: [Errno 11] Resource temporarily unavailable
            # (EAGAIN: another process holds the lock -- report its PID).
            if exc_value[0] == 11:
                with open(self.pid_file, 'r') as pid_fd:
                    self._lock_failure(pid_fd.read())
            else:
                raise
        self.log_debug("Lock acquired")
        try:
            with open(self.pid_file, 'w') as pid_fd:
                pid_fd.write(unicode(os.getpid()))
        except IOError as exc_value:
            # IOError: [Errno 2] No such file or directory
            if exc_value[0] == 2:
                self.error(
                    "Failed to acquire lock on {0}, but the process that has "
                    "it hasn't written the PID file ({1}) yet.".format(
                        self.lock_file, self.pid_file))
            else:
                raise
    def unlock(self):
        """Release the lock and remove both the PID and lock files."""
        if not self.lock_fd:
            return False
        self.log_debug("Releasing lock ({0})...".format(self.lock_file))
        fcntl.flock(self.lock_fd, fcntl.LOCK_UN)
        self.lock_fd.close()
        os.remove(self.pid_file)
        os.remove(self.lock_file)
        self.log_debug("Unlock complete")
        return True
    def _lock_failure(self, pid):
        # Report which process currently holds the lock.
        self.error('Failed to acquire lock on {0}. Process {1} currently '
                   'has it.'.format(self.lock_file, pid))
| 32.982143 | 79 | 0.569031 | import os
import fcntl
from string import Template
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.loading import get_model
from django.forms import ModelChoiceField, HiddenInput
from django import forms
from cyder.base.utils import filter_by_ctnr
class DisplayMixin(object):
    """Mixin that renders a DNS record as a BIND zone-file line
    (comment-stripped duplicate of the DisplayMixin defined earlier in
    this file)."""
    # Column-width justification values for the record template.
    justs = {
        'pk_just': 10,
        'rhs_just': 1,
        'ttl_just': 6,
        'rdtype_just': 7,
        'rdclass_just': 3,
        'prio_just': 2,
        'lhs_just': 61,
        'extra_just': 1
    }
    def bind_render_record(self, pk=False, custom=None):
        # kwargs is the live instance __dict__, so the ttl default
        # assigned below is visible at format time.
        kwargs = vars(self)
        if custom:
            for key, value in custom.items():
                kwargs[key] = value
        template = Template(self.template).substitute(**self.justs)
        bind_name = self.fqdn + "."
        if not self.ttl:
            self.ttl = 3600
        return template.format(bind_name=bind_name, rdtype=self.rdtype,
                               rdclass='IN', **kwargs)
class ObjectUrlMixin(object):
    """URL helper mixin keyed off ``_meta.db_table`` (comment-stripped
    duplicate of the ObjectUrlMixin defined earlier in this file)."""
    @classmethod
    def get_list_url(cls):
        # List url for the type (no instance needed).
        return reverse(cls._meta.db_table)
    @classmethod
    def get_create_url(cls):
        # Create url (POST target) is the same as the list url.
        return cls.get_list_url()
    def get_update_url(self):
        return reverse(self._meta.db_table + '-update', args=[self.pk])
    def get_delete_url(self):
        return reverse('delete')
    def get_detail_url(self):
        # '' when no detail URL pattern is registered for this table.
        try:
            return reverse(self._meta.db_table + '-detail', args=[self.pk])
        except NoReverseMatch:
            return ''
    def get_table_update_url(self):
        # '' when no editableGrid update pattern is registered.
        try:
            return reverse(self._meta.db_table + '-table-update',
                           args=[self.pk])
        except NoReverseMatch:
            return ''
    def details(self):
        # Base details with the generic postback URL for editable tables.
        return {'url': self.get_table_update_url()}
class UsabilityFormMixin(object):
    """Form usability helpers (comment-stripped duplicate of the
    UsabilityFormMixin defined earlier in this file)."""
    def append_required_all(self):
        # Append '*' to every required field's label.
        for fieldname, field in self.fields.items():
            if self.fields[fieldname].required is True:
                if self.fields[fieldname].label is None:
                    fname = fieldname.replace('_', ' ')
                    self.fields[fieldname].label = fname.capitalize() + '*'
                else:
                    self.fields[fieldname].label += '*'
    def alphabetize_all(self):
        # Order queryset-backed fields by their model's sort_fields.
        for fieldname, field in self.fields.items():
            if hasattr(field, 'queryset'):
                self.fields[fieldname].queryset = field.queryset.order_by(
                    *field.queryset.model.sort_fields)
    def filter_by_ctnr_all(self, request):
        # Restrict queryset-backed fields to the session's container.
        from cyder.core.ctnr.models import Ctnr
        from cyder.cydns.domain.models import Domain
        ctnr = request.session['ctnr']
        for fieldname, field in self.fields.items():
            if not hasattr(field, 'queryset'):
                continue
            queryset = self.fields[fieldname].queryset
            if queryset.model is Ctnr:
                ctnrs = set(c.pk for c in request.session['ctnrs'])
                for pk in [1, 2]:
                    if pk in ctnrs:
                        ctnrs.remove(pk)
                if self.fields[fieldname].initial:
                    ctnrs.add(self.fields[fieldname].initial.pk)
                queryset = queryset.filter(pk__in=ctnrs)
            else:
                queryset = filter_by_ctnr(ctnr=ctnr,
                                          objects=field.queryset).distinct()
            if queryset.count() == 1:
                self.fields[fieldname].initial = queryset[0]
            self.fields[fieldname].queryset = queryset
    def autoselect_system(self):
        # Lock the system field to the supplied initial value, or drop it.
        System = get_model('cyder', 'system')
        if 'system' in self.initial:
            self.fields['system'] = ModelChoiceField(
                widget=HiddenInput(),
                empty_label='',
                queryset=System.objects.filter(pk=int(self.initial['system'])))
        elif 'system' in self.fields:
            del(self.fields['system'])
    def autoselect_ctnr(self, request):
        # Preselect and hide the ctnr field outside the global container.
        if 'ctnr' not in self.fields:
            return
        ctnr = request.session['ctnr']
        if ctnr.name != "global":
            if 'ctnr' not in self.initial:
                self.fields['ctnr'].initial = request.session['ctnr']
            self.fields['ctnr'].widget = HiddenInput()
    def make_usable(self, request):
        # Apply all of the adjustments above in one call.
        self.autoselect_system()
        self.autoselect_ctnr(request)
        if 'ctnr' in request.session:
            self.filter_by_ctnr_all(request)
        self.alphabetize_all()
        self.append_required_all()
class MutexMixin(object):
    """Context-manager mixin implementing an exclusive inter-process lock
    via flock() on self.lock_file, recording the holder's PID in
    self.pid_file. Subclasses must supply lock_file, pid_file, log_debug()
    and error(). Python 2 code (``unicode``, errno via ``exc[0]``)."""

    def __enter__(self):
        self.lock()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always release, whether or not the managed body raised.
        self.unlock()

    def lock(self):
        """Acquire the lock, or report the PID of the current holder."""
        if not os.path.exists(os.path.dirname(self.lock_file)):
            os.makedirs(os.path.dirname(self.lock_file))
        self.log_debug("Attempting to lock {0}..."
                       .format(self.lock_file))
        self.lock_fd = open(self.lock_file, 'w')
        try:
            # Non-blocking exclusive lock: raises IOError immediately if
            # another process already holds it.
            fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as exc_value:
            self.lock_fd.close()
            # Py2 idiom: exc_value[0] is errno; 11 == EAGAIN/EWOULDBLOCK,
            # i.e. the lock is held elsewhere -- report the holder's PID.
            # NOTE(review): if error() returns instead of raising, control
            # falls through to the "Lock acquired" path below.
            if exc_value[0] == 11:
                with open(self.pid_file, 'r') as pid_fd:
                    self._lock_failure(pid_fd.read())
            else:
                raise
        self.log_debug("Lock acquired")
        try:
            with open(self.pid_file, 'w') as pid_fd:
                pid_fd.write(unicode(os.getpid()))
        except IOError as exc_value:
            # errno 2 == ENOENT -- presumably a race where the pid file's
            # directory is missing; TODO confirm the intended scenario.
            if exc_value[0] == 2:
                self.error(
                    "Failed to acquire lock on {0}, but the process that has "
                    "it hasn't written the PID file ({1}) yet.".format(
                        self.lock_file, self.pid_file))
            else:
                raise

    def unlock(self):
        """Release the lock and remove both files.

        Returns False when no lock was held, True on success."""
        if not self.lock_fd:
            return False
        self.log_debug("Releasing lock ({0})...".format(self.lock_file))
        fcntl.flock(self.lock_fd, fcntl.LOCK_UN)
        self.lock_fd.close()
        os.remove(self.pid_file)
        os.remove(self.lock_file)
        self.log_debug("Unlock complete")
        return True

    def _lock_failure(self, pid):
        # Delegate to the subclass's error handler with the holder's PID.
        self.error('Failed to acquire lock on {0}. Process {1} currently '
                   'has it.'.format(self.lock_file, pid))
| true | true |
f72f2add0eef057060b14524de685d3508075821 | 23,057 | py | Python | trolley.py | jefftriplett/trolley | 6e440d491bc11c576b6d1470b439df82f667c19a | [
"BSD-3-Clause"
] | 3 | 2015-05-25T13:20:38.000Z | 2019-03-06T18:38:15.000Z | trolley.py | jefftriplett/trolley | 6e440d491bc11c576b6d1470b439df82f667c19a | [
"BSD-3-Clause"
] | null | null | null | trolley.py | jefftriplett/trolley | 6e440d491bc11c576b6d1470b439df82f667c19a | [
"BSD-3-Clause"
] | 3 | 2016-07-15T07:31:48.000Z | 2018-09-29T07:50:34.000Z | #!/usr/bin/env python
"""
Trolley syncs issues between CSV, Github, and Buffer with Trello.
"""
import csv
import datetime
import os
import random
import click
import click_config
import github3
from buffpy.api import API as BufferAPI
from buffpy.managers.profiles import Profiles
from buffpy.managers.updates import Updates
from trello import TrelloClient
__author__ = 'Jeff Triplett'
__copyright__ = 'Copyright 2015, Jeff Triplett'
__license__ = 'BSD'
__version__ = '0.1.6'

# Lazily-populated singletons for the authenticated API clients; filled
# in by the get_*_auth helpers so each service is logged into at most once.
_buffer_auth = None
_github_auth = None
_trello_auth = None

# Credentials and defaults, all read from the environment at import time.
BUFFER_CLIENT_ID = os.environ.get('BUFFER_CLIENT_ID')
BUFFER_CLIENT_SECRET = os.environ.get('BUFFER_CLIENT_SECRET')
BUFFER_ACCESS_TOKEN = os.environ.get('BUFFER_ACCESS_TOKEN')

GITHUB_USERNAME = os.environ.get('GITHUB_USERNAME')
GITHUB_PASSWORD = os.environ.get('GITHUB_PASSWORD')
GITHUB_ORG = os.environ.get('GITHUB_ORG')
GITHUB_REPO = os.environ.get('GITHUB_REPO')
# OAuth scopes requested for GitHub access.
GITHUB_SCOPES = ['user', 'repo']

TRELLO_APP_KEY = os.environ.get('TRELLO_APP_KEY')
TRELLO_APP_SECRET = os.environ.get('TRELLO_APP_SECRET')
TRELLO_AUTH_TOKEN = os.environ.get('TRELLO_AUTH_TOKEN')
TRELLO_BOARD_ID = os.environ.get('TRELLO_BOARD_ID')
# Trello list that receives cards when no list is specified.
TRELLO_DEFAULT_LIST = os.environ.get('TRELLO_DEFAULT_LIST', 'Uncategorized')
# might migrate to:
# http://click.pocoo.org/4/options/#values-from-environment-variables
class config(object):
    """Default configuration namespaces, sourced from the environment.

    click_config.wrap() overlays values from a config file on top of
    these class attributes before any command runs.
    """
    class buffer(object):
        client_id = BUFFER_CLIENT_ID
        client_secret = BUFFER_CLIENT_SECRET
        access_token = BUFFER_ACCESS_TOKEN

    class github(object):
        username = GITHUB_USERNAME
        password = GITHUB_PASSWORD
        org = GITHUB_ORG
        repo = GITHUB_REPO

    class trello(object):
        app_key = TRELLO_APP_KEY
        app_secret = TRELLO_APP_SECRET
        auth_token = TRELLO_AUTH_TOKEN
        board_id = TRELLO_BOARD_ID
        default_list = TRELLO_DEFAULT_LIST
# utils
def csv_to_dict_list(filename):
    """Open a CSV file and return a list of dict objects, one per row."""
    with open(filename) as csv_file:
        reader = csv.DictReader(csv_file)
        rows = [row for row in reader]
    return rows
def get_random_color():
    """Return a random color code from the color-blind-safe palette.

    Reads etc/color-blind-safe.csv relative to the CWD on every call;
    raises IOError if the palette file is missing.
    """
    filename = 'etc/color-blind-safe.csv'
    colors = csv_to_dict_list(filename)
    # Bug fix: random.randint(0, len(colors)) is inclusive at BOTH ends,
    # so the old code could index one past the end and raise IndexError.
    return random.choice(colors)['color']
def print_version(ctx, param, value):
    """Eager click callback: echo the tool version and exit."""
    if ctx.resilient_parsing or not value:
        return
    click.echo('version {}'.format(__version__))
    ctx.exit()
# github utils
def get_github_auth(github_config):
    """Log into GitHub once and return the cached client."""
    global _github_auth
    if _github_auth is None:
        assert github_config.username
        assert github_config.password
        _github_auth = github3.login(
            github_config.username,
            github_config.password)
    return _github_auth
def get_github_repository(config, github_org, github_repo):
    """Log into GitHub (if needed) and return the repository object."""
    gh = get_github_auth(config.github)
    return gh.repository(github_org, github_repo)
def get_existing_github_issues(config, github_org, github_repo):
    """Return the titles of all issues in the given GitHub repository."""
    repository = get_github_repository(config, github_org, github_repo)
    return [str(issue.title) for issue in repository.iter_issues()]


def get_existing_github_labels(config, github_org, github_repo):
    """Return the names of all labels in the given GitHub repository."""
    repository = get_github_repository(config, github_org, github_repo)
    return [str(label.name) for label in repository.iter_labels()]


def get_existing_github_milestones(config, github_org, github_repo):
    """Return the titles of all milestones in the given GitHub repository."""
    repository = get_github_repository(config, github_org, github_repo)
    return [str(milestone.title) for milestone in repository.iter_milestones()]
# github core
def close_existing_github_issues(config, github_org, github_repo):
    """Close every issue in the given GitHub repository, echoing progress."""
    repository = get_github_repository(config, github_org, github_repo)
    titles = [str(issue.title) for issue in repository.iter_issues()]
    click.echo('closing {} issues'.format(len(titles)))
    for issue in repository.iter_issues():
        click.echo('closing issue "{}"'.format(issue.title))
        issue.close()
def create_github_issues(config, github_org, github_repo,
                         filename='etc/default_github_issues.csv'):
    """Create GitHub issues from rows of a CSV file, skipping duplicates.

    The 'labels' column may hold a single label or a comma-separated list.
    """
    issues = csv_to_dict_list(filename)
    repository = get_github_repository(config, github_org, github_repo)
    existing_issues = get_existing_github_issues(config, github_org, github_repo)
    click.echo('creating {} issues'.format(len(issues)))
    for issue in issues:
        title = str(issue['title'])
        body = str(issue['body'])
        labels = issue['labels']
        if labels:
            labels = labels.split(',') if ',' in labels else [labels]
        if title in existing_issues:
            click.echo('issue "{}" already exists'.format(title))
        else:
            click.echo('creating issue "{}"'.format(title))
            repository.create_issue(title, body, labels=labels)
def create_github_labels(config, github_org, github_repo,
                         filename='etc/default_github_labels.csv'):
    """Create GitHub labels from a CSV file, skipping ones that exist.

    Rows with an empty color get a random color-blind-safe color.
    """
    labels = csv_to_dict_list(filename)
    repository = get_github_repository(config, github_org, github_repo)
    existing_labels = get_existing_github_labels(config, github_org, github_repo)
    click.echo('creating {} labels'.format(len(labels)))
    for label in labels:
        name = str(label['name'])
        color = str(label['color'])
        if name in existing_labels:
            click.echo('label "{}" already exists'.format(name))
        else:
            click.echo('creating label "{}"'.format(name))
            if not color:
                color = get_random_color()
            repository.create_label(name, color)
def create_github_milestones(config, github_org, github_repo,
                             filename='etc/default_github_milestones.csv'):
    """Create GitHub milestones from a CSV file, skipping duplicates."""
    milestones = csv_to_dict_list(filename)
    repository = get_github_repository(config, github_org, github_repo)
    existing = get_existing_github_milestones(config, github_org, github_repo)
    click.echo('creating {} milestones'.format(len(milestones)))
    for milestone in milestones:
        title = str(milestone['title'])
        if title in existing:
            click.echo('milestone "{}" already exists'.format(title))
        else:
            click.echo('creating milestone "{}"'.format(title))
            repository.create_milestone(title)
def delete_existing_github_labels(config, github_org, github_repo):
    """Delete every label from the given GitHub repository."""
    repository = get_github_repository(config, github_org, github_repo)
    names = [str(label.name) for label in repository.iter_labels()]
    click.echo('removing {} labels'.format(len(names)))
    for name in names:
        click.echo('removing label "{}"'.format(name))
        repository.label(name).delete()
def delete_existing_github_milestones(config, github_org, github_repo):
    """Delete every milestone from the given GitHub repository.

    Bug fixes: the milestone iterator was previously exhausted by
    ``len(list(...))``, so the delete loop iterated nothing; it is now
    materialized once. Also, github_org/github_repo were being passed to
    ``iter_milestones()``, which takes state/sort parameters, not a repo
    path -- the repository object already identifies the repo.
    """
    repository = get_github_repository(config, github_org, github_repo)
    milestones = list(repository.iter_milestones())
    click.echo('removing {} milestones'.format(len(milestones)))
    for milestone in milestones:
        click.echo('removing milestone "{}"'.format(milestone.title))
        milestone.delete()
# trello utils
def get_trello_auth(trello_config):
    """Log into Trello once and return the cached client."""
    global _trello_auth
    if _trello_auth is None:
        assert trello_config.app_key
        assert trello_config.app_secret
        assert trello_config.auth_token
        _trello_auth = TrelloClient(
            api_key=trello_config.app_key,
            api_secret=trello_config.app_secret,
            token=trello_config.auth_token,
        )
    return _trello_auth
def get_existing_trello_boards(config, trello_board_id):
    """Return the names of the cards on the given Trello board.

    NOTE(review): despite its name this returns *card* names, exactly as
    the original did; only the loop variable that shadowed ``board`` has
    been renamed. Confirm with callers whether board names were intended.
    """
    trello = get_trello_auth(config.trello)
    board = trello.get_board(trello_board_id)
    return [str(card.name) for card in board.get_cards()]
def get_existing_trello_cards(config, trello_board_id):
    """Return the names of all cards on the given Trello board."""
    board = get_trello_auth(config.trello).get_board(trello_board_id)
    return [str(card.name) for card in board.get_cards()]


def get_existing_trello_labels(config, trello_board_id):
    """Return the label objects defined on the given Trello board."""
    board = get_trello_auth(config.trello).get_board(trello_board_id)
    return list(board.get_labels())


def get_existing_trello_lists(config, trello_board_id):
    """Return the names of all lists on the given Trello board."""
    board = get_trello_auth(config.trello).get_board(trello_board_id)
    return [trello_list.name for trello_list in board.all_lists()]
def get_trello_list_lookup(config, trello_board_id):
    """Return a bidirectional name<->id mapping of the board's lists.

    If the configured default list does not exist on the board yet, it is
    created and included in the mapping.
    """
    trello = get_trello_auth(config.trello)
    board = trello.get_board(trello_board_id)
    list_lookup = {}
    for trello_list in board.all_lists():
        list_lookup[trello_list.name] = trello_list.id
        list_lookup[trello_list.id] = trello_list.name
    default_list = config.trello.default_list
    if default_list not in list_lookup:
        new_list = board.add_list(default_list)
        list_lookup[default_list] = new_list.id
        list_lookup[new_list.id] = default_list
    return list_lookup
# trello core
def create_trello_cards(config, trello_board_id,
                        filename='etc/default_trello_cards.csv'):
    """Create Trello cards from rows of a CSV file, skipping duplicates.

    Cards land in the configured default list. The 'labels' column may
    hold a single label or a comma-separated list.
    """
    cards = csv_to_dict_list(filename)
    trello = get_trello_auth(config.trello)
    existing_cards = get_existing_trello_cards(config, trello_board_id)
    board_lookup = get_trello_list_lookup(config, trello_board_id)
    category = board_lookup[config.trello.default_list]
    board = trello.get_board(trello_board_id)
    click.echo('creating {} cards'.format(len(cards)))
    for card in cards:
        name = str(card.get('title', ''))
        description = str(card.get('body', ''))
        labels = card.get('labels', [])
        if labels:
            labels = labels.split(',') if ',' in labels else [labels]
        if name in existing_cards:
            click.echo('issue "{}" already exists'.format(name))
        else:
            click.echo('creating issue "{}"'.format(name))
            list_item = board.get_list(category)
            list_item.add_card(name, description, labels=labels)
            # NOTE: per-card label attachment was disabled upstream; label
            # support in the trello python client was broken at the time.
def create_trello_labels(config, trello_board_id,
                         filename='etc/default_trello_labels.csv'):
    """Report which Trello labels from a CSV file would be created.

    NOTE: actual label creation is still a TODO upstream -- this only
    echoes what it would do.
    """
    labels = csv_to_dict_list(filename)
    existing_labels = get_existing_trello_labels(config, trello_board_id)
    click.echo('creating {} labels'.format(len(labels)))
    for label in labels:
        name = str(label['name'])
        color = str(label['color'])
        if name in existing_labels:
            click.echo('label "{}" already exists'.format(name))
        else:
            click.echo('creating label "{}"'.format(name))
            if not color:
                color = get_random_color()
            # TODO: Create Trello label via API
def create_trello_lists(config, trello_board_id,
                        filename='etc/default_trello_lists.csv'):
    """Create Trello lists from rows of a CSV file, skipping duplicates."""
    lists = csv_to_dict_list(filename)
    trello = get_trello_auth(config.trello)
    existing_lists = get_existing_trello_lists(config, trello_board_id)
    click.echo('creating {} lists'.format(len(lists)))
    for item in lists:
        title = str(item['title'])
        if title in existing_lists:
            click.echo('list "{}" already exists'.format(title))
        else:
            click.echo('creating list "{}"'.format(title))
            trello.boards.new_list(trello_board_id, title)
def list_trello_boards(config):
    """Echo every Trello board's id and name, marking closed boards."""
    trello = get_trello_auth(config.trello)
    for board in trello.list_boards():
        suffix = ' (closed)' if board.closed else ''
        click.echo('{0}: {1}{2}'.format(board.id, board.name, suffix))
def list_trello_organizations(config):
    """Echo every Trello organization's id and name."""
    trello = get_trello_auth(config.trello)
    for organization in trello.list_organizations():
        click.echo('{0}: {1}'.format(organization.id, organization.name))
# sync github and trello
def sync_github_issues_to_trello_cards(config, github_org, github_repo,
                                       trello_board_id):
    """Create a Trello card in the default list for each GitHub issue.

    Issues whose title matches an existing card name are skipped.
    """
    trello = get_trello_auth(config.trello)
    board_lookup = get_trello_list_lookup(config, trello_board_id)
    existing_trello_cards = get_existing_trello_cards(config, trello_board_id)
    repository = get_github_repository(config, github_org, github_repo)
    # The destination list never changes per-issue; hoisted out of the loop.
    category = board_lookup[config.trello.default_list]
    for issue in repository.iter_issues():
        title = issue.title
        if title not in existing_trello_cards:
            click.echo('creating issue "{}"'.format(title))
            trello.cards.new(title, category, desc=issue.body)
        else:
            click.echo('issue "{}" already exists'.format(title))
def sync_trello_cards_to_github_issues(config, trello_board_id, github_org, github_repo):
    """Create a GitHub issue for each Trello card on the given board.

    Cards whose name matches an existing issue title are skipped.
    """
    trello = get_trello_auth(config.trello)
    existing_github_issues = get_existing_github_issues(config, github_org, github_repo)
    repository = get_github_repository(config, github_org, github_repo)
    cards = trello.get_board(trello_board_id).all_cards()
    click.echo('creating {} cards'.format(len(cards)))
    for card in cards:
        name = card.name
        if name in existing_github_issues:
            click.echo('card "{}" already exists'.format(name))
        else:
            click.echo('creating card "{}"'.format(name))
            repository.create_issue(name, card.description, labels=card.labels)
def list_trello_cards(config, trello_board_id):
    """Echo the id, name, and description of open cards on a board.

    Bug fix: the trello_board_id argument was previously ignored in favor
    of config.trello.board_id, so the --trello-board option of the
    list_trello_cards command had no effect.
    """
    trello = get_trello_auth(config.trello)
    board = trello.get_board(trello_board_id)
    for card in board.open_cards():
        click.echo('{0}: {1}'.format(card.id, card.name))
        if len(card.description):
            click.echo(card.description)
def get_buffer_auth(buffer_config):
    """Log into Buffer once and return the cached client."""
    global _buffer_auth
    if _buffer_auth is None:
        assert buffer_config.client_id
        assert buffer_config.client_secret
        assert buffer_config.access_token
        _buffer_auth = BufferAPI(
            client_id=buffer_config.client_id,
            client_secret=buffer_config.client_secret,
            access_token=buffer_config.access_token,
        )
    return _buffer_auth
def test_buffer(config):
    """Smoke-test Buffer access: dump the first Twitter profile and its
    pending updates. Python 2 print statements -- do not port blindly."""
    client = get_buffer_auth(config.buffer)
    profiles = Profiles(api=client).filter(service='twitter')
    if not len(profiles):
        raise Exception('Your twitter account is not configured')
    profile = profiles[0]
    print profile
    print
    pending = profile.updates.pending
    for item in pending:
        print item
        print item.id
        print item.text
        print item.scheduled_at
        # scheduled_at is a unix timestamp; render as a local datetime too.
        print datetime.datetime.fromtimestamp(item.scheduled_at)
# cli methods we are exposing to be used via terminal
@click.group()
@click_config.wrap(module=config, sections=('github', 'trello'))
@click.option('--version', is_flag=True, callback=print_version,
              expose_value=False, is_eager=True)
def cli():
    # Root of the click command tree. click_config.wrap overlays file-based
    # settings onto the module-level `config` namespaces before commands run.
    assert config.buffer
    pass
@cli.command('bootstrap')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_bootstrap(github_org, github_repo):
    """Sets up github with some sensible defaults."""
    # Resolve org/repo once; CLI options override the configured defaults.
    org = github_org or config.github.org
    repo = github_repo or config.github.repo
    delete_existing_github_labels(config, org, repo)
    create_github_labels(config, org, repo)
    create_github_issues(config, org, repo)
    create_github_milestones(config, org, repo)
@cli.command('close_existing_github_issues')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_close_existing_github_issues(force, github_org, github_repo):
    """Close all existing GitHub issues."""
    message = 'Do you really want to close all of your existing GitHub issues?'
    # Guard clause: bail out unless forced or confirmed interactively.
    if not (force or click.confirm(message)):
        click.echo('Action aborted')
        return
    close_existing_github_issues(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo)
@cli.command('create_github_issues')
@click.option('--filename', default='etc/default_github_issues.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_issues(filename, github_org, github_repo):
    """Create GitHub issues from a CSV file."""
    org = github_org or config.github.org
    repo = github_repo or config.github.repo
    create_github_issues(config, org, repo, filename)


@cli.command('create_github_labels')
@click.option('--filename', default='etc/default_github_labels.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_labels(filename, github_org, github_repo):
    """Create GitHub labels from a CSV file."""
    org = github_org or config.github.org
    repo = github_repo or config.github.repo
    create_github_labels(config, org, repo, filename)


@cli.command('create_github_milestones')
@click.option('--filename', default='etc/default_github_milestones.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_milestones(filename, github_org, github_repo):
    """Create GitHub milestones from a CSV file."""
    org = github_org or config.github.org
    repo = github_repo or config.github.repo
    create_github_milestones(config, org, repo, filename)
@cli.command('create_trello_cards')
@click.option('--filename', default='etc/default_trello_cards.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_cards(filename, trello_board):
    """Create Trello cards from a CSV file."""
    board_id = trello_board or config.trello.board_id
    create_trello_cards(config, board_id, filename)


@cli.command('create_trello_labels')
@click.option('--filename', default='etc/default_trello_labels.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_labels(filename, trello_board):
    """Create Trello labels from a CSV file."""
    board_id = trello_board or config.trello.board_id
    create_trello_labels(config, board_id, filename)


@cli.command('create_trello_lists')
@click.option('--filename', default='etc/default_trello_lists.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_lists(filename, trello_board):
    """Create Trello lists from a CSV file."""
    board_id = trello_board or config.trello.board_id
    create_trello_lists(config, board_id, filename)
@cli.command('delete_existing_github_labels')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_labels(force, github_org, github_repo):
    """Delete labels from GitHub repo."""
    message = 'Do you really want to delete all of the existing GitHub labels?'
    if not (force or click.confirm(message)):
        click.echo('Action aborted')
        return
    delete_existing_github_labels(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo)


@cli.command('delete_existing_github_milestones')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_milestones(force, github_org, github_repo):
    """Delete milestones from GitHub repo."""
    message = 'Do you really want to delete all of the existing GitHub milestones?'
    if not (force or click.confirm(message)):
        click.echo('Action aborted')
        return
    delete_existing_github_milestones(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo)
@cli.command('sync_github_issues_to_trello_cards')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
@click.option('--trello-board', type=str)
def cli_sync_github_issues_to_trello_cards(github_org, github_repo, trello_board):
    """Convert your GitHub issues to Trello cards."""
    org = github_org or config.github.org
    repo = github_repo or config.github.repo
    board_id = trello_board or config.trello.board_id
    sync_github_issues_to_trello_cards(config, org, repo, board_id)


@cli.command('sync_trello_cards_to_github_issues')
@click.option('--trello-board', type=str)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_sync_trello_cards_to_github_issues(trello_board, github_org, github_repo):
    """Convert your Trello cards to GitHub issues."""
    board_id = trello_board or config.trello.board_id
    org = github_org or config.github.org
    repo = github_repo or config.github.repo
    sync_trello_cards_to_github_issues(config, board_id, org, repo)
@cli.command('list_trello_boards')
def cli_list_trello_boards():
    """List your Trello boards."""
    list_trello_boards(config)


@cli.command('list_trello_cards')
@click.option('--trello-board', type=str)
def cli_list_trello_cards(trello_board):
    """List your Trello cards for a given board."""
    board_id = trello_board or config.trello.board_id
    list_trello_cards(config, board_id)


@cli.command('list_trello_organizations')
def cli_list_trello_organizations():
    """List your Trello organizations."""
    list_trello_organizations(config)
@cli.command('test_buffer')
def cli_test_buffer():
"""Convert your Trello cards to GitHub issues."""
try:
test_buffer(config)
except Exception as e:
print e
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
| 31.979196 | 89 | 0.688424 |
"""
Trolley syncs issues between CSV, Github, and Buffer with Trello.
"""
import csv
import datetime
import os
import random
import click
import click_config
import github3
from buffpy.api import API as BufferAPI
from buffpy.managers.profiles import Profiles
from buffpy.managers.updates import Updates
from trello import TrelloClient
__author__ = 'Jeff Triplett'
__copyright__ = 'Copyright 2015, Jeff Triplett'
__license__ = 'BSD'
__version__ = '0.1.6'
_buffer_auth = None
_github_auth = None
_trello_auth = None
BUFFER_CLIENT_ID = os.environ.get('BUFFER_CLIENT_ID')
BUFFER_CLIENT_SECRET = os.environ.get('BUFFER_CLIENT_SECRET')
BUFFER_ACCESS_TOKEN = os.environ.get('BUFFER_ACCESS_TOKEN')
GITHUB_USERNAME = os.environ.get('GITHUB_USERNAME')
GITHUB_PASSWORD = os.environ.get('GITHUB_PASSWORD')
GITHUB_ORG = os.environ.get('GITHUB_ORG')
GITHUB_REPO = os.environ.get('GITHUB_REPO')
GITHUB_SCOPES = ['user', 'repo']
TRELLO_APP_KEY = os.environ.get('TRELLO_APP_KEY')
TRELLO_APP_SECRET = os.environ.get('TRELLO_APP_SECRET')
TRELLO_AUTH_TOKEN = os.environ.get('TRELLO_AUTH_TOKEN')
TRELLO_BOARD_ID = os.environ.get('TRELLO_BOARD_ID')
TRELLO_DEFAULT_LIST = os.environ.get('TRELLO_DEFAULT_LIST', 'Uncategorized')
buffer(object):
client_id = BUFFER_CLIENT_ID
client_secret = BUFFER_CLIENT_SECRET
access_token = BUFFER_ACCESS_TOKEN
class github(object):
username = GITHUB_USERNAME
password = GITHUB_PASSWORD
org = GITHUB_ORG
repo = GITHUB_REPO
class trello(object):
app_key = TRELLO_APP_KEY
app_secret = TRELLO_APP_SECRET
auth_token = TRELLO_AUTH_TOKEN
board_id = TRELLO_BOARD_ID
default_list = TRELLO_DEFAULT_LIST
def csv_to_dict_list(filename):
"""Open a CSV file and return a list of dict objects."""
with open(filename) as f:
values = list(csv.DictReader(f))
return values
def get_random_color():
filename = 'etc/color-blind-safe.csv'
colors = csv_to_dict_list(filename)
index = random.randint(0, len(colors))
return colors[index]['color']
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('version {}'.format(__version__))
ctx.exit()
def get_github_auth(github_config):
"""Log me into github and return an object."""
global _github_auth
if _github_auth:
return _github_auth
assert github_config.username
assert github_config.password
_github_auth = github3.login(
github_config.username,
github_config.password)
return _github_auth
def get_github_repository(config, github_org, github_repo):
"""Return a repository object and log me in."""
github = get_github_auth(config.github)
repository = github.repository(github_org, github_repo)
return repository
def get_existing_github_issues(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_issues = [str(item.title) for item in repository.iter_issues()]
return existing_issues
def get_existing_github_labels(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_labels = [str(item.name) for item in repository.iter_labels()]
return existing_labels
def get_existing_github_milestones(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_milestones = [str(item.title) for item in repository.iter_milestones()]
return existing_milestones
def close_existing_github_issues(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
issues = [str(issue.title) for issue in repository.iter_issues()]
click.echo('closing {} issues'.format(len(issues)))
for issue in repository.iter_issues():
click.echo('closing issue "{}"'.format(issue.title))
issue.close()
def create_github_issues(config, github_org, github_repo,
filename='etc/default_github_issues.csv'):
issues = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_issues = get_existing_github_issues(config, github_org, github_repo)
click.echo('creating {} issues'.format(len(issues)))
for issue in issues:
title = str(issue['title'])
body = str(issue['body'])
labels = issue['labels']
if labels:
if ',' in labels:
labels = labels.split(',')
else:
labels = [labels]
if title not in existing_issues:
click.echo('creating issue "{}"'.format(title))
repository.create_issue(title, body, labels=labels)
else:
click.echo('issue "{}" already exists'.format(title))
def create_github_labels(config, github_org, github_repo,
filename='etc/default_github_labels.csv'):
labels = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_labels = get_existing_github_labels(config, github_org, github_repo)
click.echo('creating {} labels'.format(len(labels)))
for label in labels:
name = str(label['name'])
color = str(label['color'])
if name not in existing_labels:
click.echo('creating label "{}"'.format(name))
if not len(color):
color = get_random_color()
repository.create_label(name, color)
else:
click.echo('label "{}" already exists'.format(name))
def create_github_milestones(config, github_org, github_repo,
filename='etc/default_github_milestones.csv'):
milestones = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_milestones = get_existing_github_milestones(config, github_org, github_repo)
click.echo('creating {} milestones'.format(len(milestones)))
for milestone in milestones:
title = str(milestone['title'])
if title not in existing_milestones:
click.echo('creating milestone "{}"'.format(title))
repository.create_milestone(title)
else:
click.echo('milestone "{}" already exists'.format(title))
def delete_existing_github_labels(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
labels = [str(label.name) for label in repository.iter_labels()]
click.echo('removing {} labels'.format(len(labels)))
for label in labels:
click.echo('removing label "{}"'.format(label))
repository.label(label).delete()
def delete_existing_github_milestones(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
milestones = repository.iter_milestones(github_org, github_repo)
click.echo('removing {} milestones'.format(len(list(milestones))))
for milestone in milestones:
click.echo('removing milestone "{}"'.format(milestone.title))
milestone.delete()
def get_trello_auth(trello_config):
"""Log me into trello and return an object."""
global _trello_auth
if _trello_auth:
return _trello_auth
assert trello_config.app_key
assert trello_config.app_secret
assert trello_config.auth_token
_trello_auth = TrelloClient(
api_key=trello_config.app_key,
api_secret=trello_config.app_secret,
token=trello_config.auth_token,
)
return _trello_auth
def get_existing_trello_boards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
boards = [str(board.name) for board in board.get_cards()]
return boards
def get_existing_trello_cards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
cards = board.get_cards()
cards = [str(card.name) for card in cards]
return cards
def get_existing_trello_labels(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
labels = board.get_labels()
labels = [label for label in labels]
return labels
def get_existing_trello_lists(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
all_lists = board.all_lists()
all_lists = [item.name for item in all_lists]
return all_lists
def get_trello_list_lookup(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
all_lists = board.all_lists()
list_lookup = {}
for item in all_lists:
id = item.id
name = item.name
list_lookup[name] = id
list_lookup[id] = name
default_list = config.trello.default_list
if default_list not in list_lookup:
new_list = board.add_list(default_list)
new_list_id = new_list.id
list_lookup[default_list] = new_list_id
list_lookup[new_list_id] = default_list
return list_lookup
def create_trello_cards(config, trello_board_id,
filename='etc/default_trello_cards.csv'):
cards = csv_to_dict_list(filename)
trello = get_trello_auth(config.trello)
existing_cards = get_existing_trello_cards(config, trello_board_id)
board_lookup = get_trello_list_lookup(config, trello_board_id)
category = board_lookup[config.trello.default_list]
board = trello.get_board(trello_board_id)
click.echo('creating {} cards'.format(len(cards)))
for card in cards:
name = str(card.get('title', ''))
description = str(card.get('body', ''))
labels = card.get('labels', [])
if labels:
if ',' in labels:
labels = labels.split(',')
else:
labels = [labels]
if name not in existing_cards:
click.echo('creating issue "{}"'.format(name))
list_item = board.get_list(category)
new_card = list_item.add_card(name, description, labels=labels)
'''
# currently labels are broken in the trello python client :/
if len(labels):
for label in labels:
trello.cards.new_label(new_card['id'], label)
'''
else:
click.echo('issue "{}" already exists'.format(name))
def create_trello_labels(config, trello_board_id,
filename='etc/default_trello_labels.csv'):
labels = csv_to_dict_list(filename)
existing_labels = get_existing_trello_labels(config, trello_board_id)
click.echo('creating {} labels'.format(len(labels)))
for label in labels:
name = str(label['name'])
color = str(label['color'])
if name not in existing_labels:
click.echo('creating label "{}"'.format(name))
if not len(color):
color = get_random_color()
else:
click.echo('label "{}" already exists'.format(name))
def create_trello_lists(config, trello_board_id,
                        filename='etc/default_trello_lists.csv'):
    """Create any Trello lists named in the CSV file that the board lacks."""
    rows = csv_to_dict_list(filename)
    client = get_trello_auth(config.trello)
    known_titles = get_existing_trello_lists(config, trello_board_id)
    click.echo('creating {} lists'.format(len(rows)))
    for row in rows:
        list_title = str(row['title'])
        if list_title in known_titles:
            click.echo('list "{}" already exists'.format(list_title))
        else:
            click.echo('creating list "{}"'.format(list_title))
            client.boards.new_list(trello_board_id, list_title)
def list_trello_boards(config):
    """Echo "<id>: <name>" for every Trello board, flagging closed ones."""
    client = get_trello_auth(config.trello)
    for b in client.list_boards():
        suffix = ' (closed)' if b.closed else ''
        click.echo('{0}: {1}{2}'.format(b.id, b.name, suffix))
def list_trello_organizations(config):
    """Echo "<id>: <name>" for every Trello organization."""
    client = get_trello_auth(config.trello)
    for org in client.list_organizations():
        click.echo('{0}: {1}'.format(org.id, org.name))
def sync_github_issues_to_trello_cards(config, github_org, github_repo,
                                       trello_board_id):
    """Create a Trello card (in the configured default list) for each GitHub
    issue whose title does not already match an existing card.
    """
    trello = get_trello_auth(config.trello)
    board_lookup = get_trello_list_lookup(config, trello_board_id)
    existing_trello_cards = get_existing_trello_cards(config, trello_board_id)
    repository = get_github_repository(config, github_org, github_repo)
    # The target list is the same for every issue; resolve it once
    # instead of re-reading the lookup inside the loop.
    category = board_lookup[config.trello.default_list]
    for issue in repository.iter_issues():
        title = issue.title
        if title not in existing_trello_cards:
            click.echo('creating issue "{}"'.format(title))
            trello.cards.new(title, category, desc=issue.body)
        else:
            click.echo('issue "{}" already exists'.format(title))
def sync_trello_cards_to_github_issues(config, trello_board_id, github_org, github_repo):
    """Create a GitHub issue for each Trello card whose name does not
    already match an existing issue; card labels are carried over.
    """
    trello = get_trello_auth(config.trello)
    existing_github_issues = get_existing_github_issues(config, github_org, github_repo)
    repository = get_github_repository(config, github_org, github_repo)
    board = trello.get_board(trello_board_id)
    cards = board.all_cards()
    click.echo('creating {} cards'.format(len(cards)))
    for card in cards:
        name = card.name
        description = card.description
        labels = card.labels
        if name not in existing_github_issues:
            click.echo('creating card "{}"'.format(name))
            repository.create_issue(name, description, labels=labels)
        else:
            click.echo('card "{}" already exists'.format(name))
def list_trello_cards(config, trello_board_id):
    """Print every open card ("<id>: <name>" plus description, if any)
    on the given Trello board.

    Args:
        config: global configuration object holding Trello credentials
        trello_board_id (str): id of the board whose cards are listed
    """
    trello = get_trello_auth(config.trello)
    # Bug fix: honor the trello_board_id argument instead of always
    # reading config.trello.board_id, which ignored the caller's choice
    # (e.g. the --trello-board CLI flag).
    board = trello.get_board(trello_board_id)
    cards = list(board.open_cards())
    for card in cards:
        click.echo('{0}: {1}'.format(card.id, card.name))
        if len(card.description):
            click.echo(card.description)
def get_buffer_auth(buffer_config):
"""Log me into buffer and return an object."""
global _buffer_auth
if _buffer_auth:
return _buffer_auth
assert buffer_config.client_id
assert buffer_config.client_secret
assert buffer_config.access_token
_buffer_auth = BufferAPI(
client_id=buffer_config.client_id,
client_secret=buffer_config.client_secret,
access_token=buffer_config.access_token,
)
return _buffer_auth
def test_buffer(config):
    """Smoke-test the Buffer API: dump the first twitter profile and its
    pending updates to stdout.

    Raises:
        Exception: if no twitter profile is configured for this account.

    NOTE(review): uses Python 2 print statements — this module is py2-only.
    """
    client = get_buffer_auth(config.buffer)
    profiles = Profiles(api=client).filter(service='twitter')
    if not len(profiles):
        raise Exception('Your twitter account is not configured')
    profile = profiles[0]
    print profile
    print
    pending = profile.updates.pending
    for item in pending:
        print item
        print item.id
        print item.text
        print item.scheduled_at
        # scheduled_at is a unix timestamp; also show it as a datetime
        print datetime.datetime.fromtimestamp(item.scheduled_at)
@click.group()
@click_config.wrap(module=config, sections=('github', 'trello'))
@click.option('--version', is_flag=True, callback=print_version,
              expose_value=False, is_eager=True)
def cli():
    """Root command group; runs before every subcommand."""
    # NOTE(review): `assert` is stripped under `python -O`; if buffer
    # configuration is mandatory, raise an explicit error instead.
    assert config.buffer
    pass
@cli.command('bootstrap')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_bootstrap(github_org, github_repo):
    """Sets up github with some sensible defaults."""
    # Delete the stock labels first, then recreate labels, issues and
    # milestones from the bundled CSV defaults. CLI flags override the
    # configured org/repo.
    delete_existing_github_labels(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo)
    create_github_labels(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo)
    create_github_issues(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo)
    create_github_milestones(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo)
@cli.command('close_existing_github_issues')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_close_existing_github_issues(force, github_org, github_repo):
    """Close all existing GitHub issues."""
    # Destructive: require either --force or interactive confirmation
    message = 'Do you really want to close all of your existing GitHub issues?'
    if force or click.confirm(message):
        close_existing_github_issues(
            config,
            github_org or config.github.org,
            github_repo or config.github.repo)
    else:
        click.echo('Action aborted')
@cli.command('create_github_issues')
@click.option('--filename', default='etc/default_github_issues.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_issues(filename, github_org, github_repo):
    """Create GitHub issues from a CSV file."""
    # CLI flags fall back to the configured org/repo defaults
    create_github_issues(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo,
        filename)
@cli.command('create_github_labels')
@click.option('--filename', default='etc/default_github_labels.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_labels(filename, github_org, github_repo):
    """Create GitHub labels from a CSV file."""
    create_github_labels(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo,
        filename)
@cli.command('create_github_milestones')
@click.option('--filename', default='etc/default_github_milestones.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_milestones(filename, github_org, github_repo):
    """Create GitHub milestones from a CSV file."""
    create_github_milestones(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo,
        filename)
@cli.command('create_trello_cards')
@click.option('--filename', default='etc/default_trello_cards.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_cards(filename, trello_board):
    """Create Trello cards from a CSV file."""
    # --trello-board falls back to the configured board id
    create_trello_cards(
        config,
        trello_board or config.trello.board_id,
        filename)
@cli.command('create_trello_labels')
@click.option('--filename', default='etc/default_trello_labels.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_labels(filename, trello_board):
    """Create Trello labels from a CSV file."""
    create_trello_labels(
        config,
        trello_board or config.trello.board_id,
        filename)
@cli.command('create_trello_lists')
@click.option('--filename', default='etc/default_trello_lists.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_lists(filename, trello_board):
    """Create Trello lists from a CSV file."""
    create_trello_lists(
        config,
        trello_board or config.trello.board_id,
        filename)
@cli.command('delete_existing_github_labels')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_labels(force, github_org, github_repo):
    """Delete labels from GitHub repo."""
    # Destructive: require either --force or interactive confirmation
    message = 'Do you really want to delete all of the existing GitHub labels?'
    if force or click.confirm(message):
        delete_existing_github_labels(
            config,
            github_org or config.github.org,
            github_repo or config.github.repo)
    else:
        click.echo('Action aborted')
@cli.command('delete_existing_github_milestones')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_milestones(force, github_org, github_repo):
    """Delete milestones from GitHub repo."""
    message = 'Do you really want to delete all of the existing GitHub milestones?'
    if force or click.confirm(message):
        delete_existing_github_milestones(
            config,
            github_org or config.github.org,
            github_repo or config.github.repo)
    else:
        click.echo('Action aborted')
@cli.command('sync_github_issues_to_trello_cards')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
@click.option('--trello-board', type=str)
def cli_sync_github_issues_to_trello_cards(github_org, github_repo, trello_board):
    """Convert your GitHub issues to Trello cards."""
    # Flags fall back to the configured org/repo/board defaults
    sync_github_issues_to_trello_cards(
        config,
        github_org or config.github.org,
        github_repo or config.github.repo,
        trello_board or config.trello.board_id)
@cli.command('sync_trello_cards_to_github_issues')
@click.option('--trello-board', type=str)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_sync_trello_cards_to_github_issues(trello_board, github_org, github_repo):
    """Convert your Trello cards to GitHub issues."""
    sync_trello_cards_to_github_issues(
        config,
        trello_board or config.trello.board_id,
        github_org or config.github.org,
        github_repo or config.github.repo)
@cli.command('list_trello_boards')
def cli_list_trello_boards():
    """List your Trello boards."""
    list_trello_boards(config)
@cli.command('list_trello_cards')
@click.option('--trello-board', type=str)
def cli_list_trello_cards(trello_board):
    """List your Trello cards for a given board."""
    # --trello-board falls back to the configured board id
    list_trello_cards(
        config,
        trello_board or config.trello.board_id)
@cli.command('list_trello_organizations')
def cli_list_trello_organizations():
    """List your Trello organizations."""
    list_trello_organizations(config)
@cli.command('test_buffer')
def cli_test_buffer():
    """Test the configured Buffer API credentials."""
    # (Previous docstring was copy-pasted from the trello->github sync
    # command; this command only smoke-tests the Buffer connection.)
    try:
        test_buffer(config)
    except Exception as e:
        print e
if __name__ == '__main__':
cli()
| false | true |
f72f2ae2da1a8824ea37a9d9fc536ef3fce7fc12 | 1,481 | py | Python | tests/test.py | icetana-james/python-onvif-zeep | 9947a3c203037857fbc04c9fafabc722e3436a13 | [
"MIT"
] | null | null | null | tests/test.py | icetana-james/python-onvif-zeep | 9947a3c203037857fbc04c9fafabc722e3436a13 | [
"MIT"
] | null | null | null | tests/test.py | icetana-james/python-onvif-zeep | 9947a3c203037857fbc04c9fafabc722e3436a13 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*-coding=utf-8
from __future__ import print_function, division
import unittest
from onvif import ONVIFCamera, ONVIFError
CAM_HOST = '10.1.3.10'
CAM_PORT = 80
CAM_USER = 'root'
CAM_PASS = 'password'
DEBUG = False
def log(ret):
    """Print *ret* only when the module-level DEBUG flag is enabled."""
    if DEBUG:
        print(ret)
class TestDevice(unittest.TestCase):
    """Smoke tests against a live ONVIF camera's device-management service.

    Most tests only verify that the call completes without raising; the
    unused `ret` assignments are intentional.
    """
    # Class level cam. Run this test more efficiently..
    cam = ONVIFCamera(CAM_HOST, CAM_PORT, CAM_USER, CAM_PASS)
    # ***************** Test Capabilities ***************************
    def test_GetWsdlUrl(self):
        ret = self.cam.devicemgmt.GetWsdlUrl()
    def test_GetHostname(self):
        ''' Get the hostname from a device '''
        self.cam.devicemgmt.GetHostname()
    def test_GetServiceCapabilities(self):
        '''Returns the capabilities of the devce service.'''
        ret = self.cam.devicemgmt.GetServiceCapabilities()
    def test_GetDNS(self):
        ''' Gets the DNS setting from a device '''
        ret = self.cam.devicemgmt.GetDNS()
        self.assertTrue(hasattr(ret, 'FromDHCP'))
        # Manual DNS entries only exist when DHCP is not in use
        if not ret.FromDHCP and len(ret.DNSManual) > 0:
            log(ret.DNSManual[0].Type)
            log(ret.DNSManual[0].IPv4Address)
    def test_GetNTP(self):
        ''' Get the NTP settings from a device '''
        ret = self.cam.devicemgmt.GetNTP()
        # NOTE(review): `== False` — prefer `not ret.FromDHCP` if
        # FromDHCP is guaranteed to be a boolean.
        if ret.FromDHCP == False:
            self.assertTrue(hasattr(ret, 'NTPManual'))
            log(ret.NTPManual)
if __name__ == '__main__':
unittest.main()
| 26.927273 | 69 | 0.627279 |
from __future__ import print_function, division
import unittest
from onvif import ONVIFCamera, ONVIFError
CAM_HOST = '10.1.3.10'
CAM_PORT = 80
CAM_USER = 'root'
CAM_PASS = 'password'
DEBUG = False
def log(ret):
if DEBUG:
print(ret)
class TestDevice(unittest.TestCase):
cam = ONVIFCamera(CAM_HOST, CAM_PORT, CAM_USER, CAM_PASS)
def test_GetWsdlUrl(self):
ret = self.cam.devicemgmt.GetWsdlUrl()
def test_GetHostname(self):
self.cam.devicemgmt.GetHostname()
def test_GetServiceCapabilities(self):
ret = self.cam.devicemgmt.GetServiceCapabilities()
def test_GetDNS(self):
ret = self.cam.devicemgmt.GetDNS()
self.assertTrue(hasattr(ret, 'FromDHCP'))
if not ret.FromDHCP and len(ret.DNSManual) > 0:
log(ret.DNSManual[0].Type)
log(ret.DNSManual[0].IPv4Address)
def test_GetNTP(self):
ret = self.cam.devicemgmt.GetNTP()
if ret.FromDHCP == False:
self.assertTrue(hasattr(ret, 'NTPManual'))
log(ret.NTPManual)
if __name__ == '__main__':
unittest.main()
| true | true |
f72f2bb0605f13d7ce68cd600bd8a6628eb6c15d | 249 | py | Python | scripts/fundamentals/codewars_sum_range_of_numbers.py | duttashi/learnpy | c08b76b173b06d66187e51a6939d55d5dd12cb5a | [
"MIT"
] | null | null | null | scripts/fundamentals/codewars_sum_range_of_numbers.py | duttashi/learnpy | c08b76b173b06d66187e51a6939d55d5dd12cb5a | [
"MIT"
] | 77 | 2019-04-20T06:54:19.000Z | 2022-01-16T08:15:20.000Z | scripts/fundamentals/codewars_sum_range_of_numbers.py | duttashi/learnpy | c08b76b173b06d66187e51a6939d55d5dd12cb5a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 8 16:31:07 2020
@author: Ashish
"""
def get_sum(a, b):
    """Return the sum of all integers between a and b, inclusive.

    Generalized to be order-independent (get_sum(3, 1) == get_sum(1, 3)),
    matching the kata this file implements, and computed with the
    closed-form arithmetic-series formula in O(1) instead of an O(n) loop.
    """
    lo, hi = min(a, b), max(a, b)
    return (lo + hi) * (hi - lo + 1) // 2

print(get_sum(0, 1))
| 12.45 | 36 | 0.457831 |
def get_sum(a,b):
numsum=0
for i in range(a,b+1):
numsum+=i
return numsum
print(get_sum(0,1))
| true | true |
f72f2c05186f829fbd3b433c815fb25ffb7d09a0 | 231 | py | Python | core/templatetags/check_if_favorited_filter.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | core/templatetags/check_if_favorited_filter.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | core/templatetags/check_if_favorited_filter.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | from django import template
register = template.Library()
@register.filter
def check_if_favorited(document, user):
    """Template filter: whether *user* has favorited *document*.

    Delegates to document.is_favorited(user), which is expected to
    return a boolean.
    """
    return document.is_favorited(user)
| 25.666667 | 74 | 0.770563 | from django import template
register = template.Library()
@register.filter
def check_if_favorited(document, user):
return document.is_favorited(user)
| true | true |
f72f2dd7cff77a014d7828254d487da410fcc65b | 2,757 | py | Python | implementations/guidi2020/benchmark/PyAligner/test/test_matrix_init.py | r-barnes/sw_comparison | 1ac2c9cc10a32badd6b8fb1e96516c97f7800176 | [
"BSD-Source-Code"
] | null | null | null | implementations/guidi2020/benchmark/PyAligner/test/test_matrix_init.py | r-barnes/sw_comparison | 1ac2c9cc10a32badd6b8fb1e96516c97f7800176 | [
"BSD-Source-Code"
] | null | null | null | implementations/guidi2020/benchmark/PyAligner/test/test_matrix_init.py | r-barnes/sw_comparison | 1ac2c9cc10a32badd6b8fb1e96516c97f7800176 | [
"BSD-Source-Code"
] | null | null | null | import unittest
from pyaligner import *
class TestMatrixInit ( unittest.TestCase ):
    """DPMatrix construction tests for global and semi-global alignment.

    Both modes share the same sequences, scorer and expected DP matrix —
    only the `semiglobal` flag differs — so the shared assertions are
    factored into a single helper instead of being duplicated.
    """

    # Expected DP matrix for seqh="ACTG", seqv="ACAAA" with
    # Scorer(5, -1, -3, 7); "X" marks cells pruned by the x-drop.
    EXPECTED_MATRIX = [[  0,  -3,  -6,  -9, -12 ],
                       [ -3,   5,   2, "X", "X" ],
                       [ -6,   2,  10,   7,   4 ],
                       [ -9,  -1,   7,   9,   6 ],
                       [-12, "X",   4,   6,   8 ],
                       [-15, "X", "X", "X",   5 ]]

    def _assert_matrix(self, semiglobal):
        """Build the fixture DPMatrix and verify all its invariants."""
        scorer = Scorer( 5, -1, -3, 7 )
        seqh = Sequence( "ACTG" )
        seqv = Sequence( "ACAAA" )
        if semiglobal:
            matrix = DPMatrix( seqh, seqv, scorer, True )
        else:
            matrix = DPMatrix( seqh, seqv, scorer )
        self.assertEqual( matrix.seqh.seq_string, seqh.seq_string )
        self.assertEqual( matrix.seqv.seq_string, seqv.seq_string )
        self.assertEqual( matrix.scorer.match, scorer.match )
        self.assertEqual( matrix.scorer.mismatch, scorer.mismatch )
        self.assertEqual( matrix.scorer.gap, scorer.gap )
        self.assertEqual( matrix.scorer.xdrop, scorer.xdrop )
        self.assertEqual( matrix.semiglobal, semiglobal )
        self.assertEqual( matrix.dimh, 5 )
        self.assertEqual( matrix.dimv, 6 )
        self.assertEqual( matrix.max_score, 10 )
        self.assertEqual( matrix.max_row, 2 )
        self.assertEqual( matrix.max_col, 2 )
        self.assertEqual( matrix.dp_matrix, self.EXPECTED_MATRIX )

    def test_matrix_init_global ( self ):
        self._assert_matrix( False )

    def test_matrix_init_semiglobal ( self ):
        self._assert_matrix( True )
if __name__ == '__main__':
unittest.main()
| 45.196721 | 73 | 0.488575 | import unittest
from pyaligner import *
class TestMatrixInit ( unittest.TestCase ):
def test_matrix_init_global ( self ):
scorer = Scorer( 5, -1, -3, 7 )
seqh = Sequence( "ACTG" )
seqv = Sequence( "ACAAA" )
matrix = DPMatrix( seqh, seqv, scorer )
self.assertEqual( matrix.seqh.seq_string, seqh.seq_string )
self.assertEqual( matrix.seqv.seq_string, seqv.seq_string )
self.assertEqual( matrix.scorer.match, scorer.match )
self.assertEqual( matrix.scorer.mismatch, scorer.mismatch )
self.assertEqual( matrix.scorer.gap, scorer.gap )
self.assertEqual( matrix.scorer.xdrop, scorer.xdrop )
self.assertEqual( matrix.semiglobal, False )
self.assertEqual( matrix.dimh, 5 )
self.assertEqual( matrix.dimv, 6 )
self.assertEqual( matrix.max_score, 10 )
self.assertEqual( matrix.max_row, 2 )
self.assertEqual( matrix.max_col, 2 )
self.assertEqual( matrix.dp_matrix, [[ 0, -3, -6, -9, -12 ],
[ -3, 5, 2, "X", "X" ],
[ -6, 2, 10, 7, 4 ],
[ -9, -1, 7, 9, 6 ],
[-12, "X", 4, 6, 8 ],
[-15, "X", "X", "X", 5 ]])
def test_matrix_init_semiglobal ( self ):
scorer = Scorer( 5, -1, -3, 7 )
seqh = Sequence( "ACTG" )
seqv = Sequence( "ACAAA" )
matrix = DPMatrix( seqh, seqv, scorer, True )
self.assertEqual( matrix.seqh.seq_string, seqh.seq_string )
self.assertEqual( matrix.seqv.seq_string, seqv.seq_string )
self.assertEqual( matrix.scorer.match, scorer.match )
self.assertEqual( matrix.scorer.mismatch, scorer.mismatch )
self.assertEqual( matrix.scorer.gap, scorer.gap )
self.assertEqual( matrix.scorer.xdrop, scorer.xdrop )
self.assertEqual( matrix.semiglobal, True )
self.assertEqual( matrix.dimh, 5 )
self.assertEqual( matrix.dimv, 6 )
self.assertEqual( matrix.max_score, 10 )
self.assertEqual( matrix.max_row, 2 )
self.assertEqual( matrix.max_col, 2 )
self.assertEqual( matrix.dp_matrix, [[ 0, -3, -6, -9, -12 ],
[ -3, 5, 2, "X", "X" ],
[ -6, 2, 10, 7, 4 ],
[ -9, -1, 7, 9, 6 ],
[-12, "X", 4, 6, 8 ],
[-15, "X", "X", "X", 5 ]])
if __name__ == '__main__':
unittest.main()
| true | true |
f72f2e9ad66d303ddba9f56bb29c8a001bcf4f92 | 623 | py | Python | visualization/__init__.py | svarthafnyra/CNN_Visualizations | a17615932519e67c7b7ec4ebaf030047dfd6d1e2 | [
"MIT"
] | 17 | 2019-08-13T06:07:13.000Z | 2021-03-02T22:14:21.000Z | visualization/__init__.py | svarthafnyra/CAMP-Project | a17615932519e67c7b7ec4ebaf030047dfd6d1e2 | [
"MIT"
] | null | null | null | visualization/__init__.py | svarthafnyra/CAMP-Project | a17615932519e67c7b7ec4ebaf030047dfd6d1e2 | [
"MIT"
] | 3 | 2019-12-16T09:08:10.000Z | 2020-02-19T10:43:25.000Z | #from visualization.deep_dream import runDeepDream
from visualization.gradcam import runGradCam
from visualization.guided_backprop import runGBackProp
from visualization.guided_gradcam import runGGradCam
from visualization.smooth_grad import runsmoothGrad
#from visualization.inverted_representation import runInvRep
from visualization.vanilla_backprop import runVanillaBP
from visualization.explain import runExplain
from visualization.deepimgprior import runImgPrior
from visualization.gradcam2 import runGradCam2
from visualization.explain2 import runExplain2
from visualization.inverted_representation import runInvRep | 51.916667 | 60 | 0.900482 |
from visualization.gradcam import runGradCam
from visualization.guided_backprop import runGBackProp
from visualization.guided_gradcam import runGGradCam
from visualization.smooth_grad import runsmoothGrad
from visualization.vanilla_backprop import runVanillaBP
from visualization.explain import runExplain
from visualization.deepimgprior import runImgPrior
from visualization.gradcam2 import runGradCam2
from visualization.explain2 import runExplain2
from visualization.inverted_representation import runInvRep | true | true |
f72f30a07b60677628393833bb542a0b8f11afc0 | 22,124 | py | Python | src/buildstream/_artifact.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | src/buildstream/_artifact.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | src/buildstream/_artifact.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2020 Codethink Limited
# Copyright (C) 2019 Bloomberg Finance LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Tom Pollard <tom.pollard@codethink.co.uk>
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
"""
Artifact
=========
Implementation of the Artifact class which aims to 'abstract' direct
artifact composite interaction away from Element class
"""
import os
from typing import Dict, Tuple
from ._protos.buildstream.v2.artifact_pb2 import Artifact as ArtifactProto
from . import _yaml
from . import utils
from .node import Node
from .types import _Scope
from .storage._casbaseddirectory import CasBasedDirectory
from .sandbox._config import SandboxConfig
from ._variables import Variables
# An Artifact class to abstract artifact operations
# from the Element class
#
# Args:
# element (Element): The Element object
# context (Context): The BuildStream context
# strong_key (str): The elements strong cache key, dependent on context
# strict_key (str): The elements strict cache key
# weak_key (str): The elements weak cache key
#
class Artifact:
version = 1
    def __init__(self, element, context, *, strong_key=None, strict_key=None, weak_key=None):
        # See the class-level comment banner for argument documentation.
        self._element = element
        self._context = context
        self._cache_key = strong_key
        self._strict_key = strict_key
        self._weak_cache_key = weak_key
        self._artifactdir = context.artifactdir
        self._cas = context.get_cascache()  # Shared CAS cache handle
        self._tmpdir = context.tmpdir
        self._proto = None  # Cached ArtifactProto message (see _get_proto() callers below)
        self._metadata_keys = None  # Strong, strict and weak key tuple extracted from the artifact
        self._metadata_dependencies = None  # Dictionary of dependency strong keys from the artifact
        self._metadata_workspaced = None  # Boolean of whether it's a workspaced artifact
        self._metadata_workspaced_dependencies = None  # List of which dependencies are workspaced from the artifact
        self._cached = None  # Boolean of whether the artifact is cached
# strong_key():
#
# A property which evaluates to the strong key, regardless of whether
# it was the strong key that the Artifact object was initialized with
# or whether it was the strong key loaded from artifact metadata.
#
@property
def strong_key(self) -> str:
if self.cached():
key, _, _ = self.get_metadata_keys()
else:
key = self._cache_key
return key
# strict_key():
#
# A property which evaluates to the strict key, regardless of whether
# it was the strict key that the Artifact object was initialized with
# or whether it was the strict key loaded from artifact metadata.
#
@property
def strict_key(self) -> str:
if self.cached():
_, key, _ = self.get_metadata_keys()
else:
key = self._strict_key
return key
# weak_key():
#
# A property which evaluates to the weak key, regardless of whether
# it was the weak key that the Artifact object was initialized with
# or whether it was the weak key loaded from artifact metadata.
#
@property
def weak_key(self) -> str:
if self.cached():
_, _, key = self.get_metadata_keys()
else:
key = self._weak_cache_key
return key
# get_files():
#
# Get a virtual directory for the artifact files content
#
# Returns:
# (Directory): The virtual directory object
#
def get_files(self):
files_digest = self._get_field_digest("files")
return CasBasedDirectory(self._cas, digest=files_digest)
# get_buildtree():
#
# Get a virtual directory for the artifact buildtree content
#
# Returns:
# (Directory): The virtual directory object
#
def get_buildtree(self):
buildtree_digest = self._get_field_digest("buildtree")
return CasBasedDirectory(self._cas, digest=buildtree_digest)
# get_sources():
#
# Get a virtual directory for the artifact sources
#
# Returns:
# (Directory): The virtual directory object
#
def get_sources(self):
sources_digest = self._get_field_digest("sources")
return CasBasedDirectory(self._cas, digest=sources_digest)
# get_logs():
#
# Get the paths of the artifact's logs
#
# Returns:
# (list): A list of object paths
#
def get_logs(self):
artifact = self._get_proto()
logfile_paths = []
for logfile in artifact.logs:
logfile_paths.append(self._cas.objpath(logfile.digest))
return logfile_paths
# get_extract_key():
#
# Get the key used to extract the artifact
#
# Returns:
# (str): The key
#
def get_extract_key(self):
return self._cache_key or self._weak_cache_key
# cache():
#
# Create the artifact and commit to cache
#
# Args:
# sandbox_build_dir (Directory): Virtual Directory object for the sandbox build-root
# collectvdir (Directory): Virtual Directoy object from within the sandbox for collection
# sourcesvdir (Directory): Virtual Directoy object for the staged sources
# buildresult (tuple): bool, short desc and detailed desc of result
# publicdata (dict): dict of public data to commit to artifact metadata
# variables (Variables): The element's Variables
# environment (dict): dict of the element's environment variables
# sandboxconfig (SandboxConfig): The element's SandboxConfig
#
# Returns:
# (int): The size of the newly cached artifact
#
    def cache(
        self,
        *,
        sandbox_build_dir,
        collectvdir,
        sourcesvdir,
        buildresult,
        publicdata,
        variables,
        environment,
        sandboxconfig,
    ):
        """Assemble the Artifact proto from the build outputs, commit every
        component to CAS and write the serialized proto to the artifact
        directory under both the strong and weak key names. Returns the
        total size in bytes of the newly cached objects (see the comment
        banner above for argument documentation)."""
        context = self._context
        element = self._element
        size = 0
        filesvdir = None
        buildtreevdir = None
        artifact = ArtifactProto()
        artifact.version = self.version
        # Store result
        artifact.build_success = buildresult[0]
        artifact.build_error = buildresult[1]
        artifact.build_error_details = "" if not buildresult[2] else buildresult[2]
        # Store keys
        artifact.strong_key = self._cache_key
        artifact.strict_key = self._strict_key
        artifact.weak_key = self._weak_cache_key
        artifact.was_workspaced = bool(element._get_workspace())
        # Capture file mtimes only for workspaced builds
        properties = ["mtime"] if artifact.was_workspaced else []
        # Store files
        if collectvdir is not None:
            filesvdir = CasBasedDirectory(cas_cache=self._cas)
            filesvdir._import_files_internal(collectvdir, properties=properties)
            artifact.files.CopyFrom(filesvdir._get_digest())
            size += filesvdir._get_size()
        # Store public data
        with utils._tempnamedfile_name(dir=self._tmpdir) as tmpname:
            _yaml.roundtrip_dump(publicdata, tmpname)
            public_data_digest = self._cas.add_object(path=tmpname)
            artifact.public_data.CopyFrom(public_data_digest)
            size += public_data_digest.size_bytes
        # Store low diversity metadata, this metadata must have a high
        # probability of deduplication, such as environment variables
        # and SandboxConfig.
        #
        with utils._tempnamedfile_name(dir=self._tmpdir) as tmpname:
            sandbox_dict = sandboxconfig.to_dict()
            low_diversity_dict = {"environment": environment, "sandbox-config": sandbox_dict}
            low_diversity_node = Node.from_dict(low_diversity_dict)
            _yaml.roundtrip_dump(low_diversity_node, tmpname)
            low_diversity_meta_digest = self._cas.add_object(path=tmpname)
            artifact.low_diversity_meta.CopyFrom(low_diversity_meta_digest)
            size += low_diversity_meta_digest.size_bytes
        # Store high diversity metadata, this metadata is expected to diverge
        # for every element and as such cannot be deduplicated.
        #
        with utils._tempnamedfile_name(dir=self._tmpdir) as tmpname:
            # The Variables object supports being converted directly to a dictionary
            variables_dict = dict(variables)
            high_diversity_dict = {"variables": variables_dict}
            high_diversity_node = Node.from_dict(high_diversity_dict)
            _yaml.roundtrip_dump(high_diversity_node, tmpname)
            high_diversity_meta_digest = self._cas.add_object(path=tmpname)
            artifact.high_diversity_meta.CopyFrom(high_diversity_meta_digest)
            size += high_diversity_meta_digest.size_bytes
        # store build dependencies
        for e in element._dependencies(_Scope.BUILD):
            new_build = artifact.build_deps.add()
            new_build.project_name = e.project_name
            new_build.element_name = e.name
            new_build.cache_key = e._get_cache_key()
            new_build.was_workspaced = bool(e._get_workspace())
        # Store log file
        log_filename = context.messenger.get_log_filename()
        if log_filename:
            digest = self._cas.add_object(path=log_filename)
            log = artifact.logs.add()
            log.name = os.path.basename(log_filename)
            log.digest.CopyFrom(digest)
            size += log.digest.size_bytes
        # Store build tree
        if sandbox_build_dir is not None:
            buildtreevdir = CasBasedDirectory(cas_cache=self._cas)
            buildtreevdir._import_files_internal(sandbox_build_dir, properties=properties)
            artifact.buildtree.CopyFrom(buildtreevdir._get_digest())
            size += buildtreevdir._get_size()
        # Store sources
        if sourcesvdir is not None:
            artifact.sources.CopyFrom(sourcesvdir._get_digest())
            size += sourcesvdir._get_size()
        # Write the serialized proto once per distinct key name
        # (strong and weak keys may be equal, hence the deduplication)
        os.makedirs(os.path.dirname(os.path.join(self._artifactdir, element.get_artifact_name())), exist_ok=True)
        keys = utils._deduplicate([self._cache_key, self._weak_cache_key])
        for key in keys:
            path = os.path.join(self._artifactdir, element.get_artifact_name(key=key))
            with utils.save_file_atomic(path, mode="wb") as f:
                f.write(artifact.SerializeToString())
        return size
# cached_buildtree()
#
# Check if artifact is cached with expected buildtree. A
# buildtree will not be present if the rest of the partial artifact
# is not cached.
#
# Returns:
# (bool): True if artifact cached with buildtree, False if
# missing expected buildtree. Note this only confirms
# if a buildtree is present, not its contents.
#
def cached_buildtree(self):
buildtree_digest = self._get_field_digest("buildtree")
if buildtree_digest:
return self._cas.contains_directory(buildtree_digest, with_files=True)
else:
return False
# buildtree_exists()
#
# Check if artifact was created with a buildtree. This does not check
# whether the buildtree is present in the local cache.
#
# Returns:
# (bool): True if artifact was created with buildtree
#
def buildtree_exists(self):
artifact = self._get_proto()
return bool(str(artifact.buildtree))
# cached_sources()
#
# Check if artifact is cached with sources.
#
# Returns:
# (bool): True if artifact is cached with sources, False if sources
# are not available.
#
def cached_sources(self):
sources_digest = self._get_field_digest("sources")
if sources_digest:
return self._cas.contains_directory(sources_digest, with_files=True)
else:
return False
# load_public_data():
#
# Loads the public data from the cached artifact
#
# Returns:
# (dict): The artifacts cached public data
#
def load_public_data(self):
# Load the public data from the artifact
artifact = self._get_proto()
with self._cas.open(artifact.public_data) as meta_file:
meta_str = meta_file.read()
data = _yaml.load_data(meta_str, file_name="public.yaml")
return data
# load_sandbox_config():
#
# Load the sandbox configuration the artifact was built with.
#
# Returns:
#     The stored SandboxConfig object
#
def load_sandbox_config(self) -> SandboxConfig:
    proto = self._get_proto()
    # The sandbox settings live in the low diversity metadata blob
    yaml_path = self._cas.objpath(proto.low_diversity_meta)
    meta = _yaml.load(yaml_path, shortname="low-diversity-meta.yaml")
    return SandboxConfig.new_from_node(meta.get_mapping("sandbox-config"))
# load_environment():
#
# Load the environment variables the artifact was built with.
#
# Returns:
#     The environment variables as a plain dictionary
#
def load_environment(self) -> Dict[str, str]:
    proto = self._get_proto()
    # The environment lives in the low diversity metadata blob
    yaml_path = self._cas.objpath(proto.low_diversity_meta)
    meta = _yaml.load(yaml_path, shortname="low-diversity-meta.yaml")
    # Strip the node wrappers so callers receive plain python objects
    return meta.get_mapping("environment").strip_node_info()
# load_variables():
#
# Load the element variables the artifact was built with.
#
# Returns:
#     The element variables
#
def load_variables(self) -> Variables:
    proto = self._get_proto()
    # Variables diverge per element, hence the high diversity blob
    yaml_path = self._cas.objpath(proto.high_diversity_meta)
    meta = _yaml.load(yaml_path, shortname="high-diversity-meta.yaml")
    return Variables(meta.get_mapping("variables"))
# load_build_result():
#
# Load the recorded build result from the cached artifact.
#
# Returns:
#     (bool): Whether the cached build was a success
#     (str): Short description of the result
#     (str): Detailed description of the result
#
def load_build_result(self):
    proto = self._get_proto()
    return (proto.build_success, proto.build_error, proto.build_error_details)
# get_metadata_keys():
#
# Retrieve the strong, strict and weak keys recorded in the artifact,
# memoizing the result on first access.
#
# Returns:
#     The strong key
#     The strict key
#     The weak key
#
def get_metadata_keys(self) -> Tuple[str, str, str]:
    # Serve from the memoized copy when available
    if self._metadata_keys is None:
        proto = self._get_proto()
        self._metadata_keys = (proto.strong_key, proto.strict_key, proto.weak_key)
    return self._metadata_keys
# get_metadata_workspaced():
#
# Retrieve whether the artifact was built from a workspace, memoizing
# the flag on first access.
#
# Returns:
#     (bool): Whether the given artifact was workspaced
#
def get_metadata_workspaced(self):
    if self._metadata_workspaced is None:
        self._metadata_workspaced = self._get_proto().was_workspaced
    return self._metadata_workspaced
# get_metadata_workspaced_dependencies():
#
# Retrieve the names of the build dependencies which were workspaced,
# memoizing the list on first access.
#
# Returns:
#     (list): Element names of the workspaced dependencies
#
def get_metadata_workspaced_dependencies(self):
    if self._metadata_workspaced_dependencies is None:
        proto = self._get_proto()
        names = []
        for dep in proto.build_deps:
            if dep.was_workspaced:
                names.append(dep.element_name)
        self._metadata_workspaced_dependencies = names
    return self._metadata_workspaced_dependencies
# get_dependency_artifact_names()
#
# Retrieve the artifact refs of every _Scope.BUILD dependency,
# in staging order.
#
# Returns:
#     (list [str]): One "project/element/key" ref per build dependency
#
def get_dependency_artifact_names(self):
    # XXX: The pylint disable is necessary due to upstream issue:
    # https://github.com/PyCQA/pylint/issues/850
    from .element import _get_normal_name  # pylint: disable=cyclic-import

    proto = self._get_proto()
    refs = []
    try:
        for dep in proto.build_deps:
            normal_name = _get_normal_name(dep.element_name)
            refs.append(os.path.join(dep.project_name, normal_name, dep.cache_key))
    except AttributeError:
        # If the artifact has no dependencies, the build_deps attribute
        # will be missing from the proto.
        refs = []
    return refs
# query_cache():
#
# Probe the local cache for the artifact under the current extract
# key, verifying that every required part (files tree, metadata blobs,
# public data and log files) is actually present.  Updates the cached
# state flag accordingly.
#
# Returns:
#     (bool): Whether the complete artifact is in the local cache
#
def query_cache(self):
    proto = self._load_proto()
    if not proto:
        self._cached = False
        return False

    # The 'files' tree, when referenced, must be present with contents
    if str(proto.files):
        if not self._cas.contains_directory(proto.files, with_files=True):
            self._cached = False
            return False

    # Both metadata blobs, the public data and every log must exist
    required = [proto.low_diversity_meta, proto.high_diversity_meta, proto.public_data]
    required.extend(log.digest for log in proto.logs)
    if not self._cas.contains_files(required):
        self._cached = False
        return False

    self._proto = proto
    self._cached = True
    return True
# cached()
#
# Report the result of the last cache query.  Only valid after
# `query_cache()` or `set_cached()` has been called.
#
# Args:
#     buildtree (bool): Also require the buildtree to be present,
#                       when the artifact was created with one
#
# Returns:
#     (bool): Whether artifact is in local cache
#
def cached(self, *, buildtree=False):
    assert self._cached is not None
    if not buildtree:
        return self._cached
    # A missing buildtree only matters when one was actually created
    return self._cached and (self.cached_buildtree() or not self.buildtree_exists())
# cached_logs()
#
# Check if the artifact is cached together with its log files.
#
# Returns:
#     (bool): True if artifact is cached with logs, False if the
#             element is not cached or is missing its logs.
#
def cached_logs(self):
    # Log files are an essential part of an artifact: whenever the
    # element's artifact is cached, its log files are available too.
    return self._element._cached()
# set_cached()
#
# Mark the artifact as cached without probing the CAS contents, used
# as an optimization when the artifact is known to be available.
# The proto must exist on disk.
#
def set_cached(self):
    self._proto = self._load_proto()
    assert self._proto
    self._cached = True
# pull()
#
# Fetch this artifact from a remote artifact repository into the
# local artifact cache.
#
# Args:
#     pull_buildtrees (bool): Whether to also pull the buildtree
#
# Returns: True if the artifact has been downloaded, False otherwise
#
def pull(self, *, pull_buildtrees):
    cache = self._context.artifactcache
    key = self.get_extract_key()

    if not cache.pull(self._element, key, pull_buildtrees=pull_buildtrees):
        return False

    self.set_cached()

    # Mirror the artifact under all of its keys so it stays reachable
    # via the weak key when pulled with the strong key, and vice versa
    for other_key in self.get_metadata_keys():
        cache.link_key(self._element, key, other_key)

    return True
# _load_proto()
#
# Read the artifact proto for the current extract key from disk,
# refreshing its mtime so cache expiry sees it as recently used.
#
# Returns:
#     (Artifact): The deserialized proto, or None when not on disk
#
def _load_proto(self):
    key = self.get_extract_key()
    path = os.path.join(self._artifactdir, self._element.get_artifact_name(key=key))

    proto = ArtifactProto()
    try:
        with open(path, mode="r+b") as f:
            proto.ParseFromString(f.read())
    except FileNotFoundError:
        return None

    # Touch the file so that LRU expiry keeps recently used artifacts
    os.utime(path)
    return proto
# _get_proto()
#
# Returns:
# (Artifact): Artifact proto
#
def _get_proto(self):
return self._proto
# _get_field_digest()
#
# Returns:
# (Digest): Digest of field specified
#
def _get_field_digest(self, field):
artifact_proto = self._get_proto()
digest = getattr(artifact_proto, field)
if not str(digest):
return None
return digest
| 32.631268 | 117 | 0.650741 |
import os
from typing import Dict, Tuple
from ._protos.buildstream.v2.artifact_pb2 import Artifact as ArtifactProto
from . import _yaml
from . import utils
from .node import Node
from .types import _Scope
from .storage._casbaseddirectory import CasBasedDirectory
from .sandbox._config import SandboxConfig
from ._variables import Variables
class Artifact:
    """Cached artifact for an element: an on-disk proto plus CAS-backed content.

    Wraps access to the artifact proto stored under the element's
    artifact names and to the directories/blobs it references in the
    local CAS.
    """

    # Artifact proto format version written by this implementation
    version = 1

    def __init__(self, element, context, *, strong_key=None, strict_key=None, weak_key=None):
        self._element = element
        self._context = context
        self._cache_key = strong_key
        self._strict_key = strict_key
        self._weak_cache_key = weak_key
        self._artifactdir = context.artifactdir
        self._cas = context.get_cascache()
        self._tmpdir = context.tmpdir
        self._proto = None  # Loaded proto, set by query_cache()/set_cached()
        self._metadata_keys = None  # Memoized (strong, strict, weak) key tuple
        self._metadata_dependencies = None
        self._metadata_workspaced = None  # Memoized was_workspaced flag
        self._metadata_workspaced_dependencies = None  # List of which dependencies are workspaced from the artifact
        self._cached = None  # Boolean of whether the artifact is cached
# strong_key():
#
# A property which evaluates to the strong key, regardless of whether
# it was the strong key that the Artifact object was initialized with
# or whether it was the strong key loaded from artifact metadata.
#
@property
def strong_key(self) -> str:
if self.cached():
key, _, _ = self.get_metadata_keys()
else:
key = self._cache_key
return key
# strict_key():
#
# A property which evaluates to the strict key, regardless of whether
# it was the strict key that the Artifact object was initialized with
# or whether it was the strict key loaded from artifact metadata.
#
@property
def strict_key(self) -> str:
if self.cached():
_, key, _ = self.get_metadata_keys()
else:
key = self._strict_key
return key
# weak_key():
#
# A property which evaluates to the weak key, regardless of whether
# it was the weak key that the Artifact object was initialized with
# or whether it was the weak key loaded from artifact metadata.
#
@property
def weak_key(self) -> str:
if self.cached():
_, _, key = self.get_metadata_keys()
else:
key = self._weak_cache_key
return key
# get_files():
#
# A virtual directory view over the artifact's output files.
#
# Returns:
#     (Directory): The virtual directory object
#
def get_files(self):
    digest = self._get_field_digest("files")
    return CasBasedDirectory(self._cas, digest=digest)
# get_buildtree():
#
# A virtual directory view over the artifact's buildtree.
#
# Returns:
#     (Directory): The virtual directory object
#
def get_buildtree(self):
    digest = self._get_field_digest("buildtree")
    return CasBasedDirectory(self._cas, digest=digest)
# get_sources():
#
# A virtual directory view over the artifact's staged sources.
#
# Returns:
#     (Directory): The virtual directory object
#
def get_sources(self):
    digest = self._get_field_digest("sources")
    return CasBasedDirectory(self._cas, digest=digest)
# get_logs():
#
# The local filesystem paths of the artifact's log files.
#
# Returns:
#     (list of str): One CAS object path per cached log
#
def get_logs(self):
    proto = self._get_proto()
    return [self._cas.objpath(log.digest) for log in proto.logs]
# get_extract_key()
#
# The key the artifact proto is addressed by on disk: the strong key
# when known, falling back to the weak key otherwise.
#
def get_extract_key(self):
    if self._cache_key:
        return self._cache_key
    return self._weak_cache_key
# cache()
#
# Commit the assembled artifact to the local CAS and write its proto
# to disk under both the strong and the weak key.
#
# Args:
#     sandbox_build_dir: The build tree directory to commit, or None
#     collectvdir: The directory of collected output files, or None
#     sourcesvdir: The staged sources directory, or None
#     buildresult (tuple): (success, description, detail) of the build
#     publicdata (dict): dict of public data to commit with the artifact
#     variables (Variables): The element's variables
#     environment (dict): dict of the element's environment variables
#     sandboxconfig (SandboxConfig): The element's SandboxConfig
#
# Returns:
#     (int): The size of the newly cached artifact
#
def cache(
    self,
    *,
    sandbox_build_dir,
    collectvdir,
    sourcesvdir,
    buildresult,
    publicdata,
    variables,
    environment,
    sandboxconfig,
):
    context = self._context
    element = self._element
    size = 0

    filesvdir = None
    buildtreevdir = None

    artifact = ArtifactProto()
    artifact.version = self.version

    # Store result
    artifact.build_success = buildresult[0]
    artifact.build_error = buildresult[1]
    artifact.build_error_details = "" if not buildresult[2] else buildresult[2]

    # Store keys
    artifact.strong_key = self._cache_key
    artifact.strict_key = self._strict_key
    artifact.weak_key = self._weak_cache_key

    artifact.was_workspaced = bool(element._get_workspace())
    # NOTE(review): mtimes are apparently only preserved for workspaced
    # builds — confirm this matches incremental build expectations.
    properties = ["mtime"] if artifact.was_workspaced else []

    # Store files
    if collectvdir is not None:
        filesvdir = CasBasedDirectory(cas_cache=self._cas)
        filesvdir._import_files_internal(collectvdir, properties=properties)
        artifact.files.CopyFrom(filesvdir._get_digest())
        size += filesvdir._get_size()

    # Store public data
    with utils._tempnamedfile_name(dir=self._tmpdir) as tmpname:
        _yaml.roundtrip_dump(publicdata, tmpname)
        public_data_digest = self._cas.add_object(path=tmpname)
        artifact.public_data.CopyFrom(public_data_digest)
        size += public_data_digest.size_bytes

    # Store low diversity metadata, this metadata must have a high
    # probability of deduplication, such as environment variables
    # and SandboxConfig.
    #
    with utils._tempnamedfile_name(dir=self._tmpdir) as tmpname:
        sandbox_dict = sandboxconfig.to_dict()
        low_diversity_dict = {"environment": environment, "sandbox-config": sandbox_dict}
        low_diversity_node = Node.from_dict(low_diversity_dict)
        _yaml.roundtrip_dump(low_diversity_node, tmpname)
        low_diversity_meta_digest = self._cas.add_object(path=tmpname)
        artifact.low_diversity_meta.CopyFrom(low_diversity_meta_digest)
        size += low_diversity_meta_digest.size_bytes

    # Store high diversity metadata, this metadata is expected to diverge
    # for every element and as such cannot be deduplicated.
    #
    with utils._tempnamedfile_name(dir=self._tmpdir) as tmpname:
        # The Variables object supports being converted directly to a dictionary
        variables_dict = dict(variables)
        high_diversity_dict = {"variables": variables_dict}
        high_diversity_node = Node.from_dict(high_diversity_dict)
        _yaml.roundtrip_dump(high_diversity_node, tmpname)
        high_diversity_meta_digest = self._cas.add_object(path=tmpname)
        artifact.high_diversity_meta.CopyFrom(high_diversity_meta_digest)
        size += high_diversity_meta_digest.size_bytes

    # store build dependencies
    for e in element._dependencies(_Scope.BUILD):
        new_build = artifact.build_deps.add()
        new_build.project_name = e.project_name
        new_build.element_name = e.name
        new_build.cache_key = e._get_cache_key()
        new_build.was_workspaced = bool(e._get_workspace())

    # Store log file
    log_filename = context.messenger.get_log_filename()
    if log_filename:
        digest = self._cas.add_object(path=log_filename)
        log = artifact.logs.add()
        log.name = os.path.basename(log_filename)
        log.digest.CopyFrom(digest)
        size += log.digest.size_bytes

    # Store build tree
    if sandbox_build_dir is not None:
        buildtreevdir = CasBasedDirectory(cas_cache=self._cas)
        buildtreevdir._import_files_internal(sandbox_build_dir, properties=properties)
        artifact.buildtree.CopyFrom(buildtreevdir._get_digest())
        size += buildtreevdir._get_size()

    # Store sources
    if sourcesvdir is not None:
        artifact.sources.CopyFrom(sourcesvdir._get_digest())
        size += sourcesvdir._get_size()

    # Write the proto to disk under every key it is addressable by
    # (strong and weak key, deduplicated when they coincide)
    os.makedirs(os.path.dirname(os.path.join(self._artifactdir, element.get_artifact_name())), exist_ok=True)
    keys = utils._deduplicate([self._cache_key, self._weak_cache_key])
    for key in keys:
        path = os.path.join(self._artifactdir, element.get_artifact_name(key=key))
        with utils.save_file_atomic(path, mode="wb") as f:
            f.write(artifact.SerializeToString())

    return size
# cached_buildtree()
#
# Check if artifact is cached with expected buildtree. A
# buildtree will not be present if the rest of the partial artifact
# is not cached.
#
# Returns:
# (bool): True if artifact cached with buildtree, False if
# missing expected buildtree. Note this only confirms
# if a buildtree is present, not its contents.
#
def cached_buildtree(self):
buildtree_digest = self._get_field_digest("buildtree")
if buildtree_digest:
return self._cas.contains_directory(buildtree_digest, with_files=True)
else:
return False
# buildtree_exists()
#
# Check if artifact was created with a buildtree. This does not check
# whether the buildtree is present in the local cache.
#
# Returns:
# (bool): True if artifact was created with buildtree
#
def buildtree_exists(self):
artifact = self._get_proto()
return bool(str(artifact.buildtree))
# cached_sources()
#
# Check if artifact is cached with sources.
#
# Returns:
# (bool): True if artifact is cached with sources, False if sources
# are not available.
#
def cached_sources(self):
sources_digest = self._get_field_digest("sources")
if sources_digest:
return self._cas.contains_directory(sources_digest, with_files=True)
else:
return False
# load_public_data():
#
# Loads the public data from the cached artifact
#
# Returns:
# (dict): The artifacts cached public data
#
def load_public_data(self):
# Load the public data from the artifact
artifact = self._get_proto()
with self._cas.open(artifact.public_data) as meta_file:
meta_str = meta_file.read()
data = _yaml.load_data(meta_str, file_name="public.yaml")
return data
# load_sandbox_config():
#
# Loads the sandbox configuration from the cached artifact
#
# Returns:
# The stored SandboxConfig object
#
def load_sandbox_config(self) -> SandboxConfig:
# Load the sandbox data from the artifact
artifact = self._get_proto()
meta_file = self._cas.objpath(artifact.low_diversity_meta)
data = _yaml.load(meta_file, shortname="low-diversity-meta.yaml")
# Extract the sandbox data
config = data.get_mapping("sandbox-config")
# Return a SandboxConfig
return SandboxConfig.new_from_node(config)
# load_environment():
#
# Loads the environment variables from the cached artifact
#
# Returns:
# The environment variables
#
def load_environment(self) -> Dict[str, str]:
# Load the sandbox data from the artifact
artifact = self._get_proto()
meta_file = self._cas.objpath(artifact.low_diversity_meta)
data = _yaml.load(meta_file, shortname="low-diversity-meta.yaml")
# Extract the environment
config = data.get_mapping("environment")
# Return the environment
return config.strip_node_info()
# load_variables():
#
# Loads the element variables from the cached artifact
#
# Returns:
# The element variables
#
def load_variables(self) -> Variables:
# Load the sandbox data from the artifact
artifact = self._get_proto()
meta_file = self._cas.objpath(artifact.high_diversity_meta)
data = _yaml.load(meta_file, shortname="high-diversity-meta.yaml")
# Extract the variables node and return the new Variables instance
variables_node = data.get_mapping("variables")
return Variables(variables_node)
# load_build_result():
#
# Load the build result from the cached artifact
#
# Returns:
# (bool): Whether the artifact of this element present in the artifact cache is of a success
# (str): Short description of the result
# (str): Detailed description of the result
#
def load_build_result(self):
artifact = self._get_proto()
build_result = (artifact.build_success, artifact.build_error, artifact.build_error_details)
return build_result
# get_metadata_keys():
#
# Retrieve the strong and weak keys from the given artifact.
#
# Returns:
# The strong key
# The strict key
# The weak key
#
def get_metadata_keys(self) -> Tuple[str, str, str]:
if self._metadata_keys is not None:
return self._metadata_keys
# Extract proto
artifact = self._get_proto()
strong_key = artifact.strong_key
strict_key = artifact.strict_key
weak_key = artifact.weak_key
self._metadata_keys = (strong_key, strict_key, weak_key)
return self._metadata_keys
# get_metadata_workspaced():
#
# Retrieve the hash of dependency from the given artifact.
#
# Returns:
# (bool): Whether the given artifact was workspaced
#
def get_metadata_workspaced(self):
if self._metadata_workspaced is not None:
return self._metadata_workspaced
# Extract proto
artifact = self._get_proto()
self._metadata_workspaced = artifact.was_workspaced
return self._metadata_workspaced
# get_metadata_workspaced_dependencies():
#
# Retrieve the hash of workspaced dependencies keys from the given artifact.
#
# Returns:
# (list): List of which dependencies are workspaced
#
def get_metadata_workspaced_dependencies(self):
if self._metadata_workspaced_dependencies is not None:
return self._metadata_workspaced_dependencies
# Extract proto
artifact = self._get_proto()
self._metadata_workspaced_dependencies = [
dep.element_name for dep in artifact.build_deps if dep.was_workspaced
]
return self._metadata_workspaced_dependencies
# get_dependency_artifact_names()
#
# Retrieve the artifact names of all of the dependencies in _Scope.BUILD
#
# Returns:
# (list [str]): A list of refs of all build dependencies in staging order.
#
def get_dependency_artifact_names(self):
# XXX: The pylint disable is necessary due to upstream issue:
# https://github.com/PyCQA/pylint/issues/850
from .element import _get_normal_name # pylint: disable=cyclic-import
artifact = self._get_proto()
try:
dependency_refs = [
os.path.join(dep.project_name, _get_normal_name(dep.element_name), dep.cache_key)
for dep in artifact.build_deps
]
except AttributeError:
# If the artifact has no dependencies, the build_deps attribute
# will be missing from the proto.
dependency_refs = []
return dependency_refs
# query_cache():
#
# Check whether the artifact corresponding to the stored cache key is
# available. This also checks whether all required parts of the artifact
# are available, which may depend on command and configuration. The cache
# key used for querying is dependent on the current context.
#
# Returns:
# (bool): Whether artifact is in local cache
#
def query_cache(self):
artifact = self._load_proto()
if not artifact:
self._cached = False
return False
# Check whether 'files' subdirectory is available, with or without file contents
if str(artifact.files) and not self._cas.contains_directory(artifact.files, with_files=True):
self._cached = False
return False
# Check whether public data and logs are available
logfile_digests = [logfile.digest for logfile in artifact.logs]
digests = [artifact.low_diversity_meta, artifact.high_diversity_meta, artifact.public_data] + logfile_digests
if not self._cas.contains_files(digests):
self._cached = False
return False
self._proto = artifact
self._cached = True
return True
# cached()
#
# Return whether the artifact is available in the local cache. This must
# be called after `query_cache()` or `set_cached()`.
#
# Returns:
# (bool): Whether artifact is in local cache
#
def cached(self, *, buildtree=False):
assert self._cached is not None
ret = self._cached
if buildtree:
ret = ret and (self.cached_buildtree() or not self.buildtree_exists())
return ret
# cached_logs()
#
# Check if the artifact is cached with log files.
#
# Returns:
# (bool): True if artifact is cached with logs, False if
# element not cached or missing logs.
#
def cached_logs(self):
# Log files are currently considered an essential part of an artifact.
# If the artifact is cached, its log files are available as well.
return self._element._cached()
# set_cached()
#
# Mark the artifact as cached without querying the filesystem.
# This is used as optimization when we know the artifact is available.
#
def set_cached(self):
self._proto = self._load_proto()
assert self._proto
self._cached = True
# pull()
#
# Pull artifact from remote artifact repository into local artifact cache.
#
# Args:
# pull_buildtrees (bool): Whether to pull buildtrees or not
#
# Returns: True if the artifact has been downloaded, False otherwise
#
def pull(self, *, pull_buildtrees):
artifacts = self._context.artifactcache
pull_key = self.get_extract_key()
if not artifacts.pull(self._element, pull_key, pull_buildtrees=pull_buildtrees):
return False
self.set_cached()
# Add reference for the other key (weak key when pulling with strong key,
# strong key when pulling with weak key)
for key in self.get_metadata_keys():
artifacts.link_key(self._element, pull_key, key)
return True
# load_proto()
#
# Returns:
# (Artifact): Artifact proto
#
def _load_proto(self):
key = self.get_extract_key()
proto_path = os.path.join(self._artifactdir, self._element.get_artifact_name(key=key))
artifact = ArtifactProto()
try:
with open(proto_path, mode="r+b") as f:
artifact.ParseFromString(f.read())
except FileNotFoundError:
return None
os.utime(proto_path)
return artifact
# _get_proto()
#
# Returns:
# (Artifact): Artifact proto
#
def _get_proto(self):
return self._proto
# _get_field_digest()
#
# Returns:
# (Digest): Digest of field specified
#
def _get_field_digest(self, field):
artifact_proto = self._get_proto()
digest = getattr(artifact_proto, field)
if not str(digest):
return None
return digest
| true | true |
f72f3137c35f25ad4295e8ff69e5522c6bc0fa3c | 466 | py | Python | database.py | Karasiq/flask-apachan | 11e1e6009910dfa762c8009e6ce96dc1407160b1 | [
"MIT"
] | null | null | null | database.py | Karasiq/flask-apachan | 11e1e6009910dfa762c8009e6ce96dc1407160b1 | [
"MIT"
] | null | null | null | database.py | Karasiq/flask-apachan | 11e1e6009910dfa762c8009e6ce96dc1407160b1 | [
"MIT"
] | null | null | null | from flask.ext.sqlalchemy import SQLAlchemy
import os, sys
sys.path.append(os.getcwd())
sys.path.append(os.path.join(os.getcwd(), '..'))
from app import app
db = SQLAlchemy(app)
db_session = db.session
def init_db():
    """Create all database tables.

    Model modules must be imported before ``create_all()`` so that
    every model class is registered on the shared metadata first.
    """
    import models  # noqa: F401  (imported for its registration side effect)
    db.Model.metadata.create_all()
import os, sys
sys.path.append(os.getcwd())
sys.path.append(os.path.join(os.getcwd(), '..'))
from app import app
db = SQLAlchemy(app)
db_session = db.session
def init_db():
    # Import all modules that define models before create_all() runs,
    # so their tables are registered on the shared metadata.
    import models
    db.Model.metadata.create_all()
f72f3282c9de4d076ead9963256ae05c8ffa5ed1 | 14,015 | py | Python | main_sweep.py | neonkitchen/disentangling-vae | e33148469fd1da50aef35686f337f2452f08ff6e | [
"MIT"
] | 2 | 2021-01-07T17:43:30.000Z | 2021-03-23T18:32:49.000Z | main_sweep.py | neonkitchen/disentangling-vae | e33148469fd1da50aef35686f337f2452f08ff6e | [
"MIT"
] | null | null | null | main_sweep.py | neonkitchen/disentangling-vae | e33148469fd1da50aef35686f337f2452f08ff6e | [
"MIT"
] | null | null | null | import argparse
import logging
import sys
import os
import wandb
from configparser import ConfigParser
from torch import optim
from disvae import init_specific_model, Trainer, Evaluator
from disvae.utils.modelIO import save_model, load_model, load_metadata
from disvae.models.losses import LOSSES, RECON_DIST, get_loss_f
from disvae.models.vae import MODELS
from utils.datasets import get_dataloaders, get_img_size, DATASETS
from utils.helpers import (create_safe_directory, get_device, set_seed, get_n_param,
get_config_section, update_namespace_, FormatterNoDuplicate)
from utils.visualize import GifTraversalsTraining
CONFIG_FILE = "hyperparam_sweep.ini"  # hyperparameter sweep defaults (INI)
RES_DIR = "results"  # root directory for saving/loading experiments
LOG_LEVELS = list(logging._levelToName.values())  # valid --log-level choices
ADDITIONAL_EXP = ['custom', "debug", "best_celeba", "best_dsprites"]
# Every predefined (loss, dataset) combination is also a valid experiment name
EXPERIMENTS = ADDITIONAL_EXP + ["{}_{}".format(loss, data)
                                for loss in LOSSES
                                for data in DATASETS]
def parse_arguments(args_to_parse):
    """Parse the command line arguments.

    Parameters
    ----------
    args_to_parse: list of str
        Arguments to parse (splitted on whitespaces).

    Returns
    -------
    argparse.Namespace
        Parsed arguments; when a predefined experiment is selected its
        config sections overwrite the corresponding argument values.
    """
    # Defaults come from the [sweep] section of the config file
    default_config = get_config_section([CONFIG_FILE], "sweep")

    description = "PyTorch implementation and evaluation of disentangled Variational AutoEncoders and metrics."
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=FormatterNoDuplicate)

    ### General options argument group
    general = parser.add_argument_group('General options')
    general.add_argument('name', type=str,
                         help="Name of the model for storing and loading purposes.")
    general.add_argument('-L', '--log-level', help="Logging levels.",
                         default=default_config['log_level'], choices=LOG_LEVELS)
    general.add_argument('--no-progress-bar', action='store_true',
                         default=default_config['no_progress_bar'],
                         help='Disables progress bar.')
    general.add_argument('--no-cuda', action='store_true',
                         default=default_config['no_cuda'],
                         help='Disables CUDA training, even when have one.')
    general.add_argument('-s', '--seed', type=int, default=default_config['seed'],
                         help='Random seed. Can be `None` for stochastic behavior.')

    ### Training specific options argument group
    training = parser.add_argument_group('Training specific options')
    training.add_argument('--checkpoint-every',
                          type=int, default=default_config['checkpoint_every'],
                          help='Save a checkpoint of the trained model every n epoch.')
    training.add_argument('-d', '--dataset', help="Path to training data.",
                          default=default_config['dataset'], choices=DATASETS)
    training.add_argument('-x', '--experiment',
                          default=default_config['experiment'], choices=EXPERIMENTS,
                          help='Predefined experiments to run. If not `custom` this will overwrite some other arguments.')
    training.add_argument('-e', '--epochs', type=int,
                          default=default_config['epochs'],
                          help='Maximum number of epochs to run for.')
    training.add_argument('-b', '--batch-size', type=int,
                          default=default_config['batch_size'],
                          help='Batch size for training.')
    training.add_argument('--lr', type=float, default=default_config['lr'],
                          help='Learning rate.')

    ### Model specfic options argument group
    model = parser.add_argument_group('Model specfic options')
    model.add_argument('-m', '--model-type',
                       default=default_config['model'], choices=MODELS,
                       help='Type of encoder and decoder to use.')
    model.add_argument('-z', '--latent-dim', type=int,
                       default=default_config['latent_dim'],
                       help='Dimension of the latent variable.')
    model.add_argument('-l', '--loss',
                       default=default_config['loss'], choices=LOSSES,
                       help="Type of VAE loss function to use.")
    model.add_argument('-r', '--rec-dist', default=default_config['rec_dist'],
                       choices=RECON_DIST,
                       help="Form of the likelihood to use for each pixel.")
    model.add_argument('-a', '--reg-anneal', type=float,
                       default=default_config['reg_anneal'],
                       help="Number of annealing steps where gradually adding the regularisation. What is annealed is specific to each loss.")

    ### Loss options in various argument groups
    ### BetaH loss specific argument group
    betaH = parser.add_argument_group('BetaH specific parameters')
    betaH.add_argument('--betaH-B', type=float,
                       default=default_config['betaH_B'],
                       help="Weight of the KL (beta in the paper).")

    ### BetaB loss specific argument group
    betaB = parser.add_argument_group('BetaB specific parameters')
    betaB.add_argument('--betaB-initC', type=float,
                       default=default_config['betaB_initC'],
                       help="Starting annealed capacity.")
    betaB.add_argument('--betaB-finC', type=float,
                       default=default_config['betaB_finC'],
                       help="Final annealed capacity.")
    betaB.add_argument('--betaB-G', type=float,
                       default=default_config['betaB_G'],
                       help="Weight of the KL divergence term (gamma in the paper).")

    ### factor loss specific argument group
    factor = parser.add_argument_group('factor VAE specific parameters')
    factor.add_argument('--factor-G', type=float,
                        default=default_config['factor_G'],
                        help="Weight of the TC term (gamma in the paper).")
    factor.add_argument('--lr-disc', type=float,
                        default=default_config['lr_disc'],
                        help='Learning rate of the discriminator.')

    ### btcvae loss specific argument group
    btcvae = parser.add_argument_group('beta-tcvae specific parameters')
    btcvae.add_argument('--btcvae-A', type=float,
                        default=default_config['btcvae_A'],
                        help="Weight of the MI term (alpha in the paper).")
    btcvae.add_argument('--btcvae-G', type=float,
                        default=default_config['btcvae_G'],
                        help="Weight of the dim-wise KL term (gamma in the paper).")
    btcvae.add_argument('--btcvae-B', type=float,
                        default=default_config['btcvae_B'],
                        help="Weight of the TC term (beta in the paper).")

    ### Evaluation specific options argument group
    evaluation = parser.add_argument_group('Evaluation specific options')
    evaluation.add_argument('--is-eval-only', action='store_true',
                            default=default_config['is_eval_only'],
                            help='Whether to only evaluate using precomputed model `name`.')
    evaluation.add_argument('--is-metrics', action='store_true',
                            default=default_config['is_metrics'],
                            help="Whether to compute the disentangled metrcics. Currently only possible with `dsprites` as it is the only dataset with known true factors of variations.")
    evaluation.add_argument('--no-test', action='store_true',
                            default=default_config['no_test'],
                            help="Whether not to compute the test losses.`")
    evaluation.add_argument('--eval-batchsize', type=int,
                            default=default_config['eval_batchsize'],
                            help='Batch size for evaluation.')

    args = parser.parse_args(args_to_parse)

    # NOTE(review): nesting below reconstructed from upstream structure;
    # the experiment overrides only apply for non-sweep runs.
    if args.experiment != 'sweep':
        if args.experiment not in ADDITIONAL_EXP:
            # update all common sections first
            model, dataset = args.experiment.split("_")
            common_data = get_config_section([CONFIG_FILE], "Common_{}".format(dataset))
            update_namespace_(args, common_data)
            common_model = get_config_section([CONFIG_FILE], "Common_{}".format(model))
            update_namespace_(args, common_model)

        try:
            experiments_config = get_config_section([CONFIG_FILE], args.experiment)
            update_namespace_(args, experiments_config)
        except KeyError as e:
            if args.experiment in ADDITIONAL_EXP:
                raise e  # only reraise if didn't use common section

    return args
def main(args):
    """Main train and evaluation function.

    Sets up logging, seeds the run, registers it with Weights & Biases,
    trains the model (unless ``--is-eval-only``) and finally evaluates it.

    Parameters
    ----------
    args: argparse.Namespace
        Parsed command-line arguments (see ``parse_arguments``).
    """
    formatter = logging.Formatter('%(asctime)s %(levelname)s - %(funcName)s: %(message)s',
                                  "%H:%M:%S")
    logger = logging.getLogger(__name__)
    logger.setLevel(args.log_level.upper())
    stream = logging.StreamHandler()
    stream.setLevel(args.log_level.upper())
    stream.setFormatter(formatter)
    logger.addHandler(stream)

    set_seed(args.seed)
    device = get_device(is_gpu=not args.no_cuda)
    exp_dir = os.path.join(RES_DIR, args.name)
    logger.info("Root directory for saving and loading experiments: {}".format(exp_dir))

    # When launched by `wandb.agent`, wandb.config carries the hyperparameter
    # values selected for this sweep trial.
    config = wandb.config
    # BUG FIX: the original read `project]'sweep'` (a SyntaxError) and was
    # missing the comma before `config`; restored as proper keyword arguments.
    with wandb.init(name="sweep-reg_anneal-seed",
                    notes='This is a test run',
                    tags=['btcvae', 'dsprites'],
                    entity='neonkitchen',
                    project='sweep',
                    config=config):
        wandb.config.update(args)

        if not args.is_eval_only:
            create_safe_directory(exp_dir, logger=logger)

            if args.loss == "factor":
                logger.info("FactorVae needs 2 batches per iteration. To replicate this behavior while being consistent, we double the batch size and the the number of epochs.")
                args.batch_size *= 2
                args.epochs *= 2

            # PREPARES DATA
            train_loader = get_dataloaders(args.dataset,
                                           batch_size=args.batch_size,
                                           logger=logger)
            logger.info("Train {} with {} samples".format(args.dataset, len(train_loader.dataset)))

            # PREPARES MODEL
            args.img_size = get_img_size(args.dataset)  # stores for metadata
            model = init_specific_model(args.model_type, args.img_size, args.latent_dim)
            logger.info('Num parameters in model: {}'.format(get_n_param(model)))

            # TRAINS
            optimizer = optim.Adam(model.parameters(), lr=args.lr)
            model = model.to(device)  # make sure trainer and viz on same device
            gif_visualizer = GifTraversalsTraining(model, args.dataset, exp_dir)
            loss_f = get_loss_f(args.loss,
                                n_data=len(train_loader.dataset),
                                device=device,
                                **vars(args))
            wandb.watch(model, optimizer, log="parameters", log_freq=1000)
            trainer = Trainer(model, optimizer, loss_f=loss_f,
                              loss_name=args.loss,
                              device=device,
                              logger=logger,
                              save_dir=exp_dir,
                              is_progress_bar=not args.no_progress_bar,
                              gif_visualizer=gif_visualizer)
            trainer(train_loader,
                    epochs=args.epochs,
                    checkpoint_every=args.checkpoint_every)

            # SAVE MODEL AND EXPERIMENT INFORMATION
            save_model(trainer.model, exp_dir, metadata=vars(args))

        if args.is_metrics or not args.no_test:
            # Reload the saved model so evaluation works both after training
            # and in --is-eval-only mode.
            model = load_model(exp_dir, is_gpu=not args.no_cuda)
            metadata = load_metadata(exp_dir)
            # TO-DO: currently uses train datatset
            test_loader = get_dataloaders(metadata["dataset"],
                                          batch_size=args.eval_batchsize,
                                          shuffle=False,
                                          logger=logger)
            loss_f = get_loss_f(args.loss,
                                n_data=len(test_loader.dataset),
                                device=device,
                                **vars(args))
            evaluator = Evaluator(model, loss_f,
                                  device=device,
                                  logger=logger,
                                  save_dir=exp_dir,
                                  is_progress_bar=not args.no_progress_bar)
            evaluator(test_loader, is_metrics=args.is_metrics, is_losses=not args.no_test)
# Hyperparameter sweep definition consumed by `wandb.sweep` in the __main__
# block: an exhaustive grid over 3 seeds x 3 annealing lengths (9 runs total).
sweep_config = {
    'method': 'grid',  # search strategy; other wandb options: random, bayes
    # 'metric' is intentionally commented out: a grid search enumerates every
    # combination, so no optimisation target is required.
    #'metric': {
    #    'name': 'loss',
    #'goal': 'minimise'
    #},
    'parameters': {
        # Random seed for each trial (forwarded to args.seed).
        'seed': {
            'values': [1234, 9876, 5678]
        },
        # Number of annealing steps for the regularisation weight
        # (forwarded to args.reg_anneal); larger values kept for later runs.
        'reg_anneal': {
            'values': [100000, 200000, 300000]#, 400000, 500000, 600000, 700000, 800000, 900000 , 1000000]
        }
    }
}
if __name__ == '__main__':
    args = parse_arguments(sys.argv[1:])
    # Register the sweep with the W&B backend; returns the sweep identifier.
    sweep_id = wandb.sweep(sweep_config, entity="sweep", project="sweep")
    # BUG FIX: the original `wandb.agent(sweep_id, main(args))` invoked main()
    # immediately and handed its return value (None) to the agent. The agent's
    # second argument must be a *callable* it can run once per trial.
    wandb.agent(sweep_id, function=lambda: main(args))
| 42.990798 | 186 | 0.57888 | import argparse
import logging
import sys
import os
import wandb
from configparser import ConfigParser
from torch import optim
from disvae import init_specific_model, Trainer, Evaluator
from disvae.utils.modelIO import save_model, load_model, load_metadata
from disvae.models.losses import LOSSES, RECON_DIST, get_loss_f
from disvae.models.vae import MODELS
from utils.datasets import get_dataloaders, get_img_size, DATASETS
from utils.helpers import (create_safe_directory, get_device, set_seed, get_n_param,
get_config_section, update_namespace_, FormatterNoDuplicate)
from utils.visualize import GifTraversalsTraining
CONFIG_FILE = "hyperparam_sweep.ini"
RES_DIR = "results"
LOG_LEVELS = list(logging._levelToName.values())
ADDITIONAL_EXP = ['custom', "debug", "best_celeba", "best_dsprites"]
EXPERIMENTS = ADDITIONAL_EXP + ["{}_{}".format(loss, data)
for loss in LOSSES
for data in DATASETS]
def parse_arguments(args_to_parse):
"""Parse the command line arguments.
Parameters
----------
args_to_parse: list of str
Arguments to parse (splitted on whitespaces).
"""
default_config = get_config_section([CONFIG_FILE], "sweep")
description = "PyTorch implementation and evaluation of disentangled Variational AutoEncoders and metrics."
parser = argparse.ArgumentParser(description=description,
formatter_class=FormatterNoDuplicate)
general.add_argument('name', type=str,
help="Name of the model for storing and loading purposes.")
general.add_argument('-L', '--log-level', help="Logging levels.",
default=default_config['log_level'], choices=LOG_LEVELS)
general.add_argument('--no-progress-bar', action='store_true',
default=default_config['no_progress_bar'],
help='Disables progress bar.')
general.add_argument('--no-cuda', action='store_true',
default=default_config['no_cuda'],
help='Disables CUDA training, even when have one.')
general.add_argument('-s', '--seed', type=int, default=default_config['seed'],
help='Random seed. Can be `None` for stochastic behavior.')
add_argument('--checkpoint-every',
type=int, default=default_config['checkpoint_every'],
help='Save a checkpoint of the trained model every n epoch.')
training.add_argument('-d', '--dataset', help="Path to training data.",
default=default_config['dataset'], choices=DATASETS)
training.add_argument('-x', '--experiment',
default=default_config['experiment'], choices=EXPERIMENTS,
help='Predefined experiments to run. If not `custom` this will overwrite some other arguments.')
training.add_argument('-e', '--epochs', type=int,
default=default_config['epochs'],
help='Maximum number of epochs to run for.')
training.add_argument('-b', '--batch-size', type=int,
default=default_config['batch_size'],
help='Batch size for training.')
training.add_argument('--lr', type=float, default=default_config['lr'],
help='Learning rate.')
d_argument('-m', '--model-type',
default=default_config['model'], choices=MODELS,
help='Type of encoder and decoder to use.')
model.add_argument('-z', '--latent-dim', type=int,
default=default_config['latent_dim'],
help='Dimension of the latent variable.')
model.add_argument('-l', '--loss',
default=default_config['loss'], choices=LOSSES,
help="Type of VAE loss function to use.")
model.add_argument('-r', '--rec-dist', default=default_config['rec_dist'],
choices=RECON_DIST,
help="Form of the likelihood to use for each pixel.")
model.add_argument('-a', '--reg-anneal', type=float,
default=default_config['reg_anneal'],
help="Number of annealing steps where gradually adding the regularisation. What is annealed is specific to each loss.")
_config['betaH_B'],
help="Weight of the KL (beta in the paper).")
aB.add_argument('--betaB-initC', type=float,
default=default_config['betaB_initC'],
help="Starting annealed capacity.")
betaB.add_argument('--betaB-finC', type=float,
default=default_config['betaB_finC'],
help="Final annealed capacity.")
betaB.add_argument('--betaB-G', type=float,
default=default_config['betaB_G'],
help="Weight of the KL divergence term (gamma in the paper).")
factor.add_argument('--factor-G', type=float,
default=default_config['factor_G'],
help="Weight of the TC term (gamma in the paper).")
factor.add_argument('--lr-disc', type=float,
default=default_config['lr_disc'],
help='Learning rate of the discriminator.')
btcvae.add_argument('--btcvae-A', type=float,
default=default_config['btcvae_A'],
help="Weight of the MI term (alpha in the paper).")
btcvae.add_argument('--btcvae-G', type=float,
default=default_config['btcvae_G'],
help="Weight of the dim-wise KL term (gamma in the paper).")
btcvae.add_argument('--btcvae-B', type=float,
default=default_config['btcvae_B'],
help="Weight of the TC term (beta in the paper).")
n.add_argument('--is-eval-only', action='store_true',
default=default_config['is_eval_only'],
help='Whether to only evaluate using precomputed model `name`.')
evaluation.add_argument('--is-metrics', action='store_true',
default=default_config['is_metrics'],
help="Whether to compute the disentangled metrcics. Currently only possible with `dsprites` as it is the only dataset with known true factors of variations.")
evaluation.add_argument('--no-test', action='store_true',
default=default_config['no_test'],
help="Whether not to compute the test losses.`")
evaluation.add_argument('--eval-batchsize', type=int,
default=default_config['eval_batchsize'],
help='Batch size for evaluation.')
args = parser.parse_args(args_to_parse)
if args.experiment != 'sweep':
if args.experiment not in ADDITIONAL_EXP:
model, dataset = args.experiment.split("_")
common_data = get_config_section([CONFIG_FILE], "Common_{}".format(dataset))
update_namespace_(args, common_data)
common_model = get_config_section([CONFIG_FILE], "Common_{}".format(model))
update_namespace_(args, common_model)
try:
experiments_config = get_config_section([CONFIG_FILE], args.experiment)
update_namespace_(args, experiments_config)
except KeyError as e:
if args.experiment in ADDITIONAL_EXP:
raise e
return args
def main(args):
"""Main train and evaluation function.
Parameters
----------
args: argparse.Namespace
Arguments
"""
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(funcName)s: %(message)s',
"%H:%M:%S")
logger = logging.getLogger(__name__)
logger.setLevel(args.log_level.upper())
stream = logging.StreamHandler()
stream.setLevel(args.log_level.upper())
stream.setFormatter(formatter)
logger.addHandler(stream)
set_seed(args.seed)
device = get_device(is_gpu=not args.no_cuda)
exp_dir = os.path.join(RES_DIR, args.name)
logger.info("Root directory for saving and loading experiments: {}".format(exp_dir))
# Initialize a new sweep
# Arguments:
# – sweep_config: the sweep config dictionary defined above
# – entity: Set the username for the sweep
# – project: Set the project name for the sweep
config = wandb.config
with wandb.init(name="sweep-reg_anneal-seed",
notes='This is a test run',
tags=['btcvae', 'dsprites'],
entity='neonkitchen',
project]'sweep'
config = config):
wandb.config.update(args)
if not args.is_eval_only:
create_safe_directory(exp_dir, logger=logger)
if args.loss == "factor":
logger.info("FactorVae needs 2 batches per iteration. To replicate this behavior while being consistent, we double the batch size and the the number of epochs.")
args.batch_size *= 2
args.epochs *= 2
# PREPARES DATA
train_loader = get_dataloaders(args.dataset,
batch_size=args.batch_size,
logger=logger)
logger.info("Train {} with {} samples".format(args.dataset, len(train_loader.dataset)))
# PREPARES MODEL
args.img_size = get_img_size(args.dataset) # stores for metadata
model = init_specific_model(args.model_type, args.img_size, args.latent_dim)
logger.info('Num parameters in model: {}'.format(get_n_param(model)))
# TRAINS
optimizer = optim.Adam(model.parameters(), lr=args.lr)
model = model.to(device) # make sure trainer and viz on same device
gif_visualizer = GifTraversalsTraining(model, args.dataset, exp_dir)
loss_f = get_loss_f(args.loss,
n_data=len(train_loader.dataset),
device=device,
**vars(args))
wandb.watch(model, optimizer, log="parameters", log_freq=1000)
trainer = Trainer(model, optimizer, loss_f=loss_f,
loss_name=args.loss,
device=device,
logger=logger,
save_dir=exp_dir,
is_progress_bar=not args.no_progress_bar,
gif_visualizer=gif_visualizer)
trainer(train_loader,
epochs=args.epochs,
checkpoint_every=args.checkpoint_every)
# SAVE MODEL AND EXPERIMENT INFORMATION
save_model(trainer.model, exp_dir, metadata=vars(args))
if args.is_metrics or not args.no_test:
model = load_model(exp_dir, is_gpu=not args.no_cuda)
metadata = load_metadata(exp_dir)
# TO-DO: currently uses train datatset
test_loader = get_dataloaders(metadata["dataset"],
batch_size=args.eval_batchsize,
shuffle=False,
logger=logger)
loss_f = get_loss_f(args.loss,
n_data=len(test_loader.dataset),
device=device,
**vars(args))
evaluator = Evaluator(model, loss_f,
device=device,
logger=logger,
save_dir=exp_dir,
is_progress_bar=not args.no_progress_bar)
evaluator(test_loader, is_metrics=args.is_metrics, is_losses=not args.no_test)
sweep_config = {
'method': 'grid', #grid, random
#'metric': {
# 'name': 'loss',
#'goal': 'minimise'
#},
'parameters': {
'seed': {
'values': [1234, 9876, 5678]
},
'reg_anneal': {
'values': [100000, 200000, 300000]#, 400000, 500000, 600000, 700000, 800000, 900000 , 1000000]
}
}
}
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
sweep_id = wandb.sweep(sweep_config, entity="sweep", project="sweep")
wandb.agent(sweep_id, main(args))
| false | true |
f72f337038924789342a29bdf12f894156a4a5c5 | 17,605 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_virtual_machine_images_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2020-05-12T23:29:15.000Z | 2020-05-12T23:29:15.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_virtual_machine_images_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/operations/_virtual_machine_images_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
# NOTE(review): this class is AutoRest-generated (see the file header); hand
# edits would be lost on regeneration, so only comments are added here.
class VirtualMachineImagesOperations(object):
    """VirtualMachineImagesOperations operations.

    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2018-10-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version sent as the `api-version` query parameter
        # on every request built by this class.
        self.api_version = "2018-10-01"
        self.config = config

    def get(
            self, location, publisher_name, offer, skus, version, custom_headers=None, raw=False, **operation_config):
        """Gets a virtual machine image.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param skus: A valid image SKU.
        :type skus: str
        :param version: A valid image SKU version.
        :type version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualMachineImage or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImage or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'skus': self._serialize.url("skus", skus, 'str'),
            'version': self._serialize.url("version", version, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Per-request correlation id for client-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            # Non-200 responses surface as CloudError, carrying the
            # service-side request id for diagnostics.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineImage', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'}

    def list(
            self, location, publisher_name, offer, skus, expand=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
        """Gets a list of all virtual machine image versions for the specified
        location, publisher, offer, and SKU.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param skus: A valid image SKU.
        :type skus: str
        :param expand: The expand expression to apply on the operation.
        :type expand: str
        :param top: Maximum number of results to return (OData $top).
        :type top: int
        :param orderby: Ordering expression for the results (OData $orderby).
        :type orderby: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.list.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'skus': self._serialize.url("skus", skus, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # The OData options ($expand/$top/$orderby) are only sent when given.
        query_parameters = {}
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query("top", top, 'int')
        if orderby is not None:
            query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'}

    def list_offers(
            self, location, publisher_name, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image offers for the specified location
        and publisher.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.list_offers.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    list_offers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'}

    def list_publishers(
            self, location, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image publishers for the specified Azure
        location.

        :param location: The name of a supported Azure region.
        :type location: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.list_publishers.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    list_publishers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'}

    def list_skus(
            self, location, publisher_name, offer, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image SKUs for the specified location,
        publisher, and offer.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.list_skus.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'}
| 46.207349 | 205 | 0.663334 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class VirtualMachineImagesOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-10-01"
self.config = config
def get(
self, location, publisher_name, offer, skus, version, custom_headers=None, raw=False, **operation_config):
url = self.get.metadata['url']
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'skus': self._serialize.url("skus", skus, 'str'),
'version': self._serialize.url("version", version, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineImage', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'}
def list(
self, location, publisher_name, offer, skus, expand=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
url = self.list.metadata['url']
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'skus': self._serialize.url("skus", skus, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VirtualMachineImageResource]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'}
def list_offers(
        self, location, publisher_name, custom_headers=None, raw=False, **operation_config):
    """List the virtual machine image offers of a publisher in a location.

    Issues a GET against the offers endpoint and deserializes the response
    body as a list of VirtualMachineImageResource.

    :param location: Azure region substituted into the URL path.
    :param publisher_name: Image publisher whose offers are listed.
    :param custom_headers: Optional dict of extra HTTP headers to send.
    :param raw: When True, return the ClientRawResponse wrapper around the
        deserialized result instead of the result itself.
    :param operation_config: Per-call overrides forwarded to the
        transport's ``send``.
    :return: Deserialized list of VirtualMachineImageResource, or a
        ClientRawResponse when ``raw`` is True.
    :raises CloudError: If the service responds with a non-200 status.
    """
    # Expand the templated URL with serialized path parameters.
    path_args = {
        'location': self._serialize.url("location", location, 'str'),
        'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.list_offers.metadata['url'], **path_args)

    query = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    headers = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        # Client-generated correlation id so the request can be traced.
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Non-200 already raised above, so the body is safe to deserialize here.
    result = self._deserialize('[VirtualMachineImageResource]', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
list_offers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'}
def list_publishers(
        self, location, custom_headers=None, raw=False, **operation_config):
    """List the virtual machine image publishers available in a location.

    Issues a GET against the publishers endpoint and deserializes the
    response body as a list of VirtualMachineImageResource.

    :param location: Azure region substituted into the URL path.
    :param custom_headers: Optional dict of extra HTTP headers to send.
    :param raw: When True, return the ClientRawResponse wrapper around the
        deserialized result instead of the result itself.
    :param operation_config: Per-call overrides forwarded to the
        transport's ``send``.
    :return: Deserialized list of VirtualMachineImageResource, or a
        ClientRawResponse when ``raw`` is True.
    :raises CloudError: If the service responds with a non-200 status.
    """
    # Expand the templated URL with serialized path parameters.
    path_args = {
        'location': self._serialize.url("location", location, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.list_publishers.metadata['url'], **path_args)

    query = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    headers = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        # Client-generated correlation id so the request can be traced.
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Non-200 already raised above, so the body is safe to deserialize here.
    result = self._deserialize('[VirtualMachineImageResource]', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
list_publishers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'}
def list_skus(
        self, location, publisher_name, offer, custom_headers=None, raw=False, **operation_config):
    """List the virtual machine image SKUs of an offer in a location.

    Issues a GET against the skus endpoint and deserializes the response
    body as a list of VirtualMachineImageResource.

    :param location: Azure region substituted into the URL path.
    :param publisher_name: Image publisher owning the offer.
    :param offer: Image offer whose SKUs are listed.
    :param custom_headers: Optional dict of extra HTTP headers to send.
    :param raw: When True, return the ClientRawResponse wrapper around the
        deserialized result instead of the result itself.
    :param operation_config: Per-call overrides forwarded to the
        transport's ``send``.
    :return: Deserialized list of VirtualMachineImageResource, or a
        ClientRawResponse when ``raw`` is True.
    :raises CloudError: If the service responds with a non-200 status.
    """
    # Expand the templated URL with serialized path parameters.
    path_args = {
        'location': self._serialize.url("location", location, 'str'),
        'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
        'offer': self._serialize.url("offer", offer, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.list_skus.metadata['url'], **path_args)

    query = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    headers = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        # Client-generated correlation id so the request can be traced.
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Non-200 already raised above, so the body is safe to deserialize here.
    result = self._deserialize('[VirtualMachineImageResource]', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'}
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.