Dataset schema (column | dtype | observed range):

| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 10 to 805k |
| ext | string | 6 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 176 |
| max_stars_repo_name | string | length 7 to 114 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 4 to 176 |
| max_issues_repo_name | string | length 7 to 114 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 48.5k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 4 to 176 |
| max_forks_repo_name | string | length 7 to 114 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 10 to 805k |
| avg_line_length | float64 | 5.53 to 11k |
| max_line_length | int64 | 10 to 129k |
| alphanum_fraction | float64 | 0.13 to 0.93 |
| content_no_comment | string | length 0 to 449k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |

The records below list these fields inline, followed by `content`, the derived statistics, and `content_no_comment`.

---

hexsha: f7170eb31ebfcf1d90bae6e7d44135dc95875c7b | size: 2,701 | ext: py | lang: Python
max_stars: repo_path examples/do_full_save_example.py | repo_name cclauss/hyperparameter_hunter | head_hexsha 12fcbbd4ed71447f80e41f3538be824126b3e209 | licenses ["MIT"] | count null | event datetimes null / null
max_issues: repo_path examples/do_full_save_example.py | repo_name cclauss/hyperparameter_hunter | head_hexsha 12fcbbd4ed71447f80e41f3538be824126b3e209 | licenses ["MIT"] | count null | event datetimes null / null
max_forks: repo_path examples/do_full_save_example.py | repo_name cclauss/hyperparameter_hunter | head_hexsha 12fcbbd4ed71447f80e41f3538be824126b3e209 | licenses ["MIT"] | count null | event datetimes null / null
content:
```python
from hyperparameter_hunter import Environment, CrossValidationExperiment
from hyperparameter_hunter.utils.learning_utils import get_toy_classification_data
from sklearn.model_selection import RepeatedStratifiedKFold
from xgboost import XGBClassifier
def do_full_save(experiment_result):
"""This is a simple check to see if the final OOF ROC-AUC score is above 0.75. If it is, we return True; otherwise, we return
False. As input, your do_full_save functions should expect an Experiment's result dictionary. This is actually the dictionary
that gets saved as the Experiment's "description" file, so for more information on what's in there, look at any description
file or see :attr:`hyperparameter_hunter.recorders.DescriptionRecorder.result` (the object passed to `do_full_save`)"""
return experiment_result['final_evaluations']['oof']['roc_auc_score'] > 0.75
def execute():
env = Environment(
train_dataset=get_toy_classification_data(),
root_results_path='HyperparameterHunterAssets',
metrics_map=['roc_auc_score'],
cross_validation_type=RepeatedStratifiedKFold,
cross_validation_params=dict(n_splits=3, n_repeats=2, random_state=32),
do_full_save=do_full_save,
)
experiment_0 = CrossValidationExperiment(model_initializer=XGBClassifier, model_init_params=dict(subsample=0.01))
# Pro Tip: By setting XGBoost's subsample ridiculously low, we can get bad scores on purpose
# Upon completion of this Experiment, we see a warning that not all result files will be saved
# This is because the final score of the Experiment was below our threshold of 0.75
# Specifically, we skipped saving prediction files (OOF, holdout, test, or in-fold), and the heartbeat file
# What still got saved is the Experiment's: key information, leaderboard position, and description file
# These are saved to allow us to use the information for future hyperparameter optimization, and detect repeated Experiments
    # Additionally, the Experiment's script backup is saved, but that's because it's one of the first things that happens
# For even finer control over what gets saved, use `do_full_save` together with `file_blacklist`
    # Now, let's perform another Experiment that does a bit better than our intentionally miserable one
experiment_1 = CrossValidationExperiment(model_initializer=XGBClassifier, model_init_params=dict(subsample=0.5))
# Our second Experiment was executed in the same Environment, so it was still subject to the `do_full_save` constraint
# However, because it scored above 0.75 (hopefully), all of the result files were saved
if __name__ == '__main__':
execute()
```
avg_line_length: 60.022222 | max_line_length: 129 | alphanum_fraction: 0.775268
content_no_comment:
```python
from hyperparameter_hunter import Environment, CrossValidationExperiment
from hyperparameter_hunter.utils.learning_utils import get_toy_classification_data
from sklearn.model_selection import RepeatedStratifiedKFold
from xgboost import XGBClassifier
def do_full_save(experiment_result):
return experiment_result['final_evaluations']['oof']['roc_auc_score'] > 0.75
def execute():
env = Environment(
train_dataset=get_toy_classification_data(),
root_results_path='HyperparameterHunterAssets',
metrics_map=['roc_auc_score'],
cross_validation_type=RepeatedStratifiedKFold,
cross_validation_params=dict(n_splits=3, n_repeats=2, random_state=32),
do_full_save=do_full_save,
)
experiment_0 = CrossValidationExperiment(model_initializer=XGBClassifier, model_init_params=dict(subsample=0.01))
# Upon completion of this Experiment, we see a warning that not all result files will be saved
# This is because the final score of the Experiment was below our threshold of 0.75
# Specifically, we skipped saving prediction files (OOF, holdout, test, or in-fold), and the heartbeat file
# What still got saved is the Experiment's: key information, leaderboard position, and description file
experiment_1 = CrossValidationExperiment(model_initializer=XGBClassifier, model_init_params=dict(subsample=0.5))
if __name__ == '__main__':
execute()
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
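
As a quick illustration of the `do_full_save` hook in the record above, here is a hedged sketch of a stricter predicate. It assumes only the result-dictionary layout shown in the example (`final_evaluations` keyed by dataset name, each holding metric scores); the function name and extra `threshold` parameter are hypothetical.

```python
# Hypothetical sketch: a stricter do_full_save predicate. Assumes the layout
# used above: experiment_result['final_evaluations'] maps dataset names
# (e.g. 'oof') to dicts of metric scores.
def do_full_save_strict(experiment_result, threshold=0.75):
    """Keep full results only if every recorded final evaluation clears `threshold`."""
    final_evaluations = experiment_result.get('final_evaluations') or {}
    scores = [
        dataset_scores['roc_auc_score']
        for dataset_scores in final_evaluations.values()
        if isinstance(dataset_scores, dict) and 'roc_auc_score' in dataset_scores
    ]
    # If nothing was evaluated, err on the side of saving everything.
    return all(score > threshold for score in scores) if scores else True
```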

---

hexsha: f71710eb7646ca089fb4cdca00db365acd3cf522 | size: 1,978 | ext: py | lang: Python
max_stars: repo_path ucsmsdk/mometa/macpool/MacpoolUniverse.py | repo_name anoop1984/python_sdk | head_hexsha c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | licenses ["Apache-2.0"] | count null | event datetimes null / null
max_issues: repo_path ucsmsdk/mometa/macpool/MacpoolUniverse.py | repo_name anoop1984/python_sdk | head_hexsha c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | licenses ["Apache-2.0"] | count null | event datetimes null / null
max_forks: repo_path ucsmsdk/mometa/macpool/MacpoolUniverse.py | repo_name anoop1984/python_sdk | head_hexsha c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | licenses ["Apache-2.0"] | count null | event datetimes null / null
content:
```python
"""This module contains the general information for MacpoolUniverse ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class MacpoolUniverseConsts():
pass
class MacpoolUniverse(ManagedObject):
"""This is MacpoolUniverse class."""
consts = MacpoolUniverseConsts()
naming_props = set([])
mo_meta = MoMeta("MacpoolUniverse", "macpoolUniverse", "mac", VersionMeta.Version101e, "InputOutput", 0x1f, [], ["read-only"], [u'topRoot'], [u'macpoolAddr', u'macpoolFormat'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "MacpoolUniverse", **kwargs)
```
avg_line_length: 43.955556 | max_line_length: 248 | alphanum_fraction: 0.651668
content_no_comment:
```python
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class MacpoolUniverseConsts():
pass
class MacpoolUniverse(ManagedObject):
consts = MacpoolUniverseConsts()
naming_props = set([])
mo_meta = MoMeta("MacpoolUniverse", "macpoolUniverse", "mac", VersionMeta.Version101e, "InputOutput", 0x1f, [], ["read-only"], [u'topRoot'], [u'macpoolAddr', u'macpoolFormat'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "MacpoolUniverse", **kwargs)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
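
The `prop_map` in the generated class above exists to translate UCS XML attribute names into the Python attribute names the managed object uses. A minimal, self-contained sketch of that translation (not part of ucsmsdk; the sample attribute values are made up):

```python
# Stand-alone sketch of the prop_map translation used by managed objects.
prop_map = {
    "childAction": "child_action",
    "dn": "dn",
    "rn": "rn",
    "sacl": "sacl",
    "status": "status",
}

def xml_to_python_attrs(prop_map, xml_attrs):
    # Keep only attributes the managed object declares, renamed to Python style.
    return {prop_map[name]: value for name, value in xml_attrs.items() if name in prop_map}

print(xml_to_python_attrs(prop_map, {"childAction": "ignore", "dn": "mac", "bogus": "x"}))
# -> {'child_action': 'ignore', 'dn': 'mac'}
```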

---

hexsha: f7171159415e7019d10d3a184eb5d2bb629cc8f6 | size: 7,052 | ext: py | lang: Python
max_stars: repo_path image_classification/MLP-Mixer/load_pytorch_weights.py | repo_name RangeKing/PaddleViT | head_hexsha 0e25958686e04ed8872cf67fba0dfd6918e9b4dd | licenses ["Apache-2.0"] | count null | event datetimes null / null
max_issues: repo_path image_classification/MLP-Mixer/load_pytorch_weights.py | repo_name RangeKing/PaddleViT | head_hexsha 0e25958686e04ed8872cf67fba0dfd6918e9b4dd | licenses ["Apache-2.0"] | count null | event datetimes null / null
max_forks: repo_path image_classification/MLP-Mixer/load_pytorch_weights.py | repo_name RangeKing/PaddleViT | head_hexsha 0e25958686e04ed8872cf67fba0dfd6918e9b4dd | licenses ["Apache-2.0"] | count null | event datetimes null / null
content:
```python
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""convert pytorch model weights to paddle pdparams"""
import os
import numpy as np
import paddle
import torch
import timm
from mlp_mixer import build_mlp_mixer as build_model
from config import get_config
def print_model_named_params(model):
print('----------------------------------')
for name, param in model.named_parameters():
print(name, param.shape)
print('----------------------------------')
def print_model_named_buffers(model):
print('----------------------------------')
for name, param in model.named_buffers():
print(name, param.shape)
print('----------------------------------')
def torch_to_paddle_mapping(model_name, config):
mapping = [
('stem.proj', 'patch_embed.patch_embed'),
]
for stage_idx in range(config.MODEL.MIXER.DEPTH):
th_prefix = f'blocks.{stage_idx}'
pp_prefix = f'mixer_layers.{stage_idx}'
layer_mapping = [
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp_tokens.fc1', f'{pp_prefix}.mlp_tokens.fc1'),
(f'{th_prefix}.mlp_tokens.fc2', f'{pp_prefix}.mlp_tokens.fc2'),
(f'{th_prefix}.mlp_channels.fc1', f'{pp_prefix}.mlp_channels.fc1'),
(f'{th_prefix}.mlp_channels.fc2', f'{pp_prefix}.mlp_channels.fc2'),
]
mapping.extend(layer_mapping)
head_mapping = [
('norm', 'norm'),
('head', 'head'),
]
mapping.extend(head_mapping)
return mapping
def convert(torch_model, paddle_model, model_name, config):
def _set_value(th_name, pd_name, transpose=True):
th_shape = th_params[th_name].shape
pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list
#assert th_shape == pd_shape, f'{th_shape} != {pd_shape}'
print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')
if isinstance(th_params[th_name], torch.nn.parameter.Parameter):
value = th_params[th_name].data.numpy()
else:
value = th_params[th_name].numpy()
if len(value.shape) == 2 and transpose:
value = value.transpose((1, 0))
pd_params[pd_name].set_value(value)
# 1. get paddle and torch model parameters
pd_params = {}
th_params = {}
for name, param in paddle_model.named_parameters():
pd_params[name] = param
for name, param in torch_model.named_parameters():
th_params[name] = param
for name, param in paddle_model.named_buffers():
pd_params[name] = param
for name, param in torch_model.named_buffers():
th_params[name] = param
# 2. get name mapping pairs
mapping = torch_to_paddle_mapping(model_name, config)
missing_keys_th = []
missing_keys_pd = []
zip_map = list(zip(*mapping))
th_keys = list(zip_map[0])
pd_keys = list(zip_map[1])
for key in th_params:
missing = False
if key not in th_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in th_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in th_keys:
missing = False
if missing:
missing_keys_th.append(key)
for key in pd_params:
missing = False
if key not in pd_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in pd_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in pd_keys:
missing = False
if missing:
missing_keys_pd.append(key)
print('====================================')
print('missing_keys_pytorch:')
print(missing_keys_th)
print('missing_keys_paddle:')
print(missing_keys_pd)
print('====================================')
# 3. set torch param values to paddle params: may needs transpose on weights
for th_name, pd_name in mapping:
if th_name in th_params and pd_name in pd_params: # nn.Parameters
_set_value(th_name, pd_name)
else:
if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:
th_name_w = f'{th_name}.weight'
pd_name_w = f'{pd_name}.weight'
_set_value(th_name_w, pd_name_w)
if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:
th_name_b = f'{th_name}.bias'
pd_name_b = f'{pd_name}.bias'
_set_value(th_name_b, pd_name_b)
return paddle_model
def main():
paddle.set_device('cpu')
model_name_list = [
'mixer_b16_224',
'mixer_l16_224',
]
for model_name in model_name_list:
print(f'============= NOW: {model_name} =============')
sz = 224
config = get_config(f'./configs/{model_name}.yaml')
paddle_model = build_model(config)
paddle_model.eval()
print_model_named_params(paddle_model)
print_model_named_buffers(paddle_model)
print('+++++++++++++++++++++++++++++++++++')
device = torch.device('cpu')
torch_model = timm.create_model(model_name, pretrained=True)
torch_model = torch_model.to(device)
torch_model.eval()
print_model_named_params(torch_model)
print_model_named_buffers(torch_model)
# convert weights
paddle_model = convert(torch_model, paddle_model, model_name, config)
# check correctness
x = np.random.randn(2, 3, sz, sz).astype('float32')
x_paddle = paddle.to_tensor(x)
x_torch = torch.Tensor(x).to(device)
out_torch = torch_model(x_torch)
out_paddle = paddle_model(x_paddle)
out_torch = out_torch.data.cpu().numpy()
out_paddle = out_paddle.cpu().numpy()
print(out_torch.shape, out_paddle.shape)
print(out_torch[0, 0:100])
print('========================================================')
print(out_paddle[0, 0:100])
assert np.allclose(out_torch, out_paddle, atol = 1e-3)
# save weights for paddle model
model_path = os.path.join(f'./{model_name}.pdparams')
paddle.save(paddle_model.state_dict(), model_path)
print(f'{model_name} done')
print('all done')
if __name__ == "__main__":
main()
```
avg_line_length: 33.421801 | max_line_length: 86 | alphanum_fraction: 0.591605
content_no_comment:
```python
import os
import numpy as np
import paddle
import torch
import timm
from mlp_mixer import build_mlp_mixer as build_model
from config import get_config
def print_model_named_params(model):
print('----------------------------------')
for name, param in model.named_parameters():
print(name, param.shape)
print('----------------------------------')
def print_model_named_buffers(model):
print('----------------------------------')
for name, param in model.named_buffers():
print(name, param.shape)
print('----------------------------------')
def torch_to_paddle_mapping(model_name, config):
mapping = [
('stem.proj', 'patch_embed.patch_embed'),
]
for stage_idx in range(config.MODEL.MIXER.DEPTH):
th_prefix = f'blocks.{stage_idx}'
pp_prefix = f'mixer_layers.{stage_idx}'
layer_mapping = [
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp_tokens.fc1', f'{pp_prefix}.mlp_tokens.fc1'),
(f'{th_prefix}.mlp_tokens.fc2', f'{pp_prefix}.mlp_tokens.fc2'),
(f'{th_prefix}.mlp_channels.fc1', f'{pp_prefix}.mlp_channels.fc1'),
(f'{th_prefix}.mlp_channels.fc2', f'{pp_prefix}.mlp_channels.fc2'),
]
mapping.extend(layer_mapping)
head_mapping = [
('norm', 'norm'),
('head', 'head'),
]
mapping.extend(head_mapping)
return mapping
def convert(torch_model, paddle_model, model_name, config):
def _set_value(th_name, pd_name, transpose=True):
th_shape = th_params[th_name].shape
pd_shape = tuple(pd_params[pd_name].shape)
print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')
if isinstance(th_params[th_name], torch.nn.parameter.Parameter):
value = th_params[th_name].data.numpy()
else:
value = th_params[th_name].numpy()
if len(value.shape) == 2 and transpose:
value = value.transpose((1, 0))
pd_params[pd_name].set_value(value)
pd_params = {}
th_params = {}
for name, param in paddle_model.named_parameters():
pd_params[name] = param
for name, param in torch_model.named_parameters():
th_params[name] = param
for name, param in paddle_model.named_buffers():
pd_params[name] = param
for name, param in torch_model.named_buffers():
th_params[name] = param
mapping = torch_to_paddle_mapping(model_name, config)
missing_keys_th = []
missing_keys_pd = []
zip_map = list(zip(*mapping))
th_keys = list(zip_map[0])
pd_keys = list(zip_map[1])
for key in th_params:
missing = False
if key not in th_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in th_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in th_keys:
missing = False
if missing:
missing_keys_th.append(key)
for key in pd_params:
missing = False
if key not in pd_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in pd_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in pd_keys:
missing = False
if missing:
missing_keys_pd.append(key)
print('====================================')
print('missing_keys_pytorch:')
print(missing_keys_th)
print('missing_keys_paddle:')
print(missing_keys_pd)
print('====================================')
for th_name, pd_name in mapping:
if th_name in th_params and pd_name in pd_params:
_set_value(th_name, pd_name)
else:
if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:
th_name_w = f'{th_name}.weight'
pd_name_w = f'{pd_name}.weight'
_set_value(th_name_w, pd_name_w)
if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:
th_name_b = f'{th_name}.bias'
pd_name_b = f'{pd_name}.bias'
_set_value(th_name_b, pd_name_b)
return paddle_model
def main():
paddle.set_device('cpu')
model_name_list = [
'mixer_b16_224',
'mixer_l16_224',
]
for model_name in model_name_list:
print(f'============= NOW: {model_name} =============')
sz = 224
config = get_config(f'./configs/{model_name}.yaml')
paddle_model = build_model(config)
paddle_model.eval()
print_model_named_params(paddle_model)
print_model_named_buffers(paddle_model)
print('+++++++++++++++++++++++++++++++++++')
device = torch.device('cpu')
torch_model = timm.create_model(model_name, pretrained=True)
torch_model = torch_model.to(device)
torch_model.eval()
print_model_named_params(torch_model)
print_model_named_buffers(torch_model)
paddle_model = convert(torch_model, paddle_model, model_name, config)
x = np.random.randn(2, 3, sz, sz).astype('float32')
x_paddle = paddle.to_tensor(x)
x_torch = torch.Tensor(x).to(device)
out_torch = torch_model(x_torch)
out_paddle = paddle_model(x_paddle)
out_torch = out_torch.data.cpu().numpy()
out_paddle = out_paddle.cpu().numpy()
print(out_torch.shape, out_paddle.shape)
print(out_torch[0, 0:100])
print('========================================================')
print(out_paddle[0, 0:100])
assert np.allclose(out_torch, out_paddle, atol = 1e-3)
model_path = os.path.join(f'./{model_name}.pdparams')
paddle.save(paddle_model.state_dict(), model_path)
print(f'{model_name} done')
print('all done')
if __name__ == "__main__":
main()
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
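
The `transpose=True` branch in `_set_value` above exists because `torch.nn.Linear` stores its weight as `(out_features, in_features)` while `paddle.nn.Linear` stores `(in_features, out_features)`. A numpy-only sketch of why the transposed copy yields identical outputs:

```python
# Numpy-only illustration of the 2-D weight transpose used by the converter.
import numpy as np

rng = np.random.default_rng(0)
torch_style_w = rng.standard_normal((8, 4)).astype('float32')   # (out, in), torch convention
paddle_style_w = torch_style_w.transpose((1, 0))                # (in, out), paddle convention

x = rng.standard_normal((2, 4)).astype('float32')
y_torch = x @ torch_style_w.T      # torch's Linear computes x @ W.T
y_paddle = x @ paddle_style_w      # paddle's Linear computes x @ W
assert np.allclose(y_torch, y_paddle)
```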

---

hexsha: f717117743a109d891ba595119728a6bb26c4c8a | size: 2,424 | ext: py | lang: Python
max_stars: repo_path code/3D_plot/plot3d.py | repo_name iszhaoxin/Presentation | head_hexsha 70cebcae6717d39df5041dbfeefe2ebba6a752fa | licenses ["MIT"] | count null | event datetimes null / null
max_issues: repo_path code/3D_plot/plot3d.py | repo_name iszhaoxin/Presentation | head_hexsha 70cebcae6717d39df5041dbfeefe2ebba6a752fa | licenses ["MIT"] | count null | event datetimes null / null
max_forks: repo_path code/3D_plot/plot3d.py | repo_name iszhaoxin/Presentation | head_hexsha 70cebcae6717d39df5041dbfeefe2ebba6a752fa | licenses ["MIT"] | count null | event datetimes null / null
content:
```python
import igraph
import json
data = []
f = open("./miserables.json", 'r')
f = open("./mygraph.json", 'r')
data = json.loads(f.read())
print(data.keys())
N=len(data['nodes'])
L=len(data['links'])
Edges=[(data['links'][k]['source'], data['links'][k]['target']) for k in range(L)]
G=igraph.Graph(Edges, directed=False)
labels=[]
group=[]
for node in data['nodes']:
labels.append(node['name'])
group.append(node['group'])
layt=G.layout('kk', dim=3)
Xn=[layt[k][0] for k in range(N)]# x-coordinates of nodes
Yn=[layt[k][1] for k in range(N)]# y-coordinates
Zn=[layt[k][2] for k in range(N)]# z-coordinates
Xe=[]
Ye=[]
Ze=[]
for e in Edges:
Xe+=[layt[e[0]][0],layt[e[1]][0], None]# x-coordinates of edge ends
Ye+=[layt[e[0]][1],layt[e[1]][1], None]
Ze+=[layt[e[0]][2],layt[e[1]][2], None]
import plotly.plotly as py
import plotly.graph_objs as go
trace1=go.Scatter3d(x=Xe,y=Ye,z=Ze,
mode='lines',
line=dict(color='rgb(125,125,125)', width=1),
hoverinfo='none'
)
trace2=go.Scatter3d(x=Xn,y=Yn,z=Zn,
mode='markers',name='actors',
marker=dict(symbol='circle',
size=6,
color=group,
colorscale='Viridis',
line=dict(color='rgb(50,50,50)', width=0.5)
),
text=labels,
hoverinfo='text'
)
axis=dict(showbackground=False,
showline=False,
zeroline=False,
showgrid=False,
showticklabels=False,
title=''
)
layout = go.Layout(
title="Network of coappearances of characters in Victor Hugo's novel<br> Les Miserables (3D visualization)",
width=1000,
height=1000,
showlegend=False,
scene=dict(
xaxis=dict(axis),
yaxis=dict(axis),
zaxis=dict(axis),
),
margin=dict(
t=100
),
hovermode='closest',
annotations=[
dict(
showarrow=False,
text="Data source: <a href='http://bost.ocks.org/mike/miserables/miserables.json'>[1] miserables.json</a>",
xref='paper',
yref='paper',
x=0,
y=0.1,
xanchor='left',
yanchor='bottom',
font=dict(
size=14
)
)
], )
data=[trace1, trace2]
fig=go.Figure(data=data, layout=layout)
py.sign_in("iszhaoxin", "oRVJmTyCTE79tkx95yf0")
py.iplot(fig, filename='Les-Miserables')
```
avg_line_length: 24.734694 | max_line_length: 119 | alphanum_fraction: 0.558581
content_no_comment:
```python
import igraph
import json
data = []
f = open("./miserables.json", 'r')
f = open("./mygraph.json", 'r')
data = json.loads(f.read())
print(data.keys())
N=len(data['nodes'])
L=len(data['links'])
Edges=[(data['links'][k]['source'], data['links'][k]['target']) for k in range(L)]
G=igraph.Graph(Edges, directed=False)
labels=[]
group=[]
for node in data['nodes']:
labels.append(node['name'])
group.append(node['group'])
layt=G.layout('kk', dim=3)
Xn=[layt[k][0] for k in range(N)]
Yn=[layt[k][1] for k in range(N)]
Zn=[layt[k][2] for k in range(N)]
Xe=[]
Ye=[]
Ze=[]
for e in Edges:
Xe+=[layt[e[0]][0],layt[e[1]][0], None]
Ye+=[layt[e[0]][1],layt[e[1]][1], None]
Ze+=[layt[e[0]][2],layt[e[1]][2], None]
import plotly.plotly as py
import plotly.graph_objs as go
trace1=go.Scatter3d(x=Xe,y=Ye,z=Ze,
mode='lines',
line=dict(color='rgb(125,125,125)', width=1),
hoverinfo='none'
)
trace2=go.Scatter3d(x=Xn,y=Yn,z=Zn,
mode='markers',name='actors',
marker=dict(symbol='circle',
size=6,
color=group,
colorscale='Viridis',
line=dict(color='rgb(50,50,50)', width=0.5)
),
text=labels,
hoverinfo='text'
)
axis=dict(showbackground=False,
showline=False,
zeroline=False,
showgrid=False,
showticklabels=False,
title=''
)
layout = go.Layout(
title="Network of coappearances of characters in Victor Hugo's novel<br> Les Miserables (3D visualization)",
width=1000,
height=1000,
showlegend=False,
scene=dict(
xaxis=dict(axis),
yaxis=dict(axis),
zaxis=dict(axis),
),
margin=dict(
t=100
),
hovermode='closest',
annotations=[
dict(
showarrow=False,
text="Data source: <a href='http://bost.ocks.org/mike/miserables/miserables.json'>[1] miserables.json</a>",
xref='paper',
yref='paper',
x=0,
y=0.1,
xanchor='left',
yanchor='bottom',
font=dict(
size=14
)
)
], )
data=[trace1, trace2]
fig=go.Figure(data=data, layout=layout)
py.sign_in("iszhaoxin", "oRVJmTyCTE79tkx95yf0")
py.iplot(fig, filename='Les-Miserables')
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
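
The edge-coordinate loop above relies on a plotly idiom: a single `Scatter3d` lines trace draws one continuous polyline, and a `None` coordinate breaks it, so packing every edge as `[start, end, None]` renders all edges in a single trace. A tiny sketch with made-up coordinates:

```python
# Sketch of the None-separator idiom; the layout coordinates are invented.
edges = [(0, 1), (1, 2)]
coords = {0: (0.0, 0.0, 0.0), 1: (1.0, 0.0, 0.0), 2: (1.0, 1.0, 0.0)}
Xe, Ye, Ze = [], [], []
for a, b in edges:
    Xe += [coords[a][0], coords[b][0], None]  # None ends the current line segment
    Ye += [coords[a][1], coords[b][1], None]
    Ze += [coords[a][2], coords[b][2], None]
print(Xe)  # [0.0, 1.0, None, 1.0, 1.0, None]
```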

---

hexsha: f71711cd4743336100f7a85eeb3ee827d1e992c5 | size: 5,321 | ext: py | lang: Python
max_stars: repo_path spotify/views.py | repo_name Caleb-Shepard/Silent-Disco | head_hexsha 80f2cf04278d178c149999d643507aa26bee8654 | licenses ["MIT"] | count null | event datetimes null / null
max_issues: repo_path spotify/views.py | repo_name Caleb-Shepard/Silent-Disco | head_hexsha 80f2cf04278d178c149999d643507aa26bee8654 | licenses ["MIT"] | count null | event datetimes null / null
max_forks: repo_path spotify/views.py | repo_name Caleb-Shepard/Silent-Disco | head_hexsha 80f2cf04278d178c149999d643507aa26bee8654 | licenses ["MIT"] | count null | event datetimes null / null
content:
```python
from django.shortcuts import render, redirect
from .credentials import REDIRECT_URI, CLIENT_SECRET, CLIENT_ID
from rest_framework.views import APIView
from requests import Request, post
from rest_framework import status
from rest_framework.response import Response
from .util import *
from api.models import Room
from .models import Vote
class AuthURL(APIView):
    def get(self, request, format=None):
scopes = 'user-read-playback-state user-modify-playback-state user-read-currently-playing'
url = Request('GET', 'https://accounts.spotify.com/authorize', params={
'scope': scopes,
'response_type': 'code',
'redirect_uri': REDIRECT_URI,
'client_id': CLIENT_ID
}).prepare().url
return Response({'url': url}, status=status.HTTP_200_OK)
def spotify_callback(request, format=None):
code = request.GET.get('code')
error = request.GET.get('error')
response = post('https://accounts.spotify.com/api/token', data={
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': REDIRECT_URI,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET
}).json()
access_token = response.get('access_token')
token_type = response.get('token_type')
refresh_token = response.get('refresh_token')
expires_in = response.get('expires_in')
error = response.get('error')
if not request.session.exists(request.session.session_key):
request.session.create()
update_or_create_user_tokens(
request.session.session_key, access_token, token_type, expires_in, refresh_token)
return redirect('frontend:')
class IsAuthenticated(APIView):
def get(self, request, format=None):
is_authenticated = is_spotify_authenticated(
self.request.session.session_key)
return Response({'status': is_authenticated}, status=status.HTTP_200_OK)
class CurrentSong(APIView):
def get(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)
if room.exists():
room = room[0]
else:
return Response({}, status=status.HTTP_404_NOT_FOUND)
host = room.host
endpoint = "player/currently-playing"
response = execute_spotify_api_request(host, endpoint)
if 'error' in response or 'item' not in response:
return Response({}, status=status.HTTP_204_NO_CONTENT)
item = response.get('item')
duration = item.get('duration_ms')
progress = response.get('progress_ms')
album_cover = item.get('album').get('images')[0].get('url')
is_playing = response.get('is_playing')
song_id = item.get('id')
artist_string = ""
for i, artist in enumerate(item.get('artists')):
if i > 0:
artist_string += ", "
name = artist.get('name')
artist_string += name
votes = len(Vote.objects.filter(room=room, song_id=song_id))
song = {
'title': item.get('name'),
'artist': artist_string,
'duration': duration,
'time': progress,
'image_url': album_cover,
'is_playing': is_playing,
'votes': votes,
'votes_required': room.votes_to_skip,
'id': song_id
}
self.update_room_song(room, song_id)
return Response(song, status=status.HTTP_200_OK)
def update_room_song(self, room, song_id):
current_song = room.current_song
if current_song != song_id:
room.current_song = song_id
room.save(update_fields=['current_song'])
votes = Vote.objects.filter(room=room).delete()
class PauseSong(APIView):
    def put(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
if self.request.session.session_key == room.host or room.guest_can_pause:
pause_song(room.host)
return Response({}, status=status.HTTP_204_NO_CONTENT)
return Response({}, status=status.HTTP_403_FORBIDDEN)
class PlaySong(APIView):
    def put(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
if self.request.session.session_key == room.host or room.guest_can_pause:
play_song(room.host)
return Response({}, status=status.HTTP_204_NO_CONTENT)
return Response({}, status=status.HTTP_403_FORBIDDEN)
class SkipSong(APIView):
def post(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
votes = Vote.objects.filter(room=room, song_id=room.current_song)
votes_needed = room.votes_to_skip
if self.request.session.session_key == room.host or len(votes) + 1 >= votes_needed:
votes.delete()
skip_song(room.host)
else:
vote = Vote(user=self.request.session.session_key,
room=room, song_id=room.current_song)
vote.save()
return Response({}, status.HTTP_204_NO_CONTENT)
```
avg_line_length: 34.551948 | max_line_length: 98 | alphanum_fraction: 0.640857
content_no_comment:
```python
from django.shortcuts import render, redirect
from .credentials import REDIRECT_URI, CLIENT_SECRET, CLIENT_ID
from rest_framework.views import APIView
from requests import Request, post
from rest_framework import status
from rest_framework.response import Response
from .util import *
from api.models import Room
from .models import Vote
class AuthURL(APIView):
    def get(self, request, format=None):
scopes = 'user-read-playback-state user-modify-playback-state user-read-currently-playing'
url = Request('GET', 'https://accounts.spotify.com/authorize', params={
'scope': scopes,
'response_type': 'code',
'redirect_uri': REDIRECT_URI,
'client_id': CLIENT_ID
}).prepare().url
return Response({'url': url}, status=status.HTTP_200_OK)
def spotify_callback(request, format=None):
code = request.GET.get('code')
error = request.GET.get('error')
response = post('https://accounts.spotify.com/api/token', data={
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': REDIRECT_URI,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET
}).json()
access_token = response.get('access_token')
token_type = response.get('token_type')
refresh_token = response.get('refresh_token')
expires_in = response.get('expires_in')
error = response.get('error')
if not request.session.exists(request.session.session_key):
request.session.create()
update_or_create_user_tokens(
request.session.session_key, access_token, token_type, expires_in, refresh_token)
return redirect('frontend:')
class IsAuthenticated(APIView):
def get(self, request, format=None):
is_authenticated = is_spotify_authenticated(
self.request.session.session_key)
return Response({'status': is_authenticated}, status=status.HTTP_200_OK)
class CurrentSong(APIView):
def get(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)
if room.exists():
room = room[0]
else:
return Response({}, status=status.HTTP_404_NOT_FOUND)
host = room.host
endpoint = "player/currently-playing"
response = execute_spotify_api_request(host, endpoint)
if 'error' in response or 'item' not in response:
return Response({}, status=status.HTTP_204_NO_CONTENT)
item = response.get('item')
duration = item.get('duration_ms')
progress = response.get('progress_ms')
album_cover = item.get('album').get('images')[0].get('url')
is_playing = response.get('is_playing')
song_id = item.get('id')
artist_string = ""
for i, artist in enumerate(item.get('artists')):
if i > 0:
artist_string += ", "
name = artist.get('name')
artist_string += name
votes = len(Vote.objects.filter(room=room, song_id=song_id))
song = {
'title': item.get('name'),
'artist': artist_string,
'duration': duration,
'time': progress,
'image_url': album_cover,
'is_playing': is_playing,
'votes': votes,
'votes_required': room.votes_to_skip,
'id': song_id
}
self.update_room_song(room, song_id)
return Response(song, status=status.HTTP_200_OK)
def update_room_song(self, room, song_id):
current_song = room.current_song
if current_song != song_id:
room.current_song = song_id
room.save(update_fields=['current_song'])
votes = Vote.objects.filter(room=room).delete()
class PauseSong(APIView):
    def put(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
if self.request.session.session_key == room.host or room.guest_can_pause:
pause_song(room.host)
return Response({}, status=status.HTTP_204_NO_CONTENT)
return Response({}, status=status.HTTP_403_FORBIDDEN)
class PlaySong(APIView):
    def put(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
if self.request.session.session_key == room.host or room.guest_can_pause:
play_song(room.host)
return Response({}, status=status.HTTP_204_NO_CONTENT)
return Response({}, status=status.HTTP_403_FORBIDDEN)
class SkipSong(APIView):
def post(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
votes = Vote.objects.filter(room=room, song_id=room.current_song)
votes_needed = room.votes_to_skip
if self.request.session.session_key == room.host or len(votes) + 1 >= votes_needed:
votes.delete()
skip_song(room.host)
else:
vote = Vote(user=self.request.session.session_key,
room=room, song_id=room.current_song)
vote.save()
return Response({}, status.HTTP_204_NO_CONTENT)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
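
The skip decision in `SkipSong` above mixes host privilege with a vote threshold. Factored into a pure function (a hedged sketch, not part of the app; all names here are hypothetical) it becomes easy to unit-test without Django:

```python
# Pure-function sketch of SkipSong's decision logic.
def should_skip(session_key, host_key, current_votes, votes_needed):
    # The host may always skip; a guest's new vote counts toward the threshold.
    return session_key == host_key or current_votes + 1 >= votes_needed

assert should_skip("host", "host", 0, 5)        # host overrides the vote count
assert not should_skip("guest", "host", 2, 5)   # 3 of 5 votes: not enough
assert should_skip("guest", "host", 4, 5)       # 5th vote reaches the threshold
```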

---

hexsha: f717123331164f33e20c5a56f3a0048c5419d173 | size: 9,110 | ext: py | lang: Python
max_stars: repo_path tests/test_keydb.py | repo_name meskio/tuf | head_hexsha 09c3ceb993d40f7339bbbaf4eae617f95b972708 | licenses ["MIT"] | count 1 | event datetimes 2015-02-16T22:53:00.000Z / 2015-02-16T22:53:00.000Z
max_issues: repo_path tests/test_keydb.py | repo_name meskio/tuf | head_hexsha 09c3ceb993d40f7339bbbaf4eae617f95b972708 | licenses ["MIT"] | count null | event datetimes null / null
max_forks: repo_path tests/test_keydb.py | repo_name meskio/tuf | head_hexsha 09c3ceb993d40f7339bbbaf4eae617f95b972708 | licenses ["MIT"] | count 1 | event datetimes 2019-09-12T02:32:54.000Z / 2019-09-12T02:32:54.000Z
content:
```python
#!/usr/bin/env python
"""
<Program Name>
test_keydb.py
<Author>
Vladimir Diaz <vladimir.v.diaz@gmail.com>
<Started>
October 2012.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Unit test for 'keydb.py'.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import unittest
import logging
import tuf
import tuf.formats
import tuf.keys
import tuf.keydb
import tuf.log
logger = logging.getLogger('tuf.test_keydb')
# Generate the three keys to use in our test cases.
KEYS = []
for junk in range(3):
KEYS.append(tuf.keys.generate_rsa_key(2048))
class TestKeydb(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
tuf.keydb.clear_keydb()
def test_clear_keydb(self):
# Test condition ensuring 'clear_keydb()' clears the keydb database.
# Test the length of the keydb before and after adding a key.
self.assertEqual(0, len(tuf.keydb._keydb_dict))
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
tuf.keydb._keydb_dict[keyid] = rsakey
self.assertEqual(1, len(tuf.keydb._keydb_dict))
tuf.keydb.clear_keydb()
self.assertEqual(0, len(tuf.keydb._keydb_dict))
# Test condition for unexpected argument.
self.assertRaises(TypeError, tuf.keydb.clear_keydb, 'unexpected_argument')
def test_get_key(self):
# Test conditions using valid 'keyid' arguments.
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
tuf.keydb._keydb_dict[keyid] = rsakey
rsakey2 = KEYS[1]
keyid2 = KEYS[1]['keyid']
tuf.keydb._keydb_dict[keyid2] = rsakey2
self.assertEqual(rsakey, tuf.keydb.get_key(keyid))
self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
self.assertNotEqual(rsakey2, tuf.keydb.get_key(keyid))
self.assertNotEqual(rsakey, tuf.keydb.get_key(keyid2))
# Test conditions using invalid arguments.
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, None)
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, 123)
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, ['123'])
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, {'keyid': '123'})
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, '')
# Test condition using a 'keyid' that has not been added yet.
keyid3 = KEYS[2]['keyid']
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid3)
def test_add_key(self):
# Test conditions using valid 'keyid' arguments.
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
rsakey2 = KEYS[1]
keyid2 = KEYS[1]['keyid']
rsakey3 = KEYS[2]
keyid3 = KEYS[2]['keyid']
self.assertEqual(None, tuf.keydb.add_key(rsakey, keyid))
self.assertEqual(None, tuf.keydb.add_key(rsakey2, keyid2))
self.assertEqual(None, tuf.keydb.add_key(rsakey3))
self.assertEqual(rsakey, tuf.keydb.get_key(keyid))
self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
self.assertEqual(rsakey3, tuf.keydb.get_key(keyid3))
# Test conditions using arguments with invalid formats.
tuf.keydb.clear_keydb()
rsakey3['keytype'] = 'bad_keytype'
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, None, keyid)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, '', keyid)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, ['123'], keyid)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, {'a': 'b'}, keyid)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey, {'keyid': ''})
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey, 123)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey, False)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey, ['keyid'])
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey3, keyid3)
rsakey3['keytype'] = 'rsa'
# Test conditions where keyid does not match the rsakey.
self.assertRaises(tuf.Error, tuf.keydb.add_key, rsakey, keyid2)
self.assertRaises(tuf.Error, tuf.keydb.add_key, rsakey2, keyid)
# Test conditions using keyids that have already been added.
tuf.keydb.add_key(rsakey, keyid)
tuf.keydb.add_key(rsakey2, keyid2)
self.assertRaises(tuf.KeyAlreadyExistsError, tuf.keydb.add_key, rsakey)
self.assertRaises(tuf.KeyAlreadyExistsError, tuf.keydb.add_key, rsakey2)
def test_remove_key(self):
# Test conditions using valid keyids.
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
rsakey2 = KEYS[1]
keyid2 = KEYS[1]['keyid']
rsakey3 = KEYS[2]
keyid3 = KEYS[2]['keyid']
tuf.keydb.add_key(rsakey, keyid)
tuf.keydb.add_key(rsakey2, keyid2)
tuf.keydb.add_key(rsakey3, keyid3)
self.assertEqual(None, tuf.keydb.remove_key(keyid))
self.assertEqual(None, tuf.keydb.remove_key(keyid2))
# Ensure the keys were actually removed.
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid)
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid2)
# Test for 'keyid' not in keydb.
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.remove_key, keyid)
# Test condition for unknown key argument.
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.remove_key, '1')
# Test conditions for arguments with invalid formats.
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, None)
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, '')
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, 123)
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, ['123'])
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, {'bad': '123'})
self.assertRaises(tuf.Error, tuf.keydb.remove_key, rsakey3)
def test_create_keydb_from_root_metadata(self):
# Test condition using a valid 'root_metadata' argument.
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
rsakey2 = KEYS[1]
keyid2 = KEYS[1]['keyid']
keydict = {keyid: rsakey, keyid2: rsakey2, keyid: rsakey}
# Add a duplicate 'keyid' to log/trigger a 'tuf.KeyAlreadyExistsError'
# block (loading continues).
roledict = {'Root': {'keyids': [keyid], 'threshold': 1},
'Targets': {'keyids': [keyid2], 'threshold': 1}}
version = 8
consistent_snapshot = False
expires = '1985-10-21T01:21:00Z'
tuf.keydb.add_key(rsakey)
root_metadata = tuf.formats.RootFile.make_metadata(version,
expires,
keydict, roledict,
consistent_snapshot)
self.assertEqual(None, tuf.keydb.create_keydb_from_root_metadata(root_metadata))
tuf.keydb.create_keydb_from_root_metadata(root_metadata)
# Ensure 'keyid' and 'keyid2' were added to the keydb database.
self.assertEqual(rsakey, tuf.keydb.get_key(keyid))
self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
# Test conditions for arguments with invalid formats.
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, None)
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, '')
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, 123)
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, ['123'])
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, {'bad': '123'})
# Test conditions for correctly formatted 'root_metadata' arguments but
# containing incorrect keyids or key types. In these conditions, the keys
# should not be added to the keydb database and a warning should be logged.
tuf.keydb.clear_keydb()
# 'keyid' does not match 'rsakey2'.
keydict[keyid] = rsakey2
# Key with invalid keytype.
rsakey3 = KEYS[2]
keyid3 = KEYS[2]['keyid']
rsakey3['keytype'] = 'bad_keytype'
keydict[keyid3] = rsakey3
version = 8
expires = '1985-10-21T01:21:00Z'
root_metadata = tuf.formats.RootFile.make_metadata(version,
expires,
keydict, roledict,
consistent_snapshot)
self.assertEqual(None, tuf.keydb.create_keydb_from_root_metadata(root_metadata))
# Ensure only 'keyid2' was added to the keydb database. 'keyid' and
# 'keyid3' should not be stored.
self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid)
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid3)
rsakey3['keytype'] = 'rsa'
# Run unit test.
if __name__ == '__main__':
unittest.main()
```
avg_line_length: 35.866142 | max_line_length: 84 | alphanum_fraction: 0.681668
content_no_comment:
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import unittest
import logging
import tuf
import tuf.formats
import tuf.keys
import tuf.keydb
import tuf.log
logger = logging.getLogger('tuf.test_keydb')
KEYS = []
for junk in range(3):
KEYS.append(tuf.keys.generate_rsa_key(2048))
class TestKeydb(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
tuf.keydb.clear_keydb()
def test_clear_keydb(self):
self.assertEqual(0, len(tuf.keydb._keydb_dict))
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
tuf.keydb._keydb_dict[keyid] = rsakey
self.assertEqual(1, len(tuf.keydb._keydb_dict))
tuf.keydb.clear_keydb()
self.assertEqual(0, len(tuf.keydb._keydb_dict))
self.assertRaises(TypeError, tuf.keydb.clear_keydb, 'unexpected_argument')
def test_get_key(self):
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
tuf.keydb._keydb_dict[keyid] = rsakey
rsakey2 = KEYS[1]
keyid2 = KEYS[1]['keyid']
tuf.keydb._keydb_dict[keyid2] = rsakey2
self.assertEqual(rsakey, tuf.keydb.get_key(keyid))
self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
self.assertNotEqual(rsakey2, tuf.keydb.get_key(keyid))
self.assertNotEqual(rsakey, tuf.keydb.get_key(keyid2))
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, None)
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, 123)
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, ['123'])
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, {'keyid': '123'})
self.assertRaises(tuf.FormatError, tuf.keydb.get_key, '')
keyid3 = KEYS[2]['keyid']
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid3)
def test_add_key(self):
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
rsakey2 = KEYS[1]
keyid2 = KEYS[1]['keyid']
rsakey3 = KEYS[2]
keyid3 = KEYS[2]['keyid']
self.assertEqual(None, tuf.keydb.add_key(rsakey, keyid))
self.assertEqual(None, tuf.keydb.add_key(rsakey2, keyid2))
self.assertEqual(None, tuf.keydb.add_key(rsakey3))
self.assertEqual(rsakey, tuf.keydb.get_key(keyid))
self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
self.assertEqual(rsakey3, tuf.keydb.get_key(keyid3))
tuf.keydb.clear_keydb()
rsakey3['keytype'] = 'bad_keytype'
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, None, keyid)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, '', keyid)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, ['123'], keyid)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, {'a': 'b'}, keyid)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey, {'keyid': ''})
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey, 123)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey, False)
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey, ['keyid'])
self.assertRaises(tuf.FormatError, tuf.keydb.add_key, rsakey3, keyid3)
rsakey3['keytype'] = 'rsa'
self.assertRaises(tuf.Error, tuf.keydb.add_key, rsakey, keyid2)
self.assertRaises(tuf.Error, tuf.keydb.add_key, rsakey2, keyid)
tuf.keydb.add_key(rsakey, keyid)
tuf.keydb.add_key(rsakey2, keyid2)
self.assertRaises(tuf.KeyAlreadyExistsError, tuf.keydb.add_key, rsakey)
self.assertRaises(tuf.KeyAlreadyExistsError, tuf.keydb.add_key, rsakey2)
def test_remove_key(self):
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
rsakey2 = KEYS[1]
keyid2 = KEYS[1]['keyid']
rsakey3 = KEYS[2]
keyid3 = KEYS[2]['keyid']
tuf.keydb.add_key(rsakey, keyid)
tuf.keydb.add_key(rsakey2, keyid2)
tuf.keydb.add_key(rsakey3, keyid3)
self.assertEqual(None, tuf.keydb.remove_key(keyid))
self.assertEqual(None, tuf.keydb.remove_key(keyid2))
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid)
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid2)
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.remove_key, keyid)
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.remove_key, '1')
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, None)
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, '')
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, 123)
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, ['123'])
self.assertRaises(tuf.FormatError, tuf.keydb.remove_key, {'bad': '123'})
self.assertRaises(tuf.Error, tuf.keydb.remove_key, rsakey3)
def test_create_keydb_from_root_metadata(self):
rsakey = KEYS[0]
keyid = KEYS[0]['keyid']
rsakey2 = KEYS[1]
keyid2 = KEYS[1]['keyid']
keydict = {keyid: rsakey, keyid2: rsakey2, keyid: rsakey}
roledict = {'Root': {'keyids': [keyid], 'threshold': 1},
'Targets': {'keyids': [keyid2], 'threshold': 1}}
version = 8
consistent_snapshot = False
expires = '1985-10-21T01:21:00Z'
tuf.keydb.add_key(rsakey)
root_metadata = tuf.formats.RootFile.make_metadata(version,
expires,
keydict, roledict,
consistent_snapshot)
self.assertEqual(None, tuf.keydb.create_keydb_from_root_metadata(root_metadata))
tuf.keydb.create_keydb_from_root_metadata(root_metadata)
self.assertEqual(rsakey, tuf.keydb.get_key(keyid))
self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, None)
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, '')
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, 123)
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, ['123'])
self.assertRaises(tuf.FormatError,
tuf.keydb.create_keydb_from_root_metadata, {'bad': '123'})
tuf.keydb.clear_keydb()
keydict[keyid] = rsakey2
rsakey3 = KEYS[2]
keyid3 = KEYS[2]['keyid']
rsakey3['keytype'] = 'bad_keytype'
keydict[keyid3] = rsakey3
version = 8
expires = '1985-10-21T01:21:00Z'
root_metadata = tuf.formats.RootFile.make_metadata(version,
expires,
keydict, roledict,
consistent_snapshot)
self.assertEqual(None, tuf.keydb.create_keydb_from_root_metadata(root_metadata))
self.assertEqual(rsakey2, tuf.keydb.get_key(keyid2))
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid)
self.assertRaises(tuf.UnknownKeyError, tuf.keydb.get_key, keyid3)
rsakey3['keytype'] = 'rsa'
if __name__ == '__main__':
unittest.main()
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
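
For readers unfamiliar with the module under test: the contract these tests exercise reduces to a keyed store that refuses duplicate adds and unknown lookups. A dict-backed stand-in (a sketch for illustration, not tuf's implementation):

```python
# Dict-backed sketch of the add/get/remove contract exercised above.
class KeyAlreadyExistsError(Exception): pass
class UnknownKeyError(Exception): pass

_keydb = {}

def add_key(key, keyid):
    if keyid in _keydb:
        raise KeyAlreadyExistsError(keyid)
    _keydb[keyid] = key

def get_key(keyid):
    if keyid not in _keydb:
        raise UnknownKeyError(keyid)
    return _keydb[keyid]

def remove_key(keyid):
    if keyid not in _keydb:
        raise UnknownKeyError(keyid)
    del _keydb[keyid]
```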

---

hexsha: f71712333b3dfe5f51734a3e9dc773e435f4a887 | size: 264 | ext: py | lang: Python
max_stars: repo_path python/setup.py | repo_name Mattis3403/lucy_password | head_hexsha e0d6db6f3fc5bebadf9e184d3972f09cf58ac1e5 | licenses ["MIT"] | count null | event datetimes null / null
max_issues: repo_path python/setup.py | repo_name Mattis3403/lucy_password | head_hexsha e0d6db6f3fc5bebadf9e184d3972f09cf58ac1e5 | licenses ["MIT"] | count null | event datetimes null / null
max_forks: repo_path python/setup.py | repo_name Mattis3403/lucy_password | head_hexsha e0d6db6f3fc5bebadf9e184d3972f09cf58ac1e5 | licenses ["MIT"] | count null | event datetimes null / null
content:
```python
from setuptools import setup
setup(
name='lucy_password',
version='0.1',
url='https://github.com/Mattis3403/lucy_password',
license='MIT',
author='Lucy',
author_email='m.seebeck@campus.tu-berlin.de',
description='A Password Manager'
)
```
avg_line_length: 22 | max_line_length: 54 | alphanum_fraction: 0.67803
content_no_comment:
```python
from setuptools import setup
setup(
name='lucy_password',
version='0.1',
url='https://github.com/Mattis3403/lucy_password',
license='MIT',
author='Lucy',
author_email='m.seebeck@campus.tu-berlin.de',
description='A Password Manager'
)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
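
One thing worth noting about the `setup()` call above: it declares metadata only and lists no packages or modules, so a built distribution would ship no code. If the project's code lives in a `lucy_password/` package directory (an assumption about the repository layout), a `find_packages()` line would include it:

```python
# Hedged sketch: same metadata plus package discovery. The lucy_password/
# package directory is an assumption about the repository layout.
from setuptools import setup, find_packages

setup(
    name='lucy_password',
    version='0.1',
    url='https://github.com/Mattis3403/lucy_password',
    license='MIT',
    author='Lucy',
    author_email='m.seebeck@campus.tu-berlin.de',
    description='A Password Manager',
    packages=find_packages(),
)
```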

---

hexsha: f7171260e741432d5843b14ddc6f794a0f0ae620 | size: 21,489 | ext: py | lang: Python
max_stars: repo_path layers/poky/meta/lib/oeqa/oetest.py | repo_name dtischler/px30-test | head_hexsha 55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f | licenses ["Apache-2.0"] | count 53 | event datetimes 2018-02-28T08:51:32.000Z / 2022-02-28T06:49:23.000Z
max_issues: repo_path meta/lib/oeqa/oetest.py | repo_name nareshgbhat/luv-yocto | head_hexsha 48976c54238dda0791e274927371265d259c0e5a | licenses ["MIT"] | count 27 | event datetimes 2018-01-25T00:26:53.000Z / 2020-08-09T05:20:04.000Z
max_forks: repo_path meta/lib/oeqa/oetest.py | repo_name nareshgbhat/luv-yocto | head_hexsha 48976c54238dda0791e274927371265d259c0e5a | licenses ["MIT"] | count 51 | event datetimes 2018-02-21T04:46:08.000Z / 2022-03-02T04:20:41.000Z
content:
```python
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# Main unittest module used by testimage.bbclass
# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime.
# It also has some helper functions and it's responsible for actually starting the tests
import os, re, mmap, sys
import unittest
import inspect
import subprocess
import signal
import shutil
import functools
try:
import bb
except ImportError:
pass
import logging
import oeqa.runtime
# Exported test doesn't require sdkext
try:
import oeqa.sdkext
except ImportError:
pass
from oeqa.utils.decorators import LogResults, gettag, getResults
logger = logging.getLogger("BitBake")
def getVar(obj):
    # Extends dict: if a variable doesn't exist here, look it up on the test case.
class VarDict(dict):
def __getitem__(self, key):
return gettag(obj, key)
return VarDict()
def checkTags(tc, tagexp):
return eval(tagexp, None, getVar(tc))
def filterByTagExp(testsuite, tagexp):
if not tagexp:
return testsuite
caseList = []
for each in testsuite:
if not isinstance(each, unittest.BaseTestSuite):
if checkTags(each, tagexp):
caseList.append(each)
else:
caseList.append(filterByTagExp(each, tagexp))
return testsuite.__class__(caseList)
@LogResults
class oeTest(unittest.TestCase):
pscmd = "ps"
longMessage = True
@classmethod
def hasPackage(self, pkg):
"""
True if the full package name exists in the manifest, False otherwise.
"""
return pkg in oeTest.tc.pkgmanifest
@classmethod
def hasPackageMatch(self, match):
"""
True if match exists in the manifest as a regular expression substring,
False otherwise.
"""
for s in oeTest.tc.pkgmanifest:
if re.match(match, s):
return True
return False
@classmethod
def hasFeature(self,feature):
if feature in oeTest.tc.imagefeatures or \
feature in oeTest.tc.distrofeatures:
return True
else:
return False
class oeRuntimeTest(oeTest):
def __init__(self, methodName='runTest'):
self.target = oeRuntimeTest.tc.target
super(oeRuntimeTest, self).__init__(methodName)
def setUp(self):
# Install packages in the DUT
self.tc.install_uninstall_packages(self.id())
# Check if test needs to run
if self.tc.sigterm:
self.fail("Got SIGTERM")
elif (type(self.target).__name__ == "QemuTarget"):
self.assertTrue(self.target.check(), msg = "Qemu not running?")
self.setUpLocal()
# a setup method before tests but after the class instantiation
def setUpLocal(self):
pass
def tearDown(self):
# Uninstall packages in the DUT
self.tc.install_uninstall_packages(self.id(), False)
res = getResults()
# If a test fails or there is an exception dump
# for QemuTarget only
if (type(self.target).__name__ == "QemuTarget" and
(self.id() in res.getErrorList() or
self.id() in res.getFailList())):
self.tc.host_dumper.create_dir(self._testMethodName)
self.tc.host_dumper.dump_host()
self.target.target_dumper.dump_target(
self.tc.host_dumper.dump_dir)
print ("%s dump data stored in %s" % (self._testMethodName,
self.tc.host_dumper.dump_dir))
self.tearDownLocal()
# Method to be run after tearDown and implemented by child classes
def tearDownLocal(self):
pass
def getmodule(pos=2):
    # inspect.stack() returns a list of tuples containing frame information.
    # The first element is the current frame; the caller is at index 1.
frameinfo = inspect.stack()[pos]
modname = inspect.getmodulename(frameinfo[1])
#modname = inspect.getmodule(frameinfo[0]).__name__
return modname
def skipModule(reason, pos=2):
modname = getmodule(pos)
if modname not in oeTest.tc.testsrequired:
raise unittest.SkipTest("%s: %s" % (modname, reason))
else:
raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
"\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
"\nor the image really doesn't have the required feature/package when it should." % (modname, reason))
def skipModuleIf(cond, reason):
if cond:
skipModule(reason, 3)
def skipModuleUnless(cond, reason):
if not cond:
skipModule(reason, 3)
_buffer_logger = ""
def custom_verbose(msg, *args, **kwargs):
global _buffer_logger
if msg[-1] != "\n":
_buffer_logger += msg
else:
_buffer_logger += msg
try:
bb.plain(_buffer_logger.rstrip("\n"), *args, **kwargs)
except NameError:
logger.info(_buffer_logger.rstrip("\n"), *args, **kwargs)
_buffer_logger = ""
class TestContext(object):
def __init__(self, d, exported=False):
self.d = d
self.testsuites = self._get_test_suites()
if exported:
path = [os.path.dirname(os.path.abspath(__file__))]
extrapath = ""
else:
path = d.getVar("BBPATH").split(':')
extrapath = "lib/oeqa"
self.testslist = self._get_tests_list(path, extrapath)
self.testsrequired = self._get_test_suites_required()
self.filesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runtime/files")
self.corefilesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
self.imagefeatures = d.getVar("IMAGE_FEATURES").split()
self.distrofeatures = d.getVar("DISTRO_FEATURES").split()
# get testcase list from specified file
# if path is a relative path, then relative to build/conf/
def _read_testlist(self, fpath, builddir):
if not os.path.isabs(fpath):
fpath = os.path.join(builddir, "conf", fpath)
if not os.path.exists(fpath):
bb.fatal("No such manifest file: ", fpath)
tcs = []
for line in open(fpath).readlines():
line = line.strip()
if line and not line.startswith("#"):
tcs.append(line)
return " ".join(tcs)
# return test list by type also filter if TEST_SUITES is specified
def _get_tests_list(self, bbpath, extrapath):
testslist = []
type = self._get_test_namespace()
# This relies on lib/ under each directory in BBPATH being added to sys.path
# (as done by default in base.bbclass)
for testname in self.testsuites:
if testname != "auto":
if testname.startswith("oeqa."):
testslist.append(testname)
continue
found = False
for p in bbpath:
if os.path.exists(os.path.join(p, extrapath, type, testname + ".py")):
testslist.append("oeqa." + type + "." + testname)
found = True
break
elif os.path.exists(os.path.join(p, extrapath, type, testname.split(".")[0] + ".py")):
testslist.append("oeqa." + type + "." + testname)
found = True
break
if not found:
bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
if "auto" in self.testsuites:
def add_auto_list(path):
files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
for f in files:
module = 'oeqa.' + type + '.' + f[:-3]
if module not in testslist:
testslist.append(module)
for p in bbpath:
testpath = os.path.join(p, 'lib', 'oeqa', type)
bb.debug(2, 'Searching for tests in %s' % testpath)
if os.path.exists(testpath):
add_auto_list(testpath)
return testslist
def getTestModules(self):
"""
Returns all the test modules in the testlist.
"""
import pkgutil
modules = []
for test in self.testslist:
if re.search("\w+\.\w+\.test_\S+", test):
test = '.'.join(t.split('.')[:3])
module = pkgutil.get_loader(test)
modules.append(module)
return modules
def getModulefromID(self, test_id):
"""
Returns the test module based on a test id.
"""
module_name = ".".join(test_id.split(".")[:3])
modules = self.getTestModules()
for module in modules:
if module.name == module_name:
return module
return None
def getTests(self, test):
'''Return all individual tests executed when running the suite.'''
# Unfortunately unittest does not have an API for this, so we have
# to rely on implementation details. This only needs to work
# for TestSuite containing TestCase.
method = getattr(test, '_testMethodName', None)
if method:
# leaf case: a TestCase
yield test
else:
# Look into TestSuite.
tests = getattr(test, '_tests', [])
for t1 in tests:
for t2 in self.getTests(t1):
yield t2
def loadTests(self):
setattr(oeTest, "tc", self)
testloader = unittest.TestLoader()
testloader.sortTestMethodsUsing = None
suites = [testloader.loadTestsFromName(name) for name in self.testslist]
suites = filterByTagExp(suites, getattr(self, "tagexp", None))
# Determine dependencies between suites by looking for @skipUnlessPassed
# method annotations. Suite A depends on suite B if any method in A
# depends on a method on B.
for suite in suites:
suite.dependencies = []
suite.depth = 0
for test in self.getTests(suite):
methodname = getattr(test, '_testMethodName', None)
if methodname:
method = getattr(test, methodname)
depends_on = getattr(method, '_depends_on', None)
if depends_on:
for dep_suite in suites:
if depends_on in [getattr(t, '_testMethodName', None) for t in self.getTests(dep_suite)]:
if dep_suite not in suite.dependencies and \
dep_suite is not suite:
suite.dependencies.append(dep_suite)
break
else:
logger.warning("Test %s was declared as @skipUnlessPassed('%s') but that test is either not defined or not active. Will run the test anyway." %
(test, depends_on))
# Use brute-force topological sort to determine ordering. Sort by
# depth (higher depth = must run later), with original ordering to
# break ties.
def set_suite_depth(suite):
for dep in suite.dependencies:
new_depth = set_suite_depth(dep) + 1
if new_depth > suite.depth:
suite.depth = new_depth
return suite.depth
for index, suite in enumerate(suites):
set_suite_depth(suite)
suite.index = index
def cmp(a, b):
return (a > b) - (a < b)
def cmpfunc(a, b):
return cmp((a.depth, a.index), (b.depth, b.index))
suites.sort(key=functools.cmp_to_key(cmpfunc))
self.suite = testloader.suiteClass(suites)
return self.suite
def runTests(self):
logger.info("Test modules %s" % self.testslist)
if hasattr(self, "tagexp") and self.tagexp:
logger.info("Filter test cases by tags: %s" % self.tagexp)
logger.info("Found %s tests" % self.suite.countTestCases())
runner = unittest.TextTestRunner(verbosity=2)
if 'bb' in sys.modules:
runner.stream.write = custom_verbose
return runner.run(self.suite)
class RuntimeTestContext(TestContext):
def __init__(self, d, target, exported=False):
super(RuntimeTestContext, self).__init__(d, exported)
self.target = target
self.pkgmanifest = {}
manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"),
d.getVar("IMAGE_LINK_NAME") + ".manifest")
nomanifest = d.getVar("IMAGE_NO_MANIFEST")
if nomanifest is None or nomanifest != "1":
try:
with open(manifest) as f:
for line in f:
(pkg, arch, version) = line.strip().split()
self.pkgmanifest[pkg] = (version, arch)
except IOError as e:
bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
def _get_test_namespace(self):
return "runtime"
def _get_test_suites(self):
testsuites = []
manifests = (self.d.getVar("TEST_SUITES_MANIFEST") or '').split()
if manifests:
for manifest in manifests:
testsuites.extend(self._read_testlist(manifest,
self.d.getVar("TOPDIR")).split())
else:
testsuites = self.d.getVar("TEST_SUITES").split()
return testsuites
def _get_test_suites_required(self):
return [t for t in self.d.getVar("TEST_SUITES").split() if t != "auto"]
def loadTests(self):
super(RuntimeTestContext, self).loadTests()
if oeTest.hasPackage("procps"):
oeRuntimeTest.pscmd = "ps -ef"
def extract_packages(self):
"""
Find packages that will be needed during runtime.
"""
modules = self.getTestModules()
bbpaths = self.d.getVar("BBPATH").split(":")
shutil.rmtree(self.d.getVar("TEST_EXTRACTED_DIR"))
shutil.rmtree(self.d.getVar("TEST_PACKAGED_DIR"))
for module in modules:
json_file = self._getJsonFile(module)
if json_file:
needed_packages = self._getNeededPackages(json_file)
self._perform_package_extraction(needed_packages)
def _perform_package_extraction(self, needed_packages):
"""
Extract packages that will be needed during runtime.
"""
import oe.path
extracted_path = self.d.getVar("TEST_EXTRACTED_DIR")
packaged_path = self.d.getVar("TEST_PACKAGED_DIR")
for key,value in needed_packages.items():
packages = ()
if isinstance(value, dict):
packages = (value, )
elif isinstance(value, list):
packages = value
else:
bb.fatal("Failed to process needed packages for %s; "
"Value must be a dict or list" % key)
for package in packages:
pkg = package["pkg"]
rm = package.get("rm", False)
extract = package.get("extract", True)
if extract:
dst_dir = os.path.join(extracted_path, pkg)
else:
dst_dir = os.path.join(packaged_path)
# Extract package and copy it to TEST_EXTRACTED_DIR
pkg_dir = self._extract_in_tmpdir(pkg)
if extract:
                    # The same package may be used by more than one test;
                    # no need to extract it again.
if os.path.exists(dst_dir):
continue
oe.path.copytree(pkg_dir, dst_dir)
shutil.rmtree(pkg_dir)
# Copy package to TEST_PACKAGED_DIR
else:
self._copy_package(pkg)
def _getJsonFile(self, module):
"""
        Returns the path of the JSON file for a module, or an empty string if it doesn't exist.
"""
module_file = module.path
json_file = "%s.json" % module_file.rsplit(".", 1)[0]
if os.path.isfile(module_file) and os.path.isfile(json_file):
return json_file
else:
return ""
def _getNeededPackages(self, json_file, test=None):
"""
Returns a dict with needed packages based on a JSON file.
If a test is specified it will return the dict just for that test.
"""
import json
needed_packages = {}
with open(json_file) as f:
test_packages = json.load(f)
for key,value in test_packages.items():
needed_packages[key] = value
if test:
if test in needed_packages:
needed_packages = needed_packages[test]
else:
needed_packages = {}
return needed_packages
def _extract_in_tmpdir(self, pkg):
""""
Returns path to a temp directory where the package was
extracted without dependencies.
"""
from oeqa.utils.package_manager import get_package_manager
pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
pm = get_package_manager(self.d, pkg_path)
extract_dir = pm.extract(pkg)
shutil.rmtree(pkg_path)
return extract_dir
def _copy_package(self, pkg):
"""
Copy the RPM, DEB or IPK package to dst_dir
"""
from oeqa.utils.package_manager import get_package_manager
pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
dst_dir = self.d.getVar("TEST_PACKAGED_DIR")
pm = get_package_manager(self.d, pkg_path)
pkg_info = pm.package_info(pkg)
file_path = pkg_info[pkg]["filepath"]
shutil.copy2(file_path, dst_dir)
shutil.rmtree(pkg_path)
def install_uninstall_packages(self, test_id, pkg_dir, install):
"""
Check if the test requires a package and Install/Uninstall it in the DUT
"""
test = test_id.split(".")[4]
module = self.getModulefromID(test_id)
json = self._getJsonFile(module)
if json:
needed_packages = self._getNeededPackages(json, test)
if needed_packages:
self._install_uninstall_packages(needed_packages, pkg_dir, install)
def _install_uninstall_packages(self, needed_packages, pkg_dir, install=True):
"""
Install/Uninstall packages in the DUT without using a package manager
"""
if isinstance(needed_packages, dict):
packages = [needed_packages]
elif isinstance(needed_packages, list):
packages = needed_packages
for package in packages:
pkg = package["pkg"]
rm = package.get("rm", False)
extract = package.get("extract", True)
src_dir = os.path.join(pkg_dir, pkg)
# Install package
if install and extract:
self.target.connection.copy_dir_to(src_dir, "/")
# Uninstall package
elif not install and rm:
self.target.connection.delete_dir_structure(src_dir, "/")
class ImageTestContext(RuntimeTestContext):
def __init__(self, d, target, host_dumper):
super(ImageTestContext, self).__init__(d, target)
self.tagexp = d.getVar("TEST_SUITES_TAGS")
self.host_dumper = host_dumper
self.sigterm = False
self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self._sigterm_exception)
def _sigterm_exception(self, signum, stackframe):
bb.warn("TestImage received SIGTERM, shutting down...")
self.sigterm = True
self.target.stop()
def install_uninstall_packages(self, test_id, install=True):
"""
Check if the test requires a package and Install/Uninstall it in the DUT
"""
pkg_dir = self.d.getVar("TEST_EXTRACTED_DIR")
super(ImageTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
class ExportTestContext(RuntimeTestContext):
def __init__(self, d, target, exported=False, parsedArgs={}):
"""
        This class is used when exporting tests and when they are executed outside the OE environment.
parsedArgs can contain the following:
- tag: Filter test by tag.
"""
super(ExportTestContext, self).__init__(d, target, exported)
tag = parsedArgs.get("tag", None)
self.tagexp = tag if tag != None else d.getVar("TEST_SUITES_TAGS")
self.sigterm = None
def install_uninstall_packages(self, test_id, install=True):
"""
Check if the test requires a package and Install/Uninstall it in the DUT
"""
export_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
extracted_dir = self.d.getVar("TEST_EXPORT_EXTRACTED_DIR")
pkg_dir = os.path.join(export_dir, extracted_dir)
super(ExportTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
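# --- Illustrative sketch, not part of the original file ---
# A minimal runtime test module of the kind loadTests() above resolves
# (e.g. lib/oeqa/runtime/hello.py, enabled via TEST_SUITES). The module,
# class, and package names here are hypothetical; the pattern assumes
# target.run() returns a (status, output) pair, as the old oeqa target
# classes do.
from oeqa.oetest import oeRuntimeTest, skipModule
def setUpModule():
    if not oeRuntimeTest.hasPackage("busybox"):
        skipModule("busybox is not installed in the image")
class HelloTest(oeRuntimeTest):
    def test_echo(self):
        (status, output) = self.target.run("echo hello")
        self.assertEqual(status, 0, msg=output)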
| 34.828201
| 171
| 0.589557
|
import os, re, mmap, sys
import unittest
import inspect
import subprocess
import signal
import shutil
import functools
try:
import bb
except ImportError:
pass
import logging
import oeqa.runtime
# Exported test doesn't require sdkext
try:
import oeqa.sdkext
except ImportError:
pass
from oeqa.utils.decorators import LogResults, gettag, getResults
logger = logging.getLogger("BitBake")
def getVar(obj):
class VarDict(dict):
def __getitem__(self, key):
return gettag(obj, key)
return VarDict()
def checkTags(tc, tagexp):
return eval(tagexp, None, getVar(tc))
def filterByTagExp(testsuite, tagexp):
if not tagexp:
return testsuite
caseList = []
for each in testsuite:
if not isinstance(each, unittest.BaseTestSuite):
if checkTags(each, tagexp):
caseList.append(each)
else:
caseList.append(filterByTagExp(each, tagexp))
return testsuite.__class__(caseList)
@LogResults
class oeTest(unittest.TestCase):
pscmd = "ps"
longMessage = True
@classmethod
def hasPackage(self, pkg):
return pkg in oeTest.tc.pkgmanifest
@classmethod
def hasPackageMatch(self, match):
for s in oeTest.tc.pkgmanifest:
if re.match(match, s):
return True
return False
@classmethod
def hasFeature(self,feature):
if feature in oeTest.tc.imagefeatures or \
feature in oeTest.tc.distrofeatures:
return True
else:
return False
class oeRuntimeTest(oeTest):
def __init__(self, methodName='runTest'):
self.target = oeRuntimeTest.tc.target
super(oeRuntimeTest, self).__init__(methodName)
def setUp(self):
# Install packages in the DUT
self.tc.install_uninstall_packages(self.id())
# Check if test needs to run
if self.tc.sigterm:
self.fail("Got SIGTERM")
elif (type(self.target).__name__ == "QemuTarget"):
self.assertTrue(self.target.check(), msg = "Qemu not running?")
self.setUpLocal()
# a setup method before tests but after the class instantiation
def setUpLocal(self):
pass
def tearDown(self):
# Uninstall packages in the DUT
self.tc.install_uninstall_packages(self.id(), False)
res = getResults()
        # If a test fails or there is an exception, dump
        # debug data (QemuTarget only)
if (type(self.target).__name__ == "QemuTarget" and
(self.id() in res.getErrorList() or
self.id() in res.getFailList())):
self.tc.host_dumper.create_dir(self._testMethodName)
self.tc.host_dumper.dump_host()
self.target.target_dumper.dump_target(
self.tc.host_dumper.dump_dir)
print ("%s dump data stored in %s" % (self._testMethodName,
self.tc.host_dumper.dump_dir))
self.tearDownLocal()
# Method to be run after tearDown and implemented by child classes
def tearDownLocal(self):
pass
def getmodule(pos=2):
    # inspect.stack() returns a list of tuples containing frame information;
    # the first element is the current frame, the caller is at index 1
frameinfo = inspect.stack()[pos]
modname = inspect.getmodulename(frameinfo[1])
#modname = inspect.getmodule(frameinfo[0]).__name__
return modname
def skipModule(reason, pos=2):
modname = getmodule(pos)
if modname not in oeTest.tc.testsrequired:
raise unittest.SkipTest("%s: %s" % (modname, reason))
else:
raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
"\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
"\nor the image really doesn't have the required feature/package when it should." % (modname, reason))
def skipModuleIf(cond, reason):
if cond:
skipModule(reason, 3)
def skipModuleUnless(cond, reason):
if not cond:
skipModule(reason, 3)
_buffer_logger = ""
def custom_verbose(msg, *args, **kwargs):
global _buffer_logger
if msg[-1] != "\n":
_buffer_logger += msg
else:
_buffer_logger += msg
try:
bb.plain(_buffer_logger.rstrip("\n"), *args, **kwargs)
except NameError:
logger.info(_buffer_logger.rstrip("\n"), *args, **kwargs)
_buffer_logger = ""
class TestContext(object):
def __init__(self, d, exported=False):
self.d = d
self.testsuites = self._get_test_suites()
if exported:
path = [os.path.dirname(os.path.abspath(__file__))]
extrapath = ""
else:
path = d.getVar("BBPATH").split(':')
extrapath = "lib/oeqa"
self.testslist = self._get_tests_list(path, extrapath)
self.testsrequired = self._get_test_suites_required()
self.filesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runtime/files")
self.corefilesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
self.imagefeatures = d.getVar("IMAGE_FEATURES").split()
self.distrofeatures = d.getVar("DISTRO_FEATURES").split()
def _read_testlist(self, fpath, builddir):
if not os.path.isabs(fpath):
fpath = os.path.join(builddir, "conf", fpath)
if not os.path.exists(fpath):
bb.fatal("No such manifest file: ", fpath)
tcs = []
for line in open(fpath).readlines():
line = line.strip()
if line and not line.startswith("#"):
tcs.append(line)
return " ".join(tcs)
def _get_tests_list(self, bbpath, extrapath):
testslist = []
type = self._get_test_namespace()
for testname in self.testsuites:
if testname != "auto":
if testname.startswith("oeqa."):
testslist.append(testname)
continue
found = False
for p in bbpath:
if os.path.exists(os.path.join(p, extrapath, type, testname + ".py")):
testslist.append("oeqa." + type + "." + testname)
found = True
break
elif os.path.exists(os.path.join(p, extrapath, type, testname.split(".")[0] + ".py")):
testslist.append("oeqa." + type + "." + testname)
found = True
break
if not found:
bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
if "auto" in self.testsuites:
def add_auto_list(path):
files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
for f in files:
module = 'oeqa.' + type + '.' + f[:-3]
if module not in testslist:
testslist.append(module)
for p in bbpath:
testpath = os.path.join(p, 'lib', 'oeqa', type)
bb.debug(2, 'Searching for tests in %s' % testpath)
if os.path.exists(testpath):
add_auto_list(testpath)
return testslist
def getTestModules(self):
import pkgutil
modules = []
for test in self.testslist:
if re.search("\w+\.\w+\.test_\S+", test):
test = '.'.join(t.split('.')[:3])
module = pkgutil.get_loader(test)
modules.append(module)
return modules
def getModulefromID(self, test_id):
module_name = ".".join(test_id.split(".")[:3])
modules = self.getTestModules()
for module in modules:
if module.name == module_name:
return module
return None
def getTests(self, test):
method = getattr(test, '_testMethodName', None)
if method:
yield test
else:
tests = getattr(test, '_tests', [])
for t1 in tests:
for t2 in self.getTests(t1):
yield t2
def loadTests(self):
setattr(oeTest, "tc", self)
testloader = unittest.TestLoader()
testloader.sortTestMethodsUsing = None
suites = [testloader.loadTestsFromName(name) for name in self.testslist]
suites = filterByTagExp(suites, getattr(self, "tagexp", None))
for suite in suites:
suite.dependencies = []
suite.depth = 0
for test in self.getTests(suite):
methodname = getattr(test, '_testMethodName', None)
if methodname:
method = getattr(test, methodname)
depends_on = getattr(method, '_depends_on', None)
if depends_on:
for dep_suite in suites:
if depends_on in [getattr(t, '_testMethodName', None) for t in self.getTests(dep_suite)]:
if dep_suite not in suite.dependencies and \
dep_suite is not suite:
suite.dependencies.append(dep_suite)
break
else:
logger.warning("Test %s was declared as @skipUnlessPassed('%s') but that test is either not defined or not active. Will run the test anyway." %
(test, depends_on))
def set_suite_depth(suite):
for dep in suite.dependencies:
new_depth = set_suite_depth(dep) + 1
if new_depth > suite.depth:
suite.depth = new_depth
return suite.depth
for index, suite in enumerate(suites):
set_suite_depth(suite)
suite.index = index
def cmp(a, b):
return (a > b) - (a < b)
def cmpfunc(a, b):
return cmp((a.depth, a.index), (b.depth, b.index))
suites.sort(key=functools.cmp_to_key(cmpfunc))
self.suite = testloader.suiteClass(suites)
return self.suite
def runTests(self):
logger.info("Test modules %s" % self.testslist)
if hasattr(self, "tagexp") and self.tagexp:
logger.info("Filter test cases by tags: %s" % self.tagexp)
logger.info("Found %s tests" % self.suite.countTestCases())
runner = unittest.TextTestRunner(verbosity=2)
if 'bb' in sys.modules:
runner.stream.write = custom_verbose
return runner.run(self.suite)
class RuntimeTestContext(TestContext):
def __init__(self, d, target, exported=False):
super(RuntimeTestContext, self).__init__(d, exported)
self.target = target
self.pkgmanifest = {}
manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"),
d.getVar("IMAGE_LINK_NAME") + ".manifest")
nomanifest = d.getVar("IMAGE_NO_MANIFEST")
if nomanifest is None or nomanifest != "1":
try:
with open(manifest) as f:
for line in f:
(pkg, arch, version) = line.strip().split()
self.pkgmanifest[pkg] = (version, arch)
except IOError as e:
bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
def _get_test_namespace(self):
return "runtime"
def _get_test_suites(self):
testsuites = []
manifests = (self.d.getVar("TEST_SUITES_MANIFEST") or '').split()
if manifests:
for manifest in manifests:
testsuites.extend(self._read_testlist(manifest,
self.d.getVar("TOPDIR")).split())
else:
testsuites = self.d.getVar("TEST_SUITES").split()
return testsuites
def _get_test_suites_required(self):
return [t for t in self.d.getVar("TEST_SUITES").split() if t != "auto"]
def loadTests(self):
super(RuntimeTestContext, self).loadTests()
if oeTest.hasPackage("procps"):
oeRuntimeTest.pscmd = "ps -ef"
def extract_packages(self):
modules = self.getTestModules()
bbpaths = self.d.getVar("BBPATH").split(":")
shutil.rmtree(self.d.getVar("TEST_EXTRACTED_DIR"))
shutil.rmtree(self.d.getVar("TEST_PACKAGED_DIR"))
for module in modules:
json_file = self._getJsonFile(module)
if json_file:
needed_packages = self._getNeededPackages(json_file)
self._perform_package_extraction(needed_packages)
def _perform_package_extraction(self, needed_packages):
import oe.path
extracted_path = self.d.getVar("TEST_EXTRACTED_DIR")
packaged_path = self.d.getVar("TEST_PACKAGED_DIR")
for key,value in needed_packages.items():
packages = ()
if isinstance(value, dict):
packages = (value, )
elif isinstance(value, list):
packages = value
else:
bb.fatal("Failed to process needed packages for %s; "
"Value must be a dict or list" % key)
for package in packages:
pkg = package["pkg"]
rm = package.get("rm", False)
extract = package.get("extract", True)
if extract:
dst_dir = os.path.join(extracted_path, pkg)
else:
dst_dir = os.path.join(packaged_path)
pkg_dir = self._extract_in_tmpdir(pkg)
if extract:
if os.path.exists(dst_dir):
continue
oe.path.copytree(pkg_dir, dst_dir)
shutil.rmtree(pkg_dir)
# Copy package to TEST_PACKAGED_DIR
else:
self._copy_package(pkg)
def _getJsonFile(self, module):
module_file = module.path
json_file = "%s.json" % module_file.rsplit(".", 1)[0]
if os.path.isfile(module_file) and os.path.isfile(json_file):
return json_file
else:
return ""
def _getNeededPackages(self, json_file, test=None):
import json
needed_packages = {}
with open(json_file) as f:
test_packages = json.load(f)
for key,value in test_packages.items():
needed_packages[key] = value
if test:
if test in needed_packages:
needed_packages = needed_packages[test]
else:
needed_packages = {}
return needed_packages
def _extract_in_tmpdir(self, pkg):
from oeqa.utils.package_manager import get_package_manager
pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
pm = get_package_manager(self.d, pkg_path)
extract_dir = pm.extract(pkg)
shutil.rmtree(pkg_path)
return extract_dir
def _copy_package(self, pkg):
from oeqa.utils.package_manager import get_package_manager
pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
dst_dir = self.d.getVar("TEST_PACKAGED_DIR")
pm = get_package_manager(self.d, pkg_path)
pkg_info = pm.package_info(pkg)
file_path = pkg_info[pkg]["filepath"]
shutil.copy2(file_path, dst_dir)
shutil.rmtree(pkg_path)
def install_uninstall_packages(self, test_id, pkg_dir, install):
test = test_id.split(".")[4]
module = self.getModulefromID(test_id)
json = self._getJsonFile(module)
if json:
needed_packages = self._getNeededPackages(json, test)
if needed_packages:
self._install_uninstall_packages(needed_packages, pkg_dir, install)
def _install_uninstall_packages(self, needed_packages, pkg_dir, install=True):
if isinstance(needed_packages, dict):
packages = [needed_packages]
elif isinstance(needed_packages, list):
packages = needed_packages
for package in packages:
pkg = package["pkg"]
rm = package.get("rm", False)
extract = package.get("extract", True)
src_dir = os.path.join(pkg_dir, pkg)
# Install package
if install and extract:
self.target.connection.copy_dir_to(src_dir, "/")
# Uninstall package
elif not install and rm:
self.target.connection.delete_dir_structure(src_dir, "/")
class ImageTestContext(RuntimeTestContext):
def __init__(self, d, target, host_dumper):
super(ImageTestContext, self).__init__(d, target)
self.tagexp = d.getVar("TEST_SUITES_TAGS")
self.host_dumper = host_dumper
self.sigterm = False
self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self._sigterm_exception)
def _sigterm_exception(self, signum, stackframe):
bb.warn("TestImage received SIGTERM, shutting down...")
self.sigterm = True
self.target.stop()
def install_uninstall_packages(self, test_id, install=True):
pkg_dir = self.d.getVar("TEST_EXTRACTED_DIR")
super(ImageTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
class ExportTestContext(RuntimeTestContext):
def __init__(self, d, target, exported=False, parsedArgs={}):
super(ExportTestContext, self).__init__(d, target, exported)
tag = parsedArgs.get("tag", None)
self.tagexp = tag if tag != None else d.getVar("TEST_SUITES_TAGS")
self.sigterm = None
def install_uninstall_packages(self, test_id, install=True):
export_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
extracted_dir = self.d.getVar("TEST_EXPORT_EXTRACTED_DIR")
pkg_dir = os.path.join(export_dir, extracted_dir)
super(ExportTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
| true
| true
|
f71712d716cabab7ce71af1445f2108a94cac93f
| 2,041
|
py
|
Python
|
Support/Fuego/Pythia/pythia-0.5/packages/fuego/fuego/serialization/chemkin/unpickle/tokens/RegularExpressions.py
|
balos1/PelePhysics
|
d01190cc7b0eaad4ec96fac573034ccb485f0e9f
|
[
"BSD-3-Clause-LBNL"
] | 31
|
2018-11-21T01:49:06.000Z
|
2022-03-30T03:41:43.000Z
|
Support/Fuego/Pythia/pythia-0.5/packages/fuego/fuego/serialization/chemkin/unpickle/tokens/RegularExpressions.py
|
balos1/PelePhysics
|
d01190cc7b0eaad4ec96fac573034ccb485f0e9f
|
[
"BSD-3-Clause-LBNL"
] | 123
|
2019-03-12T22:27:29.000Z
|
2022-03-29T17:00:04.000Z
|
Support/Fuego/Pythia/pythia-0.5/packages/fuego/fuego/serialization/chemkin/unpickle/tokens/RegularExpressions.py
|
sundials-codes/PelePhysics
|
5624f83a04f43aa95288be9d8a7bb372a4adefe6
|
[
"BSD-3-Clause-LBNL"
] | 32
|
2018-11-05T11:51:59.000Z
|
2022-03-29T13:09:32.000Z
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Common regular expressions
eol = r"$"
whitespace = r"\s+"
whitespaceOpt = r"\s*"
element = r"[A-Za-z][\w+-]?"
namedElement = r"(?P<%s>" + element + r")"
species = r"[A-Za-z(][\w()=*,-]{0,15}[+]*"
coeff = r"\d+[.]?\d*"
coeffOpt = r"\d+[.]?\d*"
number = r"[+-]?(\d+[.]\d*|[.]\d+|\d+)([eE][-+]?\d{1,3})?"
numberOpt = r"(" + number + r")*"
namedNumber = r"(?P<%s>" + number + r")"
namedNumbers_3 = (
namedNumber + whitespace + namedNumber + whitespace + namedNumber
)
inlineNumber = r"/" + whitespaceOpt + number + whitespaceOpt + "/"
inlineNumbers = (
r"/"
+ whitespaceOpt
+ number
+ r"("
+ whitespace
+ number
+ ")*"
+ whitespaceOpt
+ "/"
)
namedInlineNumber = r"/" + whitespaceOpt + namedNumber + whitespaceOpt + "/"
namedInlineNumbers = (
r"/"
+ whitespaceOpt
+ r"(?P<%s>"
+ number
+ r"("
+ whitespace
+ number
+ r")*"
+ r")"
+ whitespaceOpt
+ "/"
)
namedInlineParameters = (
r"/"
+ whitespaceOpt
+ r"(?P<%s>"
+ r"(%s|%s)" % (species, number)
+ r"("
+ whitespace
+ r"(%s|%s)" % (species, number)
+ r")*"
+ r")"
+ whitespaceOpt
+ "/"
)
# Simpler version of the above
# namedInlineParameters = r"/" + whitespaceOpt + "(?P<%s>[^/]+)" + whitespaceOpt + r"/"
namedInlineNumbers_2 = (
r"/"
+ whitespaceOpt
+ namedNumber
+ whitespace
+ namedNumber
+ whitespaceOpt
+ "/"
)
namedInlineNumbers_3 = (
r"/"
+ whitespaceOpt
+ namedNumber
+ whitespace
+ namedNumber
+ whitespace
+ namedNumber
+ whitespaceOpt
+ "/"
)
# version
__id__ = "$Id$"
#
# End of file
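# --- Illustrative sketch, not part of the original file ---
# The "named*" patterns above leave a %s placeholder for the group name
# and are instantiated before compiling. The group names "A" and "Ea"
# below are arbitrary examples.
import re
pattern = re.compile(namedNumber % "A" + whitespace + namedNumber % "Ea")
match = pattern.match("1.3e7 45000.0")
if match:
    print(match.group("A"), match.group("Ea"))  # -> 1.3e7 45000.0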
| 18.898148
| 87
| 0.468398
|
eol = r"$"
whitespace = r"\s+"
whitespaceOpt = r"\s*"
element = r"[A-Za-z][\w+-]?"
namedElement = r"(?P<%s>" + element + r")"
species = r"[A-Za-z(][\w()=*,-]{0,15}[+]*"
coeff = r"\d+[.]?\d*"
coeffOpt = r"\d+[.]?\d*"
number = r"[+-]?(\d+[.]\d*|[.]\d+|\d+)([eE][-+]?\d{1,3})?"
numberOpt = r"(" + number + r")*"
namedNumber = r"(?P<%s>" + number + r")"
namedNumbers_3 = (
namedNumber + whitespace + namedNumber + whitespace + namedNumber
)
inlineNumber = r"/" + whitespaceOpt + number + whitespaceOpt + "/"
inlineNumbers = (
r"/"
+ whitespaceOpt
+ number
+ r"("
+ whitespace
+ number
+ ")*"
+ whitespaceOpt
+ "/"
)
namedInlineNumber = r"/" + whitespaceOpt + namedNumber + whitespaceOpt + "/"
namedInlineNumbers = (
r"/"
+ whitespaceOpt
+ r"(?P<%s>"
+ number
+ r"("
+ whitespace
+ number
+ r")*"
+ r")"
+ whitespaceOpt
+ "/"
)
namedInlineParameters = (
r"/"
+ whitespaceOpt
+ r"(?P<%s>"
+ r"(%s|%s)" % (species, number)
+ r"("
+ whitespace
+ r"(%s|%s)" % (species, number)
+ r")*"
+ r")"
+ whitespaceOpt
+ "/"
)
namedInlineNumbers_2 = (
r"/"
+ whitespaceOpt
+ namedNumber
+ whitespace
+ namedNumber
+ whitespaceOpt
+ "/"
)
namedInlineNumbers_3 = (
r"/"
+ whitespaceOpt
+ namedNumber
+ whitespace
+ namedNumber
+ whitespace
+ namedNumber
+ whitespaceOpt
+ "/"
)
__id__ = "$Id$"
| true
| true
|
f7171371220cb2defcde3ccbb021f461672d109a
| 258
|
py
|
Python
|
6 kyu/PI approximation.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 6
|
2020-09-03T09:32:25.000Z
|
2020-12-07T04:10:01.000Z
|
6 kyu/PI approximation.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 1
|
2021-12-13T15:30:21.000Z
|
2021-12-13T15:30:21.000Z
|
6 kyu/PI approximation.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | null | null | null |
import math
# Approximate pi with the Leibniz series pi/4 = 1 - 1/3 + 1/5 - ...,
# iterating until the estimate is within epsilon of math.pi.
# Returns [number of terms used, estimate rounded to 10 decimals].
def iter_pi(epsilon):
down=1
res=0
count=0
while abs(res*4-math.pi)>epsilon:
if count%2==0:
res+=1/down
else:
res-=1/down
count+=1
down+=2
return [count, round(res*4,10)]
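# --- Illustrative sketch, not part of the original file ---
# Example run: 10 terms of the series already bring the estimate
# within 0.1 of math.pi.
if __name__ == "__main__":
    print(iter_pi(0.1))  # -> [10, 3.0418396189]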
| 19.846154
| 37
| 0.5
|
import math
def iter_pi(epsilon):
down=1
res=0
count=0
while abs(res*4-math.pi)>epsilon:
if count%2==0:
res+=1/down
else:
res-=1/down
count+=1
down+=2
return [count, round(res*4,10)]
| true
| true
|
f71713810aaf3fe2f07db20e361f46b522b96650
| 1,562
|
py
|
Python
|
newsletter/src/newsletter.py
|
emilybache/BeeFriendly
|
7582d8f7140f2d0088404d9cb1d47b6231606c49
|
[
"MIT"
] | 4
|
2020-06-14T13:42:39.000Z
|
2022-01-29T14:36:53.000Z
|
newsletter/src/newsletter.py
|
emilybache/BeeFriendly
|
7582d8f7140f2d0088404d9cb1d47b6231606c49
|
[
"MIT"
] | 2
|
2022-02-13T15:19:44.000Z
|
2022-02-25T12:32:16.000Z
|
newsletter/src/newsletter.py
|
emilybache/BeeFriendly
|
7582d8f7140f2d0088404d9cb1d47b6231606c49
|
[
"MIT"
] | 4
|
2020-06-11T09:31:45.000Z
|
2021-08-17T14:00:41.000Z
|
import sys
import json
import requests
from flask import Flask
from flask import request
from tracing import init_tracer, flask_to_scope
import opentracing
from opentracing.ext import tags
from opentracing_instrumentation.client_hooks import install_all_patches
from flask_opentracing import FlaskTracer
from flask_cors import CORS, cross_origin
app = Flask('newsletter')
init_tracer('newsletter')
install_all_patches()
CORS(app)
flask_tracer = FlaskTracer(opentracing.tracer, True, app)
@app.route("/sayHello/<name>")
@cross_origin()
def say_hello(name):
with flask_to_scope(flask_tracer, request) as scope:
person = get_person(name)
resp = format_greeting(person)
opentracing.tracer.active_span.set_tag('response', resp)
return resp
def get_person(name):
with opentracing.tracer.start_active_span(
'get-person',
) as scope:
url = 'http://localhost:3001/getPerson/%s' % name
res = _get(url)
person = json.loads(res)
scope.span.log_kv({
'name': person['name'],
'title': person['title'],
'description': person['description'],
})
return person
def format_greeting(person):
with opentracing.tracer.start_active_span(
'format-greeting',
):
url = 'http://localhost:3002/formatGreeting'
return _get(url, params=person)
def _get(url, params=None):
r = requests.get(url, params=params)
assert r.status_code == 200
return r.text
if __name__ == "__main__":
app.run(port=3000)
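# --- Illustrative sketch, not part of the original file ---
# Smoke test for the call chain above, assuming the downstream getPerson
# (port 3001) and formatGreeting (port 3002) services are running; the
# name "Ada" is an arbitrary example:
#   $ curl http://localhost:3000/sayHello/Ada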
| 25.193548
| 72
| 0.68758
|
import sys
import json
import requests
from flask import Flask
from flask import request
from tracing import init_tracer, flask_to_scope
import opentracing
from opentracing.ext import tags
from opentracing_instrumentation.client_hooks import install_all_patches
from flask_opentracing import FlaskTracer
from flask_cors import CORS, cross_origin
app = Flask('newsletter')
init_tracer('newsletter')
install_all_patches()
CORS(app)
flask_tracer = FlaskTracer(opentracing.tracer, True, app)
@app.route("/sayHello/<name>")
@cross_origin()
def say_hello(name):
with flask_to_scope(flask_tracer, request) as scope:
person = get_person(name)
resp = format_greeting(person)
opentracing.tracer.active_span.set_tag('response', resp)
return resp
def get_person(name):
with opentracing.tracer.start_active_span(
'get-person',
) as scope:
url = 'http://localhost:3001/getPerson/%s' % name
res = _get(url)
person = json.loads(res)
scope.span.log_kv({
'name': person['name'],
'title': person['title'],
'description': person['description'],
})
return person
def format_greeting(person):
with opentracing.tracer.start_active_span(
'format-greeting',
):
url = 'http://localhost:3002/formatGreeting'
return _get(url, params=person)
def _get(url, params=None):
r = requests.get(url, params=params)
assert r.status_code == 200
return r.text
if __name__ == "__main__":
app.run(port=3000)
| true
| true
|
f71713d9bb5d058485271de369582288349ed788
| 5,787
|
py
|
Python
|
monkelabs.py
|
SolanaNFTCollector/Solana-Mining-Bot
|
026fa825ba92b0f6d52678cd425d08f63f9e523d
|
[
"MIT"
] | 26
|
2022-02-13T23:31:49.000Z
|
2022-03-31T21:36:28.000Z
|
monkelabs.py
|
SolanaNFTCollector/Solana-Mining-Bot
|
026fa825ba92b0f6d52678cd425d08f63f9e523d
|
[
"MIT"
] | 7
|
2022-02-13T23:33:01.000Z
|
2022-03-13T20:49:40.000Z
|
monkelabs.py
|
SolanaNFTCollector/Solana-Mining-Bot
|
026fa825ba92b0f6d52678cd425d08f63f9e523d
|
[
"MIT"
] | 9
|
2022-02-13T23:31:51.000Z
|
2022-03-30T08:50:10.000Z
|
import requests
import base64
import time
import os
import pathlib
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from webdriver_manager.chrome import ChromeDriverManager
def mint(values, isWindows):
def initWallet():
# add wallet to chrome
driver.switch_to.window(driver.window_handles[1])
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(),'Use Secret Recovery Phrase')]")))
recovery_phrase = driver.find_element(
By.XPATH, "//button[contains(text(),'Use Secret Recovery Phrase')]").click()
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//textarea[@placeholder='Secret phrase']")))
text_area = driver.find_element(
By.XPATH, "//textarea[@placeholder='Secret phrase']").send_keys(values[1])
import_btn = driver.find_element(
By.XPATH, "//button[@class='sc-bdfBQB bzlPNH']").click()
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//input[@placeholder='Password']")))
password1 = driver.find_element(
By.XPATH, "//input[@placeholder='Password']").send_keys(values[2])
password2 = driver.find_element(
By.XPATH, "//input[@placeholder='Confirm Password']").send_keys(values[2])
check_box = driver.find_element(
By.XPATH, "//input[@type='checkbox']").click()
submit = driver.find_element(
By.XPATH, "//button[@type='submit']").click()
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(),'Continue')]")))
continue_ = driver.find_element(
By.XPATH, "//button[contains(text(),'Continue')]")
driver.execute_script("arguments[0].click();", continue_)
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(),'Finish')]")))
finish = driver.find_element(
By.XPATH, "//button[contains(text(),'Finish')]")
driver.execute_script("arguments[0].click();", finish)
main_window = driver.window_handles[0]
driver.switch_to.window(main_window)
return main_window
def selectWallet():
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[span[contains(text(), 'Connect Wallet')]]")))
select_wallet = driver.find_element(
By.XPATH, "//button[span[contains(text(), 'Connect Wallet')]]")
select_wallet.click()
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[span[contains(text(), 'Phantom')]]")))
phantom = driver.find_element(
By.XPATH, "//button[span[contains(text(), 'Phantom')]]")
phantom.click()
original_window = driver.current_window_handle
WebDriverWait(driver, 60).until(EC.number_of_windows_to_be(2))
for window_handle in driver.window_handles:
if window_handle != original_window:
driver.switch_to.window(window_handle)
break
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(),'Connect')]")))
popup_connect = driver.find_element(
By.XPATH, "//button[contains(text(),'Connect')]")
popup_connect.click()
driver.switch_to.window(main_window)
def awaitMint():
WebDriverWait(driver, 60*60*24).until(EC.presence_of_element_located(
(By.XPATH, "//button[span[contains(text(), 'MINT')]]")))
mint_your_token = driver.find_element(
By.XPATH, "//button[span[contains(text(), 'MINT')]]")
mint_your_token.click()
original_window = driver.current_window_handle
WebDriverWait(driver, 60).until(EC.number_of_windows_to_be(2))
for window_handle in driver.window_handles:
if window_handle != original_window:
driver.switch_to.window(window_handle)
break
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(), 'Approve')]")))
approve = driver.find_element(
By.XPATH, "//button[contains(text(), 'Approve')]")
approve.click()
print("Bot started")
if isWindows:
print("OS : Windows")
else:
print("OS : Mac")
options = Options()
options.add_extension("Phantom.crx")
options.add_argument("--disable-gpu")
    # To keep the window open after minting, uncomment the option below;
    # side effect: it will leave a lot of Chrome windows open.
    #options.add_experimental_option("detach", True)
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(
executable_path=ChromeDriverManager().install(), options=options)
print("Assertion - successfully found chrome driver")
# opens the launchpad page
driver.get(values[0])
driver.maximize_window()
# Actions - Initialize wallet
main_window = initWallet()
# Actions - select wallet on magic eden
selectWallet()
# Actions - close popup
# closePopup()
# Actions - MINTS WHEN TIMER IS UP
awaitMint()
print("Minting Finished")
| 40.753521
| 106
| 0.655953
|
import requests
import base64
import time
import os
import pathlib
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from webdriver_manager.chrome import ChromeDriverManager
def mint(values, isWindows):
def initWallet():
driver.switch_to.window(driver.window_handles[1])
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(),'Use Secret Recovery Phrase')]")))
recovery_phrase = driver.find_element(
By.XPATH, "//button[contains(text(),'Use Secret Recovery Phrase')]").click()
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//textarea[@placeholder='Secret phrase']")))
text_area = driver.find_element(
By.XPATH, "//textarea[@placeholder='Secret phrase']").send_keys(values[1])
import_btn = driver.find_element(
By.XPATH, "//button[@class='sc-bdfBQB bzlPNH']").click()
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//input[@placeholder='Password']")))
password1 = driver.find_element(
By.XPATH, "//input[@placeholder='Password']").send_keys(values[2])
password2 = driver.find_element(
By.XPATH, "//input[@placeholder='Confirm Password']").send_keys(values[2])
check_box = driver.find_element(
By.XPATH, "//input[@type='checkbox']").click()
submit = driver.find_element(
By.XPATH, "//button[@type='submit']").click()
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(),'Continue')]")))
continue_ = driver.find_element(
By.XPATH, "//button[contains(text(),'Continue')]")
driver.execute_script("arguments[0].click();", continue_)
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(),'Finish')]")))
finish = driver.find_element(
By.XPATH, "//button[contains(text(),'Finish')]")
driver.execute_script("arguments[0].click();", finish)
main_window = driver.window_handles[0]
driver.switch_to.window(main_window)
return main_window
def selectWallet():
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[span[contains(text(), 'Connect Wallet')]]")))
select_wallet = driver.find_element(
By.XPATH, "//button[span[contains(text(), 'Connect Wallet')]]")
select_wallet.click()
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[span[contains(text(), 'Phantom')]]")))
phantom = driver.find_element(
By.XPATH, "//button[span[contains(text(), 'Phantom')]]")
phantom.click()
original_window = driver.current_window_handle
WebDriverWait(driver, 60).until(EC.number_of_windows_to_be(2))
for window_handle in driver.window_handles:
if window_handle != original_window:
driver.switch_to.window(window_handle)
break
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(),'Connect')]")))
popup_connect = driver.find_element(
By.XPATH, "//button[contains(text(),'Connect')]")
popup_connect.click()
driver.switch_to.window(main_window)
def awaitMint():
WebDriverWait(driver, 60*60*24).until(EC.presence_of_element_located(
(By.XPATH, "//button[span[contains(text(), 'MINT')]]")))
mint_your_token = driver.find_element(
By.XPATH, "//button[span[contains(text(), 'MINT')]]")
mint_your_token.click()
original_window = driver.current_window_handle
WebDriverWait(driver, 60).until(EC.number_of_windows_to_be(2))
for window_handle in driver.window_handles:
if window_handle != original_window:
driver.switch_to.window(window_handle)
break
WebDriverWait(driver, 60).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(text(), 'Approve')]")))
approve = driver.find_element(
By.XPATH, "//button[contains(text(), 'Approve')]")
approve.click()
print("Bot started")
if isWindows:
print("OS : Windows")
else:
print("OS : Mac")
options = Options()
options.add_extension("Phantom.crx")
options.add_argument("--disable-gpu")
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(
executable_path=ChromeDriverManager().install(), options=options)
print("Assertion - successfully found chrome driver")
driver.get(values[0])
driver.maximize_window()
main_window = initWallet()
selectWallet()
awaitMint()
print("Minting Finished")
| true
| true
|
f71713f48e400918eea9548ed6280ac938049c7b
| 8,414
|
py
|
Python
|
nmt/onmt/translate/Translator.py
|
Priyansh2/csnli
|
de31f3f5ae0a956496b76a4643fa9ce7f3736d29
|
[
"MIT"
] | 21
|
2018-08-29T13:56:35.000Z
|
2021-07-12T23:25:14.000Z
|
nmt/onmt/translate/Translator.py
|
Priyansh2/csnli
|
de31f3f5ae0a956496b76a4643fa9ce7f3736d29
|
[
"MIT"
] | 4
|
2018-12-08T17:33:31.000Z
|
2021-05-16T08:41:16.000Z
|
nmt/onmt/translate/Translator.py
|
Priyansh2/csnli
|
de31f3f5ae0a956496b76a4643fa9ce7f3736d29
|
[
"MIT"
] | 10
|
2018-09-17T05:27:09.000Z
|
2021-11-01T08:18:30.000Z
|
import torch
from torch.autograd import Variable
import onmt.translate.Beam
import onmt.io
class Translator(object):
"""
Uses a model to translate a batch of sentences.
Args:
model (:obj:`onmt.modules.NMTModel`):
NMT model to use for translation
fields (dict of Fields): data fields
beam_size (int): size of beam to use
n_best (int): number of translations produced
max_length (int): maximum length output to produce
global_scores (:obj:`GlobalScorer`):
object to rescore final translations
copy_attn (bool): use copy attention during translation
cuda (bool): use cuda
beam_trace (bool): trace beam search for debugging
"""
def __init__(self, model, fields,
beam_size, n_best=1,
max_length=100,
global_scorer=None,
copy_attn=False,
cuda=False,
beam_trace=False,
min_length=0,
stepwise_penalty=False):
self.model = model
self.fields = fields
self.n_best = n_best
self.max_length = max_length
self.global_scorer = global_scorer
self.copy_attn = copy_attn
self.beam_size = beam_size
self.cuda = cuda
self.min_length = min_length
self.stepwise_penalty = stepwise_penalty
# for debugging
self.beam_accum = None
if beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def translate_batch(self, batch, data):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
Todo:
Shouldn't need the original dataset.
"""
# (0) Prep each of the components of the search.
# And helper method for reducing verbosity.
beam_size = self.beam_size
batch_size = batch.batch_size
data_type = data.data_type
vocab = self.fields["tgt"].vocab
beam = [onmt.translate.Beam(beam_size, n_best=self.n_best,
cuda=self.cuda,
global_scorer=self.global_scorer,
pad=vocab.stoi[onmt.io.PAD_WORD],
eos=vocab.stoi[onmt.io.EOS_WORD],
bos=vocab.stoi[onmt.io.BOS_WORD],
min_length=self.min_length,
stepwise_penalty=self.stepwise_penalty)
for __ in range(batch_size)]
# Help functions for working with beams and batches
def var(a): return Variable(a, volatile=True)
def rvar(a): return var(a.repeat(1, beam_size, 1))
def bottle(m):
return m.view(batch_size * beam_size, -1)
def unbottle(m):
return m.view(beam_size, batch_size, -1)
# (1) Run the encoder on the src.
src = onmt.io.make_features(batch, 'src', data_type)
src_lengths = None
if data_type == 'text':
_, src_lengths = batch.src
enc_states, memory_bank = self.model.encoder(src, src_lengths)
dec_states = self.model.decoder.init_decoder_state(
src, memory_bank, enc_states)
if src_lengths is None:
src_lengths = torch.Tensor(batch_size).type_as(memory_bank.data)\
.long()\
.fill_(memory_bank.size(0))
# (2) Repeat src objects `beam_size` times.
src_map = rvar(batch.src_map.data) \
if data_type == 'text' and self.copy_attn else None
memory_bank = rvar(memory_bank.data)
memory_lengths = src_lengths.repeat(beam_size)
dec_states.repeat_beam_size_times(beam_size)
# (3) run the decoder to generate sentences, using beam search.
for i in range(self.max_length):
if all((b.done() for b in beam)):
break
# Construct batch x beam_size nxt words.
# Get all the pending current beam words and arrange for forward.
inp = var(torch.stack([b.get_current_state() for b in beam])
.t().contiguous().view(1, -1))
# Turn any copied words to UNKs
# 0 is unk
if self.copy_attn:
inp = inp.masked_fill(
inp.gt(len(self.fields["tgt"].vocab) - 1), 0)
# Temporary kludge solution to handle changed dim expectation
# in the decoder
inp = inp.unsqueeze(2)
# Run one step.
dec_out, dec_states, attn = self.model.decoder(
inp, memory_bank, dec_states, memory_lengths=memory_lengths)
dec_out = dec_out.squeeze(0)
# dec_out: beam x rnn_size
# (b) Compute a vector of batch x beam word scores.
if not self.copy_attn:
out = self.model.generator.forward(dec_out).data
out = unbottle(out)
# beam x tgt_vocab
beam_attn = unbottle(attn["std"])
else:
out = self.model.generator.forward(dec_out,
attn["copy"].squeeze(0),
src_map)
# beam x (tgt_vocab + extra_vocab)
out = data.collapse_copy_scores(
unbottle(out.data),
batch, self.fields["tgt"].vocab, data.src_vocabs)
# beam x tgt_vocab
out = out.log()
beam_attn = unbottle(attn["copy"])
# (c) Advance each beam.
for j, b in enumerate(beam):
b.advance(out[:, j],
beam_attn.data[:, j, :memory_lengths[j]])
dec_states.beam_update(j, b.get_current_origin(), beam_size)
# (4) Extract sentences from beam.
ret = self._from_beam(beam)
ret["gold_score"] = [0] * batch_size
if "tgt" in batch.__dict__:
ret["gold_score"] = self._run_target(batch, data)
ret["batch"] = batch
return ret
def _from_beam(self, beam):
ret = {"predictions": [],
"scores": [],
"attention": []}
for b in beam:
n_best = self.n_best
scores, ks = b.sort_finished(minimum=n_best)
hyps, attn = [], []
for i, (times, k) in enumerate(ks[:n_best]):
hyp, att = b.get_hyp(times, k)
hyps.append(hyp)
attn.append(att)
ret["predictions"].append(hyps)
ret["scores"].append(scores)
ret["attention"].append(attn)
return ret
def _run_target(self, batch, data):
data_type = data.data_type
if data_type == 'text':
_, src_lengths = batch.src
else:
src_lengths = None
src = onmt.io.make_features(batch, 'src', data_type)
tgt_in = onmt.io.make_features(batch, 'tgt')[:-1]
# (1) run the encoder on the src
enc_states, memory_bank = self.model.encoder(src, src_lengths)
dec_states = \
self.model.decoder.init_decoder_state(src, memory_bank, enc_states)
# (2) if a target is specified, compute the 'goldScore'
# (i.e. log likelihood) of the target under the model
tt = torch.cuda if self.cuda else torch
gold_scores = tt.FloatTensor(batch.batch_size).fill_(0)
dec_out, dec_states, attn = self.model.decoder(
tgt_in, memory_bank, dec_states, memory_lengths=src_lengths)
tgt_pad = self.fields["tgt"].vocab.stoi[onmt.io.PAD_WORD]
for dec, tgt in zip(dec_out, batch.tgt[1:].data):
# Log prob of each word.
out = self.model.generator.forward(dec)
tgt = tgt.unsqueeze(1)
scores = out.data.gather(1, tgt)
scores.masked_fill_(tgt.eq(tgt_pad), 0)
gold_scores += scores
return gold_scores
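# --- Illustrative sketch, not part of the original file ---
# Shape round-trip behind the bottle()/unbottle() helpers used in
# translate_batch() above; the sizes are arbitrary examples.
import torch
beam_size, batch_size, rnn_size = 5, 2, 4
flat = torch.zeros(batch_size * beam_size, rnn_size)  # what bottle() yields
beamed = flat.view(beam_size, batch_size, -1)         # what unbottle() yields
assert beamed.shape == (beam_size, batch_size, rnn_size)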
| 37.730942
| 79
| 0.537794
|
import torch
from torch.autograd import Variable
import onmt.translate.Beam
import onmt.io
class Translator(object):
def __init__(self, model, fields,
beam_size, n_best=1,
max_length=100,
global_scorer=None,
copy_attn=False,
cuda=False,
beam_trace=False,
min_length=0,
stepwise_penalty=False):
self.model = model
self.fields = fields
self.n_best = n_best
self.max_length = max_length
self.global_scorer = global_scorer
self.copy_attn = copy_attn
self.beam_size = beam_size
self.cuda = cuda
self.min_length = min_length
self.stepwise_penalty = stepwise_penalty
self.beam_accum = None
if beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def translate_batch(self, batch, data):
beam_size = self.beam_size
batch_size = batch.batch_size
data_type = data.data_type
vocab = self.fields["tgt"].vocab
beam = [onmt.translate.Beam(beam_size, n_best=self.n_best,
cuda=self.cuda,
global_scorer=self.global_scorer,
pad=vocab.stoi[onmt.io.PAD_WORD],
eos=vocab.stoi[onmt.io.EOS_WORD],
bos=vocab.stoi[onmt.io.BOS_WORD],
min_length=self.min_length,
stepwise_penalty=self.stepwise_penalty)
for __ in range(batch_size)]
def var(a): return Variable(a, volatile=True)
def rvar(a): return var(a.repeat(1, beam_size, 1))
def bottle(m):
return m.view(batch_size * beam_size, -1)
def unbottle(m):
return m.view(beam_size, batch_size, -1)
src = onmt.io.make_features(batch, 'src', data_type)
src_lengths = None
if data_type == 'text':
_, src_lengths = batch.src
enc_states, memory_bank = self.model.encoder(src, src_lengths)
dec_states = self.model.decoder.init_decoder_state(
src, memory_bank, enc_states)
if src_lengths is None:
src_lengths = torch.Tensor(batch_size).type_as(memory_bank.data)\
.long()\
.fill_(memory_bank.size(0))
src_map = rvar(batch.src_map.data) \
if data_type == 'text' and self.copy_attn else None
memory_bank = rvar(memory_bank.data)
memory_lengths = src_lengths.repeat(beam_size)
dec_states.repeat_beam_size_times(beam_size)
for i in range(self.max_length):
if all((b.done() for b in beam)):
break
inp = var(torch.stack([b.get_current_state() for b in beam])
.t().contiguous().view(1, -1))
if self.copy_attn:
inp = inp.masked_fill(
inp.gt(len(self.fields["tgt"].vocab) - 1), 0)
inp = inp.unsqueeze(2)
dec_out, dec_states, attn = self.model.decoder(
inp, memory_bank, dec_states, memory_lengths=memory_lengths)
dec_out = dec_out.squeeze(0)
if not self.copy_attn:
out = self.model.generator.forward(dec_out).data
out = unbottle(out)
beam_attn = unbottle(attn["std"])
else:
out = self.model.generator.forward(dec_out,
attn["copy"].squeeze(0),
src_map)
out = data.collapse_copy_scores(
unbottle(out.data),
batch, self.fields["tgt"].vocab, data.src_vocabs)
out = out.log()
beam_attn = unbottle(attn["copy"])
for j, b in enumerate(beam):
b.advance(out[:, j],
beam_attn.data[:, j, :memory_lengths[j]])
dec_states.beam_update(j, b.get_current_origin(), beam_size)
ret = self._from_beam(beam)
ret["gold_score"] = [0] * batch_size
if "tgt" in batch.__dict__:
ret["gold_score"] = self._run_target(batch, data)
ret["batch"] = batch
return ret
def _from_beam(self, beam):
ret = {"predictions": [],
"scores": [],
"attention": []}
for b in beam:
n_best = self.n_best
scores, ks = b.sort_finished(minimum=n_best)
hyps, attn = [], []
for i, (times, k) in enumerate(ks[:n_best]):
hyp, att = b.get_hyp(times, k)
hyps.append(hyp)
attn.append(att)
ret["predictions"].append(hyps)
ret["scores"].append(scores)
ret["attention"].append(attn)
return ret
def _run_target(self, batch, data):
data_type = data.data_type
if data_type == 'text':
_, src_lengths = batch.src
else:
src_lengths = None
src = onmt.io.make_features(batch, 'src', data_type)
tgt_in = onmt.io.make_features(batch, 'tgt')[:-1]
enc_states, memory_bank = self.model.encoder(src, src_lengths)
dec_states = \
self.model.decoder.init_decoder_state(src, memory_bank, enc_states)
tt = torch.cuda if self.cuda else torch
gold_scores = tt.FloatTensor(batch.batch_size).fill_(0)
dec_out, dec_states, attn = self.model.decoder(
tgt_in, memory_bank, dec_states, memory_lengths=src_lengths)
tgt_pad = self.fields["tgt"].vocab.stoi[onmt.io.PAD_WORD]
for dec, tgt in zip(dec_out, batch.tgt[1:].data):
out = self.model.generator.forward(dec)
tgt = tgt.unsqueeze(1)
scores = out.data.gather(1, tgt)
scores.masked_fill_(tgt.eq(tgt_pad), 0)
gold_scores += scores
return gold_scores
| true
| true
|
f71713fdfa11b82fe9b89f6419ce8da3b95a2129
| 389
|
py
|
Python
|
nov q3.py
|
Manthanc007/APS-2o2o
|
a84337c4e658a93b6c67515fa3ef59b09f2e5e94
|
[
"MIT"
] | null | null | null |
nov q3.py
|
Manthanc007/APS-2o2o
|
a84337c4e658a93b6c67515fa3ef59b09f2e5e94
|
[
"MIT"
] | null | null | null |
nov q3.py
|
Manthanc007/APS-2o2o
|
a84337c4e658a93b6c67515fa3ef59b09f2e5e94
|
[
"MIT"
] | null | null | null |
# Read one line of integers; l[0] is presumably the query count,
# and the queries are l[1:].
l=list(map(int,input().split()))
output=[]
# Powers of two 2^2 .. 2^25
l1=[ 2**i for i in range(2,26)]
a=1
l2=[]
# Jacobsthal-style recurrence a_i = 2^(i+2) - a_(i-1),
# yielding 3, 5, 11, 21, 43, ...
for i in range(len(l1)):
    a=l1[i]-a
    l2.append(a)
# Pad the fronts so that, for n >= 1, l2[n] is the n-th Jacobsthal
# number (0, 1, 1, 3, 5, 11, ...) and l1[n] is 2^n (l1[0] = 0 is a sentinel).
l2.insert(0,1)
l2.insert(0,1)
l1.insert(0,2)
l1.insert(0,0)
l2.insert(0,0)
# For each query n, output the pair (l2[n], l1[n]).
for i in range(1,len(l)):
    a1=l[i]
    output.append(l2[a1])
    output.append(l1[a1])
for i in range(len(output)):
    print(output[i],end=" ")
| 18.52381
| 33
| 0.55527
|
l=list(map(int,input().split()))
output=[]
l1=[ 2**i for i in range(2,26)]
a=1
l2=[]
for i in range(len(l1)):
a=l1[i]-a
l2.append(a)
l2.insert(0,1)
l2.insert(0,1)
l1.insert(0,2)
l1.insert(0,0)
l2.insert(0,0)
for i in range(1,len(l)):
a1=l[i]
output.append(l2[a1])
output.append(l1[a1])
for i in range(len(output)):
print(output[i],end=" ")
| true
| true
|
f71715f08d40ed8ad31bcc3c4ff219a7c0938071
| 615
|
py
|
Python
|
test/test_form_mutation.py
|
smalllark/mechanize
|
a514b8474ca1d65514f6c569f7b10a40906fb014
|
[
"BSD-3-Clause"
] | 3
|
2015-10-07T05:27:44.000Z
|
2017-07-01T05:18:01.000Z
|
test/test_form_mutation.py
|
smalllark/mechanize
|
a514b8474ca1d65514f6c569f7b10a40906fb014
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_form_mutation.py
|
smalllark/mechanize
|
a514b8474ca1d65514f6c569f7b10a40906fb014
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from unittest import TestCase
import mechanize
def first_form(text, base_uri="http://example.com/"):
return mechanize.ParseString(text, base_uri)[0]
class MutationTests(TestCase):
def test_add_textfield(self):
form = first_form('<input type="text" name="foo" value="bar" />')
more = first_form('<input type="text" name="spam" value="eggs" />')
combined = form.controls + more.controls
for control in more.controls:
control.add_to_form(form)
        self.assertEqual(form.controls, combined)
if __name__ == "__main__":
unittest.main()
| 25.625
| 75
| 0.673171
|
import unittest
from unittest import TestCase
import mechanize
def first_form(text, base_uri="http://example.com/"):
return mechanize.ParseString(text, base_uri)[0]
class MutationTests(TestCase):
def test_add_textfield(self):
form = first_form('<input type="text" name="foo" value="bar" />')
more = first_form('<input type="text" name="spam" value="eggs" />')
combined = form.controls + more.controls
for control in more.controls:
control.add_to_form(form)
        self.assertEqual(form.controls, combined)
if __name__ == "__main__":
unittest.main()
| true
| true
|
f71716f48a697e06f2c811b63ac0163ef6dae9d4
| 12,993
|
py
|
Python
|
OXTR_pathway.py
|
rribeiro-sci/OXTR
|
521f60be922e731757b74d90d9b00c4130c99575
|
[
"Apache-2.0"
] | null | null | null |
OXTR_pathway.py
|
rribeiro-sci/OXTR
|
521f60be922e731757b74d90d9b00c4130c99575
|
[
"Apache-2.0"
] | null | null | null |
OXTR_pathway.py
|
rribeiro-sci/OXTR
|
521f60be922e731757b74d90d9b00c4130c99575
|
[
"Apache-2.0"
] | null | null | null |
'''
THE OXYTOCIN RECEPTOR METABOLIC PATHWAY
VERSION 1.0
G alpha q11 coupled receptor
last modification 4 October 2020
References:
1. Chang, Chiung-wen, Ethan Poteet, John A. Schetz, Zeynep H. Gümüş,
and Harel Weinstein. 2009. “Towards a Quantitative Representation
of the Cell Signaling Mechanisms of Hallucinogens: Measurement and
Mathematical Modeling of 5-HT1A and 5-HT2A Receptor-Mediated ERK1/2
Activation.” Neuropharmacology 56 (Suppl 1): 213–25.
2. Keizer, J, and G W De Young. 1992. “Two Roles of Ca2+ in Agonist
Stimulated Ca2+ Oscillations.” Biophysical Journal 61 (3): 649–60.
'''
#!/usr/bin/env python
import os
import sys
import math
import numpy as np
import sympy
from sympy import Piecewise
from pysb import *
from pysb.macros import *
from pysb.integrate import Solver
from pysb.simulator import ScipyOdeSimulator
from pysb.macros import create_t_obs, drug_binding
__author__ = "Rui Ribeiro"
__organization__ = "University of Verona"
__copyright__ = "Copyright 2020, Rui Ribeiro"
__credits__ = ["Rui Ribeiro","Pietro Micheli"]
__license__ = ""
__version__ = "1.0"
__maintainer__ = "Rui Ribeiro"
__email__ = "rui.ribeiro@univr.it"
__status__ = "Production"
USAGE = __doc__.format(__author__, __email__)
def network(time_in, time_out, kf_fold):
Model()
##TIME OBSERVABLE
components_time_obs = create_t_obs()
time_obs = components_time_obs.t
##SPECIES
#Receptor
R_conc = 1.4107
Monomer('R', ['R_b1', 'R_b2', 'R_s'], {'R_s':['inact', 'act']})
Parameter('R_init', R_conc * 0.73)
Initial(R(R_b1=None, R_b2=None, R_s='inact'), R_init)
Observable('obs_R', R(R_b1=None, R_b2=None, R_s='inact'))
#Ligand
L_conc = 0.1
Monomer('L', ['L_b1'])
Parameter('L_init', L_conc)
Initial(L(L_b1=None), L_init)
Observable('obs_L', L(L_b1=None))
#G-Protein
Monomer('Gq_a', ['Gq_a_b1', 'Gq_a_b2', 'Gq_a_s'], {'Gq_a_s' : ['GTP', 'GDP']})
Parameter('Gq_a_GDP_init', 0.0027739)
Parameter('Gq_a_GTP_init', 6.4172E-4)
Initial(Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GDP'), Gq_a_GDP_init)
Initial(Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP'), Gq_a_GTP_init)
Observable('obs_Gq_a_GDP', Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GDP'))
Observable('obs_Gq_a_GTP', Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP'))
Monomer('Gq_bg', ['Gq_bg_b1', 'Gq_bg_b2'])
Parameter('Gq_bg_init', 0.0037173)
Initial(Gq_bg(Gq_bg_b1=None, Gq_bg_b2=None), Gq_bg_init)
Observable('obs_Gq_bg', Gq_bg(Gq_bg_b1=None, Gq_bg_b2=None))
#RGS4
Monomer('RGS4', ['RGS4_b1'])
Parameter('RGS4_init', 0.019994)
Parameter('RGS4_Gq_a_GTP_init', 6.4168E-6)
Initial(RGS4(RGS4_b1=None), RGS4_init)
Initial(RGS4(RGS4_b1=50)%Gq_a(Gq_a_b1=50, Gq_a_b2=None, Gq_a_s='GTP'), RGS4_Gq_a_GTP_init)
#Ca2+
Monomer('Ca', ['Ca_b1'])
Parameter('Ca_init', 0.1)
Initial(Ca(Ca_b1=None), Ca_init)
Observable('obs_Ca', Ca(Ca_b1=None))
Monomer('CaER')
Parameter('CaER_init', 1)
Initial(CaER(), CaER_init)
Observable('obs_CaER', CaER())
#PLCb
Monomer('PLCb', ['PLCb_b1', 'PLCb_b2'])
Parameter('PLCb_init', 0.090022)
Parameter('PLCb_Gq_a_GTP_init', 1.4492E-4)
Parameter('PLCb_Ca_init', 0.0093825)
Parameter('PLCb_Ca_Gq_a_GTP_init', 1.5038E-4)
Initial(PLCb(PLCb_b1=None, PLCb_b2=None), PLCb_init)
Initial(PLCb(PLCb_b1=60, PLCb_b2=None)%Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP'), PLCb_Gq_a_GTP_init)
Initial(PLCb(PLCb_b1=None, PLCb_b2=70)%Ca(Ca_b1=70), PLCb_Ca_init)
Initial(Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=70)%Ca(Ca_b1=70), PLCb_Ca_Gq_a_GTP_init)
Observable('obs_PLCb', PLCb(PLCb_b1=None, PLCb_b2=None))
Observable('obs_PLCb_Ca_Gq_a_GTP', Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=70)%Ca(Ca_b1=70))
Observable('obs_PLCb_Gq_a_GTP', Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=None))
Observable('obs_PLCb_Ca', PLCb(PLCb_b1=None, PLCb_b2=70)%Ca(Ca_b1=70))
#PIP2
Monomer('PIP2', ['PIP2_b1'])
Parameter('PIP2_init', 2.6578)
Initial(PIP2(PIP2_b1=None), PIP2_init)
Observable('obs_PIP2', PIP2(PIP2_b1=None))
#IP3
Monomer('IP3', ['IP3_b1'])
Parameter('IP3_init', 0.21952)
Initial(IP3(IP3_b1=None), IP3_init)
Observable('obs_IP3', IP3(IP3_b1=None))
#DAG
Monomer('DAG', ['DAG_b1', 'DAG_s'], {'DAG_s' : ['act', 'inact']})
Parameter('DAG_init', 0.055555)
Initial(DAG(DAG_b1=None, DAG_s='inact'), DAG_init)
Observable('obs_DAG', DAG(DAG_b1=None, DAG_s='inact'))
#IP3R
Monomer('IP3R', ['IP3R_b1', 'IP3R_b2'])
Parameter('IP3R_init', 0.119)
Initial(IP3R(IP3R_b1=None, IP3R_b2=None), IP3R_init)
Observable('obs_IP3R', IP3R(IP3R_b1=None, IP3R_b2=None))
##Complexes
Parameter('R_Gq_trimer_init', R_conc*0.27)
Initial(R(R_b1=30, R_b2=None, R_s='inact')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None), R_Gq_trimer_init)
Parameter('R_L_Gq_trimer_init', 0)
Initial(R(R_b1=30, R_b2=50, R_s='act')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None)%L(L_b1=50), R_L_Gq_trimer_init)
Parameter('Gq_trimer_init', 0.61869)
Initial(Gq_a(Gq_a_b1=None, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None), Gq_trimer_init)
################################################################################################################################################################################
##RULES
#Receptor-Ligand
Expression('R_L_kf', Piecewise((0, time_obs > time_out),(1.00, time_obs > time_in),(0, True)))
Parameter('R_L_kr', 2.50)
Rule('R_L', R(R_b1=None, R_b2=None, R_s='inact') + L(L_b1=None) | R(R_b1=None, R_b2=50, R_s='act')%L(L_b1=50), R_L_kf, R_L_kr)
Observable('obs_RL', R(R_b1=None, R_b2=50, R_s='act')%L(L_b1=50))
#G-PROTEIN ACTIVATION
#R+Gtrimer
Parameter('R_Gq_trimer_kf', 1.00)
Parameter('R_Gq_trimer_kr', 1.67)
Rule('R_Gq_trimer', R(R_b1=None, R_b2=None, R_s='inact') + Gq_a(Gq_a_b1=None, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None) | R(R_b1=30, R_b2=None, R_s='inact')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None), R_Gq_trimer_kf, R_Gq_trimer_kr)
Observable('obs_RG', R(R_b1=30, R_b2=None, R_s='inact')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None))
#RL+Gtrimer
Expression('R_L_Gq_trimer_kf', 1.00*Parameter('fold', kf_fold))
Parameter('R_L_Gq_trimer_kr', 0.0046)
Rule('R_L_Gq_trimer', R(R_b1=None, R_b2=50, R_s='act')%L(L_b1=50) + Gq_a(Gq_a_b1=None, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None) | R(R_b1=30, R_b2=50, R_s='act')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None)%L(L_b1=50), R_L_Gq_trimer_kf, R_L_Gq_trimer_kr)
Observable('obs_trimer', R(R_b1=30, R_b2=50, R_s='act')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None)%L(L_b1=50))
#Gq_trimerization
Parameter('Gq_trimerization_k', 6.0)
Parameter('Gq_trimer_split_k', 0.0001)
Rule('Gq_trimerization', Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GDP') + Gq_bg(Gq_bg_b1=None, Gq_bg_b2=None) | Gq_a(Gq_a_b1=None, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None), Gq_trimerization_k, Gq_trimer_split_k)
#RL_Gq split
Parameter('R_L_Gq_trimer_split_k', 0.04)
Rule('R_L_Gq_trimer_split', R(R_b1=30, R_b2=50, R_s='act')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None)%L(L_b1=50) >> R(R_b1=None, R_b2=50, R_s='act')%L(L_b1=50) + Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP') + Gq_bg(Gq_bg_b1=None, Gq_bg_b2=None), R_L_Gq_trimer_split_k)
#Deactivation of Gq_a_GTP by RGS4
#RGS4 + Gq_a_GTP
Parameter('RGS4_Gq_a_GTP_kf', 20.83)
Parameter('RGS4_Gq_a_GTP_kr', 33.32)
Rule('RGS4_Gq_a_GTP', RGS4(RGS4_b1=None) + Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP') | RGS4(RGS4_b1=50)%Gq_a(Gq_a_b1=50, Gq_a_b2=None, Gq_a_s='GTP'), RGS4_Gq_a_GTP_kf, RGS4_Gq_a_GTP_kr)
#RGS4_Gq_a_GTP dissociation
Parameter('RGS4_Gq_a_GTP_diss_k', 8.33)
Rule('RGS4_Gq_a_GTP_diss', RGS4(RGS4_b1=50)%Gq_a(Gq_a_b1=50, Gq_a_b2=None, Gq_a_s='GTP') >> RGS4(RGS4_b1=None) + Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GDP'), RGS4_Gq_a_GTP_diss_k)
#Gq_a_GTP decay
Parameter('Gq_a_GTP_decay_k', 0.01)
Rule('Gq_a_GTP_decay', Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP') >> Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GDP'), Gq_a_GTP_decay_k)
#ACTIVATION OF PLCb
#Gq_a_GTP + PLCb
Parameter('Gq_a_GTP_PLCb_kf', 2.52)
Parameter('Gq_a_GTP_PLCb_kr', 1.00)
Rule('Gq_a_GTP_PLCb', Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP') + PLCb(PLCb_b1=None, PLCb_b2=None) | Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=None), Gq_a_GTP_PLCb_kf, Gq_a_GTP_PLCb_kr)
#Gq_a_GTP_PLCb + Ca
Parameter('Gq_a_GTP_PLCb_Ca_kf', 30.0)
Parameter('Gq_a_GTP_PLCb_Ca_kr', 1.00)
Rule('Gq_a_GTP_PLCb_Ca', Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=None) + Ca(Ca_b1=None) | Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=1)%Ca(Ca_b1=1), Gq_a_GTP_PLCb_Ca_kf, Gq_a_GTP_PLCb_Ca_kr)
#Gq_a_GTP_PLCb_Ca DECAY
Parameter('Gq_a_GTP_PLCb_Ca_decay_k', 0.013)
Rule('Gq_a_GTP_PLCb_Ca_diss', Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=1)%Ca(Ca_b1=1) >> Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GDP') + PLCb(PLCb_b1=None, PLCb_b2=1)%Ca(Ca_b1=1), Gq_a_GTP_PLCb_Ca_decay_k)
#PLCb + Ca
Parameter('PLCb_Ca_kf', 3.00)
Parameter('PLCb_Ca_kr', 1.00)
Rule('PLCb_Ca', PLCb(PLCb_b1=None, PLCb_b2=None) + Ca(Ca_b1=None) | PLCb(PLCb_b1=None, PLCb_b2=1)%Ca(Ca_b1=1), PLCb_Ca_kf, PLCb_Ca_kr)
#PLCb_Ca + Gq_a_GTP
Parameter('PLCb_Ca_Gq_a_GTP_kf', 25.2)
Parameter('PLCb_Ca_Gq_a_GTP_kr', 1.00)
Rule('PLCb_Ca_Gq_a_GTP', PLCb(PLCb_b1=None, PLCb_b2=1)%Ca(Ca_b1=1) + Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP') | Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=1)%Ca(Ca_b1=1), PLCb_Ca_Gq_a_GTP_kf, PLCb_Ca_Gq_a_GTP_kr)
#IP3 AND DAG PRODUCTION
#PIP2_PLCb_Ca
Expression('PIP2_PLCb_Ca_k', (10.0 * obs_PIP2)/(40.13 + obs_PIP2))
Rule('PIP2_PLCb_Ca', PLCb(PLCb_b1=None, PLCb_b2=1)%Ca(Ca_b1=1) >> IP3(IP3_b1=None) + DAG(DAG_b1=None, DAG_s='inact') + PLCb(PLCb_b1=None, PLCb_b2=1)%Ca(Ca_b1=1), PIP2_PLCb_Ca_k )
#PIP2_Gq_a_GTP_PLCb_Ca
Expression('PIP2_Gq_a_GTP_PLCb_Ca_k', (48.0 * obs_PIP2)/(5.00 + obs_PIP2))
Rule('PIP2_Gq_a_GTP_PLCb_Ca', Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=1)%Ca(Ca_b1=1) >> IP3(IP3_b1=None) + DAG(DAG_b1=None, DAG_s='inact') + Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=1)%Ca(Ca_b1=1), PIP2_Gq_a_GTP_PLCb_Ca_k )
#IP3 degradation
Expression('IP3_deg_k', (0.14*(obs_IP3 - IP3_init))/obs_IP3)
Rule('IP3_deg', IP3(IP3_b1=None) >> None, IP3_deg_k)
#DAG degradation
Parameter('DAG_deg_k', 0.15)
Rule('DAG_deg', DAG(DAG_b1=None, DAG_s='inact') >> None, DAG_deg_k)
#Kinetics of IP3R
#IP3R binds IP3
Parameter('IP3R_IP3_kf', 50)
Parameter('IPR3_IP3_kr', 6.5)
Rule('IP3R_IP3', IP3R(IP3R_b1=None, IP3R_b2=None) + IP3(IP3_b1=None) | IP3R(IP3R_b1=50, IP3R_b2=None)%IP3(IP3_b1=50), IP3R_IP3_kf, IPR3_IP3_kr)
Observable('obs_IP3R_IP3', IP3R(IP3R_b1=50, IP3R_b2=None)%IP3(IP3_b1=50))
#IP3R binds Ca
Parameter('IP3R_Ca_kf', 20.0)
Parameter('IPR3_Ca_kr', 0.0806)
Rule('IP3R_Ca', IP3R(IP3R_b1=None, IP3R_b2=None) + Ca(Ca_b1=None) | IP3R(IP3R_b1=None, IP3R_b2=30)%Ca(Ca_b1=30), IP3R_Ca_kf, IPR3_Ca_kr)
Observable('obs_IP3R_Ca', IP3R(IP3R_b1=None, IP3R_b2=30)%Ca(Ca_b1=30))
#IP3R:IP3 binds Ca
Parameter('IP3R_IP3_Ca_kf', 1.0)
Parameter('IPR3_IP3_Ca_kr', 0.5)
Rule('IP3R_IP3_Ca', IP3R(IP3R_b1=50, IP3R_b2=None)%IP3(IP3_b1=50) + Ca(Ca_b1=None) | IP3R(IP3R_b1=50, IP3R_b2=30)%IP3(IP3_b1=50)%Ca(Ca_b1=30), IP3R_IP3_Ca_kf, IPR3_IP3_Ca_kr)
#IP3R:Ca binds IP3
Parameter('IP3R_Ca_IP3_kf', 20.0)
Parameter('IPR3_Ca_IP3_kr', 14.5)
Rule('IP3_Ca_IP3', IP3R(IP3R_b1=None, IP3R_b2=30)%Ca(Ca_b1=30) + IP3(IP3_b1=None) | IP3R(IP3R_b1=50, IP3R_b2=30)%IP3(IP3_b1=50)%Ca(Ca_b1=30), IP3R_Ca_IP3_kf, IPR3_Ca_IP3_kr)
Observable('obs_IP3R_IP3_Ca', IP3R(IP3R_b1=50, IP3R_b2=30)%IP3(IP3_b1=50)%Ca(Ca_b1=30))
#ER Ca release
Parameter('v1', 800)
Parameter('v8', 0.15)
Parameter('v4', 0.5)
Parameter('k4', 0.09)
Parameter('c2', 0.185)
Expression('CaER_release_k', (c2*(v1*((obs_IP3R_IP3/IP3_init)**4)+v8)*(obs_CaER-obs_Ca)))
Rule('CaER_release', None >> Ca(Ca_b1=None), CaER_release_k)
Expression('Ca_inwards_k', (v4*(obs_Ca**2)/((obs_Ca**2)+(k4**2)))/obs_Ca)
Rule('Ca_inwards', Ca(Ca_b1=None) >> None, Ca_inwards_k)
return model
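A minimal usage sketch (not part of the original file): build the model once and integrate it with the ScipyOdeSimulator the module already imports. The argument values below are hypothetical; time_in/time_out bound the ligand-application window in the R_L_kf expression, and kf_fold scales the R:L-to-G-protein binding rate.

import numpy as np
from pysb.simulator import ScipyOdeSimulator

model = network(time_in=50, time_out=150, kf_fold=1.0)   # hypothetical values
tspan = np.linspace(0, 300, 3001)                        # hypothetical time grid
result = ScipyOdeSimulator(model, tspan=tspan).run()
ca_trace = result.observables['obs_Ca']                  # cytosolic Ca2+ trajectory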
| 47.076087
| 308
| 0.682444
|
import os
import sys
import math
import numpy as np
import sympy
from sympy import Piecewise
from pysb import *
from pysb.macros import *
from pysb.integrate import Solver
from pysb.simulator import ScipyOdeSimulator
from pysb.macros import create_t_obs, drug_binding
__author__ = "Rui Ribeiro"
__organization__ = "University of Verona"
__copyright__ = "Copyright 2020, Rui Ribeiro"
__credits__ = ["Rui Ribeiro","Pietro Micheli"]
__license__ = ""
__version__ = "1.0"
__maintainer__ = "Rui Ribeiro"
__email__ = "rui.ribeiro@univr.it"
__status__ = "Production"
USAGE = __doc__.format(__author__, __email__)
def network(time_in, time_out, kf_fold):
Model()
    components_time_obs = create_t_obs()
time_obs = components_time_obs.t
R_conc = 1.4107
Monomer('R', ['R_b1', 'R_b2', 'R_s'], {'R_s':['inact', 'act']})
Parameter('R_init', R_conc * 0.73)
Initial(R(R_b1=None, R_b2=None, R_s='inact'), R_init)
Observable('obs_R', R(R_b1=None, R_b2=None, R_s='inact'))
L_conc = 0.1
Monomer('L', ['L_b1'])
Parameter('L_init', L_conc)
Initial(L(L_b1=None), L_init)
Observable('obs_L', L(L_b1=None))
Monomer('Gq_a', ['Gq_a_b1', 'Gq_a_b2', 'Gq_a_s'], {'Gq_a_s' : ['GTP', 'GDP']})
Parameter('Gq_a_GDP_init', 0.0027739)
Parameter('Gq_a_GTP_init', 6.4172E-4)
Initial(Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GDP'), Gq_a_GDP_init)
Initial(Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP'), Gq_a_GTP_init)
Observable('obs_Gq_a_GDP', Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GDP'))
Observable('obs_Gq_a_GTP', Gq_a(Gq_a_b1=None, Gq_a_b2=None, Gq_a_s='GTP'))
Monomer('Gq_bg', ['Gq_bg_b1', 'Gq_bg_b2'])
Parameter('Gq_bg_init', 0.0037173)
Initial(Gq_bg(Gq_bg_b1=None, Gq_bg_b2=None), Gq_bg_init)
Observable('obs_Gq_bg', Gq_bg(Gq_bg_b1=None, Gq_bg_b2=None))
Monomer('RGS4', ['RGS4_b1'])
Parameter('RGS4_init', 0.019994)
Parameter('RGS4_Gq_a_GTP_init', 6.4168E-6)
Initial(RGS4(RGS4_b1=None), RGS4_init)
Initial(RGS4(RGS4_b1=50)%Gq_a(Gq_a_b1=50, Gq_a_b2=None, Gq_a_s='GTP'), RGS4_Gq_a_GTP_init)
Monomer('Ca', ['Ca_b1'])
Parameter('Ca_init', 0.1)
Initial(Ca(Ca_b1=None), Ca_init)
Observable('obs_Ca', Ca(Ca_b1=None))
Monomer('CaER')
Parameter('CaER_init', 1)
Initial(CaER(), CaER_init)
Observable('obs_CaER', CaER())
Monomer('PLCb', ['PLCb_b1', 'PLCb_b2'])
Parameter('PLCb_init', 0.090022)
Parameter('PLCb_Gq_a_GTP_init', 1.4492E-4)
Parameter('PLCb_Ca_init', 0.0093825)
Parameter('PLCb_Ca_Gq_a_GTP_init', 1.5038E-4)
Initial(PLCb(PLCb_b1=None, PLCb_b2=None), PLCb_init)
Initial(PLCb(PLCb_b1=60, PLCb_b2=None)%Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP'), PLCb_Gq_a_GTP_init)
Initial(PLCb(PLCb_b1=None, PLCb_b2=70)%Ca(Ca_b1=70), PLCb_Ca_init)
Initial(Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=70)%Ca(Ca_b1=70), PLCb_Ca_Gq_a_GTP_init)
Observable('obs_PLCb', PLCb(PLCb_b1=None, PLCb_b2=None))
Observable('obs_PLCb_Ca_Gq_a_GTP', Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=70)%Ca(Ca_b1=70))
Observable('obs_PLCb_Gq_a_GTP', Gq_a(Gq_a_b1=60, Gq_a_b2=None, Gq_a_s='GTP')%PLCb(PLCb_b1=60, PLCb_b2=None))
Observable('obs_PLCb_Ca', PLCb(PLCb_b1=None, PLCb_b2=70)%Ca(Ca_b1=70))
Monomer('PIP2', ['PIP2_b1'])
Parameter('PIP2_init', 2.6578)
Initial(PIP2(PIP2_b1=None), PIP2_init)
Observable('obs_PIP2', PIP2(PIP2_b1=None))
Monomer('IP3', ['IP3_b1'])
Parameter('IP3_init', 0.21952)
Initial(IP3(IP3_b1=None), IP3_init)
Observable('obs_IP3', IP3(IP3_b1=None))
Monomer('DAG', ['DAG_b1', 'DAG_s'], {'DAG_s' : ['act', 'inact']})
Parameter('DAG_init', 0.055555)
Initial(DAG(DAG_b1=None, DAG_s='inact'), DAG_init)
Observable('obs_DAG', DAG(DAG_b1=None, DAG_s='inact'))
Monomer('IP3R', ['IP3R_b1', 'IP3R_b2'])
Parameter('IP3R_init', 0.119)
Initial(IP3R(IP3R_b1=None, IP3R_b2=None), IP3R_init)
Observable('obs_IP3R', IP3R(IP3R_b1=None, IP3R_b2=None))
    Parameter('R_Gq_trimer_init', R_conc*0.27)
Initial(R(R_b1=30, R_b2=None, R_s='inact')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None), R_Gq_trimer_init)
Parameter('R_L_Gq_trimer_init', 0)
Initial(R(R_b1=30, R_b2=50, R_s='act')%Gq_a(Gq_a_b1=30, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None)%L(L_b1=50), R_L_Gq_trimer_init)
Parameter('Gq_trimer_init', 0.61869)
Initial(Gq_a(Gq_a_b1=None, Gq_a_b2=40, Gq_a_s='GDP')%Gq_bg(Gq_bg_b1=40, Gq_bg_b2=None), Gq_trimer_init)
| true
| true
|
f717181beb4c4046a045a44b6d0f4db857d85911
| 4,211
|
py
|
Python
|
forte/processors/stanfordnlp_processor.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | 2
|
2021-01-01T12:07:27.000Z
|
2021-09-10T03:57:18.000Z
|
forte/processors/stanfordnlp_processor.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | null | null | null |
forte/processors/stanfordnlp_processor.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Any, Dict
import stanza
from forte.common.configuration import Config
from forte.common.resources import Resources
from forte.data.data_pack import DataPack
from forte.processors.base import PackProcessor
from ft.onto.base_ontology import Token, Sentence, Dependency
__all__ = [
"StandfordNLPProcessor",
]
class StandfordNLPProcessor(PackProcessor):
def __init__(self):
super().__init__()
self.nlp = None
self.processors = set()
def set_up(self):
stanza.download(self.configs.lang, self.configs.dir)
self.processors = set(self.configs.processors.split(','))
# pylint: disable=unused-argument
def initialize(self, resources: Resources, configs: Config):
super().initialize(resources, configs)
self.set_up()
self.nlp = stanza.Pipeline(
lang=self.configs.lang,
dir=self.configs.dir,
use_gpu=self.configs.use_gpu,
processors=self.configs.processors,
)
@classmethod
def default_configs(cls) -> Dict[str, Any]:
"""
This defines a basic config structure for StanfordNLP.
:return:
"""
config = super().default_configs()
config.update(
{
'processors': 'tokenize,pos,lemma,depparse',
'lang': 'en',
# Language code for the language to build the Pipeline
'use_gpu': False,
'dir': '.',
})
return config
def _process(self, input_pack: DataPack):
doc = input_pack.text
if len(doc) == 0:
logging.warning("Find empty text in doc.")
# sentence parsing
sentences = self.nlp(doc).sentences
# Iterating through stanfordnlp sentence objects
for sentence in sentences:
Sentence(input_pack, sentence.tokens[0].start_char,
sentence.tokens[-1].end_char)
tokens: List[Token] = []
if "tokenize" in self.processors:
# Iterating through stanfordnlp word objects
for word in sentence.words:
misc = word.misc.split('|')
t_start = -1
t_end = -1
for m in misc:
k, v = m.split('=')
if k == 'start_char':
t_start = int(v)
elif k == 'end_char':
t_end = int(v)
if t_start < 0 or t_end < 0:
raise ValueError(
"Cannot determine word start or end for "
"stanfordnlp.")
token = Token(input_pack, t_start, t_end)
if "pos" in self.processors:
token.pos = word.pos
token.ud_xpos = word.xpos
if "lemma" in self.processors:
token.lemma = word.lemma
tokens.append(token)
# For each sentence, get the dependency relations among tokens
if "depparse" in self.processors:
# Iterating through token entries in current sentence
for token, word in zip(tokens, sentence.words):
child = token # current token
parent = tokens[word.head - 1] # Head token
relation_entry = Dependency(input_pack, parent, child)
relation_entry.rel_type = word.deprel
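A minimal usage sketch, not taken from the repository: it assumes Forte's standard Pipeline and StringReader API and the default configs defined above.

from forte.data.data_pack import DataPack
from forte.data.readers import StringReader
from forte.pipeline import Pipeline
from ft.onto.base_ontology import Sentence, Token

pl = Pipeline[DataPack]()
pl.set_reader(StringReader())
pl.add(StandfordNLPProcessor())   # set_up() downloads the stanza model on initialize
pl.initialize()

pack = pl.process("Forte wraps stanza. Tokens and sentences land in the pack.")
for sentence in pack.get(Sentence):
    print([token.text for token in pack.get(Token, sentence)])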
| 35.091667
| 74
| 0.566611
|
import logging
from typing import List, Any, Dict
import stanza
from forte.common.configuration import Config
from forte.common.resources import Resources
from forte.data.data_pack import DataPack
from forte.processors.base import PackProcessor
from ft.onto.base_ontology import Token, Sentence, Dependency
__all__ = [
"StandfordNLPProcessor",
]
class StandfordNLPProcessor(PackProcessor):
def __init__(self):
super().__init__()
self.nlp = None
self.processors = set()
def set_up(self):
stanza.download(self.configs.lang, self.configs.dir)
self.processors = set(self.configs.processors.split(','))
def initialize(self, resources: Resources, configs: Config):
super().initialize(resources, configs)
self.set_up()
self.nlp = stanza.Pipeline(
lang=self.configs.lang,
dir=self.configs.dir,
use_gpu=self.configs.use_gpu,
processors=self.configs.processors,
)
@classmethod
def default_configs(cls) -> Dict[str, Any]:
config = super().default_configs()
config.update(
{
'processors': 'tokenize,pos,lemma,depparse',
'lang': 'en',
'use_gpu': False,
'dir': '.',
})
return config
def _process(self, input_pack: DataPack):
doc = input_pack.text
if len(doc) == 0:
logging.warning("Find empty text in doc.")
sentences = self.nlp(doc).sentences
for sentence in sentences:
Sentence(input_pack, sentence.tokens[0].start_char,
sentence.tokens[-1].end_char)
tokens: List[Token] = []
if "tokenize" in self.processors:
for word in sentence.words:
misc = word.misc.split('|')
t_start = -1
t_end = -1
for m in misc:
k, v = m.split('=')
if k == 'start_char':
t_start = int(v)
elif k == 'end_char':
t_end = int(v)
if t_start < 0 or t_end < 0:
raise ValueError(
"Cannot determine word start or end for "
"stanfordnlp.")
token = Token(input_pack, t_start, t_end)
if "pos" in self.processors:
token.pos = word.pos
token.ud_xpos = word.xpos
if "lemma" in self.processors:
token.lemma = word.lemma
tokens.append(token)
if "depparse" in self.processors:
for token, word in zip(tokens, sentence.words):
child = token
parent = tokens[word.head - 1]
relation_entry = Dependency(input_pack, parent, child)
relation_entry.rel_type = word.deprel
| true
| true
|
f71718fb9e53b3bcc9413c4c7771e00095fa68d9
| 6,615
|
py
|
Python
|
tests/test_coverage_max_reduce.py
|
Erotemic/vtool_ibeis
|
b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5
|
[
"Apache-2.0"
] | 5
|
2015-04-17T11:27:00.000Z
|
2017-11-29T11:31:51.000Z
|
tests/test_coverage_max_reduce.py
|
Erotemic/vtool_ibeis
|
b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5
|
[
"Apache-2.0"
] | 2
|
2020-06-25T19:02:43.000Z
|
2020-06-30T19:33:27.000Z
|
tests/test_coverage_max_reduce.py
|
Erotemic/vtool_ibeis
|
b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5
|
[
"Apache-2.0"
] | 3
|
2016-07-04T18:22:56.000Z
|
2017-03-03T22:50:19.000Z
|
#Is it possible to use numpy.ufunc.reduce over an iterator of ndarrays?
#I have a generator function that yields ndarrays (all of the same shape and dtype) and I would like to find the maximum value at each index.
#Currently I have code that looks like this:
def main():
import numpy as np
import cv2
shape = (250, 300)
dsize = shape[::-1]
affmat_list = np.array([
[[ 1.57351554e+00, 0.00000000e+00, 1.09061039e+02],
[ -3.61827926e-01, 7.46059970e-01, 2.50669551e+01]],
[[ 3.05754491e+00, 0.00000000e+00, 8.28024922e+01],
[ -2.13866309e-01, 1.72124200e+00, 1.72744669e+02]],
[[ 2.58008254e+00, 0.00000000e+00, 1.52155447e+02],
[ -2.08041241e+00, 2.46195663e+00, 1.09493821e+02]],
[[ 2.01791864e+00, 0.00000000e+00, 2.45704669e+02],
[ -1.07590956e+00, 3.33499949e+00, 1.66233498e+02]],
[[ 3.32012638e+00, 0.00000000e+00, 1.03847866e+02],
[ -2.36557589e+00, 3.02063109e+00, 1.59907802e+02]],
[[ 4.94371474e+00, 0.00000000e+00, 7.92717193e+01],
[ -2.67846198e+00, 3.66854256e+00, 1.47888210e+02]]])
fx2_score = np.ones(len(affmat_list))
patch = np.array([
[ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014],
[ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],
[ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],
[ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],
[ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],
[ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],
[ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],
[ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],
[ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
[ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0038, 0.0038, 0.0039, 0.0038, 0.0038, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
[ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
[ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],
[ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],
[ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],
[ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],
[ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],
[ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],
[ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],
[ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014]
])
def warped_patch_generator():
padded_patch = np.zeros(shape, dtype=np.float32)
patch_h, patch_w = patch.shape
warped = np.zeros(shape, dtype=np.float32)
for count, (M, score) in enumerate(zip(affmat_list, fx2_score)):
print(count)
np.multiply(patch, score, out=padded_patch[:patch.shape[0], :patch.shape[1]] )
cv2.warpAffine(padded_patch, M, dsize, dst=warped,
flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
yield warped
#yield warped
print("THREE")
from six.moves import reduce
import functools
dstimg3 = np.zeros(shape, dtype=np.float32)
maximum_partial = functools.partial(np.maximum, out=dstimg3)
dstimg3 = reduce(maximum_partial, warped_patch_generator())
print("ONE")
dstimg1 = np.zeros(shape, dtype=np.float32)
print("ONE")
for warped in warped_patch_generator():
#dstimg1 = np.maximum(dstimg1, warped)
np.maximum(dstimg1, warped, out=dstimg1)
print("FOUR")
input_copy_ = np.array([w.copy() for w in warped_patch_generator()])
dstimg4 = input_copy_.max(0)
print("TWO")
dstimg2 = np.zeros(shape, dtype=np.float32)
input_iter_ = list((w for w in warped_patch_generator()))
np.maximum.reduce(input_iter_, axis=0, dtype=np.float32, out=dstimg2)
x = np.where(dstimg1.ravel() != dstimg2.ravel())[0]
print(dstimg2.take(x))
print(dstimg1.take(x))
np.allclose(dstimg1, dstimg2)
import matplotlib.pyplot as plt
plt.figure(1)
plt.subplot(221)
plt.imshow(dstimg1)
plt.subplot(222)
plt.imshow(dstimg2)
plt.subplot(223)
plt.imshow(dstimg3)
plt.subplot(224)
plt.imshow(dstimg4)
plt.show()
if __name__ == '__main__':
main()
#I would have thought that I would be allowed to write something like this:
# dstimg = np.maximum.reduce(warped_patch_generator())
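To the question in the comments: np.ufunc.reduce runs its argument through asarray, which turns a generator into a zero-dimensional object array instead of consuming it, so only a materialized sequence works; functools.reduce, by contrast, folds any iterable pairwise. Note also that list(warped_patch_generator()) collects the same reused `warped` buffer repeatedly, which is the likely reason dstimg2 can disagree with dstimg1; copying each frame, as input_copy_ does, avoids the aliasing. A minimal sketch with fresh arrays per frame:

import functools
import numpy as np

def frames():
    # yields a fresh array each time, unlike the buffer-reusing generator above
    for k in (3.0, 1.0, 2.0):
        yield np.full((2, 2), k, dtype=np.float32)

lazy_max = functools.reduce(np.maximum, frames())        # accepts any iterable
eager_max = np.maximum.reduce(list(frames()), axis=0)    # needs a real sequence
assert np.array_equal(lazy_max, eager_max)               # both all-threes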
| 58.539823
| 177
| 0.579138
|
def main():
import numpy as np
import cv2
shape = (250, 300)
dsize = shape[::-1]
affmat_list = np.array([
[[ 1.57351554e+00, 0.00000000e+00, 1.09061039e+02],
[ -3.61827926e-01, 7.46059970e-01, 2.50669551e+01]],
[[ 3.05754491e+00, 0.00000000e+00, 8.28024922e+01],
[ -2.13866309e-01, 1.72124200e+00, 1.72744669e+02]],
[[ 2.58008254e+00, 0.00000000e+00, 1.52155447e+02],
[ -2.08041241e+00, 2.46195663e+00, 1.09493821e+02]],
[[ 2.01791864e+00, 0.00000000e+00, 2.45704669e+02],
[ -1.07590956e+00, 3.33499949e+00, 1.66233498e+02]],
[[ 3.32012638e+00, 0.00000000e+00, 1.03847866e+02],
[ -2.36557589e+00, 3.02063109e+00, 1.59907802e+02]],
[[ 4.94371474e+00, 0.00000000e+00, 7.92717193e+01],
[ -2.67846198e+00, 3.66854256e+00, 1.47888210e+02]]])
fx2_score = np.ones(len(affmat_list))
patch = np.array([
[ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014],
[ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],
[ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],
[ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],
[ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],
[ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],
[ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],
[ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],
[ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
[ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0038, 0.0038, 0.0039, 0.0038, 0.0038, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
[ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
[ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],
[ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],
[ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],
[ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],
[ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],
[ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],
[ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],
[ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014]
])
def warped_patch_generator():
padded_patch = np.zeros(shape, dtype=np.float32)
patch_h, patch_w = patch.shape
warped = np.zeros(shape, dtype=np.float32)
for count, (M, score) in enumerate(zip(affmat_list, fx2_score)):
print(count)
np.multiply(patch, score, out=padded_patch[:patch.shape[0], :patch.shape[1]] )
cv2.warpAffine(padded_patch, M, dsize, dst=warped,
flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,
borderValue=0)
yield warped
print("THREE")
from six.moves import reduce
import functools
dstimg3 = np.zeros(shape, dtype=np.float32)
maximum_partial = functools.partial(np.maximum, out=dstimg3)
dstimg3 = reduce(maximum_partial, warped_patch_generator())
print("ONE")
dstimg1 = np.zeros(shape, dtype=np.float32)
print("ONE")
for warped in warped_patch_generator():
np.maximum(dstimg1, warped, out=dstimg1)
print("FOUR")
input_copy_ = np.array([w.copy() for w in warped_patch_generator()])
dstimg4 = input_copy_.max(0)
print("TWO")
dstimg2 = np.zeros(shape, dtype=np.float32)
input_iter_ = list((w for w in warped_patch_generator()))
np.maximum.reduce(input_iter_, axis=0, dtype=np.float32, out=dstimg2)
x = np.where(dstimg1.ravel() != dstimg2.ravel())[0]
print(dstimg2.take(x))
print(dstimg1.take(x))
np.allclose(dstimg1, dstimg2)
import matplotlib.pyplot as plt
plt.figure(1)
plt.subplot(221)
plt.imshow(dstimg1)
plt.subplot(222)
plt.imshow(dstimg2)
plt.subplot(223)
plt.imshow(dstimg3)
plt.subplot(224)
plt.imshow(dstimg4)
plt.show()
if __name__ == '__main__':
main()
| true
| true
|
f7171a791e39a7775c3b4fa6f439304244a17891
| 698
|
py
|
Python
|
contracts_api_test/unit/test_base.py
|
codeforamerica/contracts-api
|
cc86380d36c6c5ad3028f557c342ab7d1fbb2ddc
|
[
"BSD-3-Clause"
] | null | null | null |
contracts_api_test/unit/test_base.py
|
codeforamerica/contracts-api
|
cc86380d36c6c5ad3028f557c342ab7d1fbb2ddc
|
[
"BSD-3-Clause"
] | null | null | null |
contracts_api_test/unit/test_base.py
|
codeforamerica/contracts-api
|
cc86380d36c6c5ad3028f557c342ab7d1fbb2ddc
|
[
"BSD-3-Clause"
] | 2
|
2020-01-09T03:37:56.000Z
|
2021-04-16T10:49:40.000Z
|
from flask.ext.testing import TestCase
from contracts_api.settings import TestConfig
from contracts_api.app import create_app as _create_app
from contracts_api.database import db
from contracts_api.api.models import Stage, Contract, StageProperty, ContractAudit, Flow
class BaseTestCase(TestCase):
'''
A base test case that boots our app
'''
def create_app(self):
return _create_app(TestConfig)
def setUp(self):
db.connect()
db.create_tables([Stage, Contract, StageProperty, ContractAudit, Flow], safe=True)
def tearDown(self):
db.connect()
db.drop_tables([Stage, Contract, StageProperty, ContractAudit, Flow])
db.close()
| 30.347826
| 90
| 0.719198
|
from flask.ext.testing import TestCase
from contracts_api.settings import TestConfig
from contracts_api.app import create_app as _create_app
from contracts_api.database import db
from contracts_api.api.models import Stage, Contract, StageProperty, ContractAudit, Flow
class BaseTestCase(TestCase):
def create_app(self):
return _create_app(TestConfig)
def setUp(self):
db.connect()
db.create_tables([Stage, Contract, StageProperty, ContractAudit, Flow], safe=True)
def tearDown(self):
db.connect()
db.drop_tables([Stage, Contract, StageProperty, ContractAudit, Flow])
db.close()
| true
| true
|
f7171ae01ae8fce11b09972bff09e3872d739860
| 395
|
py
|
Python
|
sayit/wsgi.py
|
SarahJaine/say-it
|
83ea1493a8f2934d7a82fbdd02113e338adff99f
|
[
"MIT"
] | null | null | null |
sayit/wsgi.py
|
SarahJaine/say-it
|
83ea1493a8f2934d7a82fbdd02113e338adff99f
|
[
"MIT"
] | 3
|
2017-02-26T23:58:56.000Z
|
2017-05-08T00:09:15.000Z
|
sayit/wsgi.py
|
SarahJaine/say-it
|
83ea1493a8f2934d7a82fbdd02113e338adff99f
|
[
"MIT"
] | null | null | null |
import os
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "sayit.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
public_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'public')
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
application.add_files(public_path, prefix='/')
| 28.214286
| 57
| 0.797468
|
import os
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "sayit.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
public_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'public')
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
application.add_files(public_path, prefix='/')
| true
| true
|
f7171afc41a48d20fb0a9e5cbef0798bf315d360
| 5,238
|
py
|
Python
|
src/cc_catalog_airflow/dags/provider_api_scripts/test_raw_pixel.py
|
sarayourfriend/openverse-catalog
|
b12ba815de782032f72ffa4f5620cfc8de8c84bd
|
[
"MIT"
] | null | null | null |
src/cc_catalog_airflow/dags/provider_api_scripts/test_raw_pixel.py
|
sarayourfriend/openverse-catalog
|
b12ba815de782032f72ffa4f5620cfc8de8c84bd
|
[
"MIT"
] | null | null | null |
src/cc_catalog_airflow/dags/provider_api_scripts/test_raw_pixel.py
|
sarayourfriend/openverse-catalog
|
b12ba815de782032f72ffa4f5620cfc8de8c84bd
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
from unittest.mock import patch
from common import LicenseInfo, MockImageStore
import raw_pixel as rwp
_license_info = (
'cc0',
'1.0',
'https://creativecommons.org/publicdomain/zero/1.0/',
None
)
license_info = LicenseInfo(*_license_info)
rwp.image_store = MockImageStore(
provider=rwp.PROVIDER,
license_info=license_info
)
RESOURCES = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "tests/resources/rawpixel"
)
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s: %(message)s",
level=logging.DEBUG,
)
def _get_resource_json(json_name):
with open(os.path.join(RESOURCES, json_name)) as f:
resource_json = json.load(f)
return resource_json
def test_get_image_list_giving_none():
with patch.object(rwp, "_request_content", return_value=None):
total, result = rwp._get_image_list()
assert total is None
assert result is None
def test_get_image_list_correct():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
total, result = rwp._get_image_list()
assert total == 22215
assert len(result) == 1
def test_process_pages_giving_zero():
with patch.object(rwp, "_request_content", return_value=None):
total, result = rwp._get_image_list()
img_ctr = rwp._process_pages(total, result, page=1)
assert img_ctr == 0
def test_process_image_data():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
assert rwp._process_image_data(image=result[0]) == 1
def test_get_foreign_id_url():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
foreign_id, foreign_url = rwp._get_foreign_id_url(image=result[0])
assert foreign_id == 2041320
assert (
foreign_url
== "https://www.rawpixel.com/image/2041320/world-map-drawn-oval-projection"
)
def test_get_image_properties():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
img_url, width, height, thumbnail = rwp._get_image_properties(
image=result[0], foreign_url=""
)
assert (
img_url
== ("https://img.rawpixel.com/s3fs-private/rawpixel_images/"
"website_content/pdmaps-loc-06-nam_1.jpg?w=1200&h=630&fit="
"crop&dpr=1.5&crop=entropy&fm=pjpg&q=75&vib=3&con=3&usm=15&"
"markpad=13&markalpha=90&markscale=10&markx=25&mark=rawpixel"
"-watermark.png&cs=srgb&bg=F4F4F3&ixlib=js-2.2.1&s=edbf5b4204"
"30b7f118a0093686c40f93")
)
assert width == "1200"
assert height == "630"
assert (
thumbnail
== ("https://img.rawpixel.com/s3fs-private/rawpixel_images/"
"website_content/pdmaps-loc-06-nam_1.jpg?w=400&dpr=1&fit"
"=default&crop=default&auto=format&fm=pjpg&q=75&vib=3&con="
"3&usm=15&bg=F4F4F3&ixlib=js-2.2.1&s=6f33bfab36227436a0f9ad230"
"fc1d64a")
)
def test_get_title_owner():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
title, owner = rwp._get_title_owner(image=result[0])
assert title == "World map drawn on an oval projection"
assert owner == "Library of Congress"
def test_get_meta_data_given_pinterest_descr_is_present():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
meta_data = rwp._get_meta_data(image=result[0])
expected_descr_value = (
"Portolan atlas of the Mediterranean Sea, western Europe, and the"
" northwest coast of Africa: World map drawn on an oval projection"
" (ca. 1590) by Joan Oliva. Original from Library of Congress. "
"Digitally enhanced by rawpixel. | free image by rawpixel.com / "
"Library of Congress (Source)")
expected_meta_data = {"description": expected_descr_value}
assert meta_data == expected_meta_data
def test_get_meta_data_given_no_pinterest_descr():
r = _get_resource_json("total_images_but_pinterest_descr_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
meta_data = rwp._get_meta_data(image=result[0])
assert meta_data == {}
def test_get_tags():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
tags = rwp._get_tags(image=result[0])
assert len(tags) == 47
assert tags[0] == "america"
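The patch.object / _get_image_list pair recurs in nearly every test above; a small helper (hypothetical, not part of the module) would factor the stubbing out:

from unittest.mock import patch
import raw_pixel as rwp

def image_list_with(response_json):
    # substitute the network call with canned JSON for the duration of the call
    with patch.object(rwp, "_request_content", return_value=response_json):
        return rwp._get_image_list()

# e.g. total, result = image_list_with(_get_resource_json("total_images_example.json"))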
| 35.876712
| 87
| 0.657312
|
import json
import logging
import os
from unittest.mock import patch
from common import LicenseInfo, MockImageStore
import raw_pixel as rwp
_license_info = (
'cc0',
'1.0',
'https://creativecommons.org/publicdomain/zero/1.0/',
None
)
license_info = LicenseInfo(*_license_info)
rwp.image_store = MockImageStore(
provider=rwp.PROVIDER,
license_info=license_info
)
RESOURCES = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "tests/resources/rawpixel"
)
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s: %(message)s",
level=logging.DEBUG,
)
def _get_resource_json(json_name):
with open(os.path.join(RESOURCES, json_name)) as f:
resource_json = json.load(f)
return resource_json
def test_get_image_list_giving_none():
with patch.object(rwp, "_request_content", return_value=None):
total, result = rwp._get_image_list()
assert total is None
assert result is None
def test_get_image_list_correct():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
total, result = rwp._get_image_list()
assert total == 22215
assert len(result) == 1
def test_process_pages_giving_zero():
with patch.object(rwp, "_request_content", return_value=None):
total, result = rwp._get_image_list()
img_ctr = rwp._process_pages(total, result, page=1)
assert img_ctr == 0
def test_process_image_data():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
assert rwp._process_image_data(image=result[0]) == 1
def test_get_foreign_id_url():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
foreign_id, foreign_url = rwp._get_foreign_id_url(image=result[0])
assert foreign_id == 2041320
assert (
foreign_url
== "https://www.rawpixel.com/image/2041320/world-map-drawn-oval-projection"
)
def test_get_image_properties():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
img_url, width, height, thumbnail = rwp._get_image_properties(
image=result[0], foreign_url=""
)
assert (
img_url
== ("https://img.rawpixel.com/s3fs-private/rawpixel_images/"
"website_content/pdmaps-loc-06-nam_1.jpg?w=1200&h=630&fit="
"crop&dpr=1.5&crop=entropy&fm=pjpg&q=75&vib=3&con=3&usm=15&"
"markpad=13&markalpha=90&markscale=10&markx=25&mark=rawpixel"
"-watermark.png&cs=srgb&bg=F4F4F3&ixlib=js-2.2.1&s=edbf5b4204"
"30b7f118a0093686c40f93")
)
assert width == "1200"
assert height == "630"
assert (
thumbnail
== ("https://img.rawpixel.com/s3fs-private/rawpixel_images/"
"website_content/pdmaps-loc-06-nam_1.jpg?w=400&dpr=1&fit"
"=default&crop=default&auto=format&fm=pjpg&q=75&vib=3&con="
"3&usm=15&bg=F4F4F3&ixlib=js-2.2.1&s=6f33bfab36227436a0f9ad230"
"fc1d64a")
)
def test_get_title_owner():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
title, owner = rwp._get_title_owner(image=result[0])
assert title == "World map drawn on an oval projection"
assert owner == "Library of Congress"
def test_get_meta_data_given_pinterest_descr_is_present():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
meta_data = rwp._get_meta_data(image=result[0])
expected_descr_value = (
"Portolan atlas of the Mediterranean Sea, western Europe, and the"
" northwest coast of Africa: World map drawn on an oval projection"
" (ca. 1590) by Joan Oliva. Original from Library of Congress. "
"Digitally enhanced by rawpixel. | free image by rawpixel.com / "
"Library of Congress (Source)")
expected_meta_data = {"description": expected_descr_value}
assert meta_data == expected_meta_data
def test_get_meta_data_given_no_pinterest_descr():
r = _get_resource_json("total_images_but_pinterest_descr_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
meta_data = rwp._get_meta_data(image=result[0])
assert meta_data == {}
def test_get_tags():
r = _get_resource_json("total_images_example.json")
with patch.object(rwp, "_request_content", return_value=r):
result = rwp._get_image_list()[1]
tags = rwp._get_tags(image=result[0])
assert len(tags) == 47
assert tags[0] == "america"
| true
| true
|
f7171bff1d3cb168e55e72b45b4b3d0194d1ce5e
| 65,057
|
py
|
Python
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | null | null | null |
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | null | null | null |
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2
short_description: create, terminate, start or stop an instance in ec2
description:
- Creates or terminates ec2 instances.
version_added: "0.9"
options:
key_name:
description:
- key pair to use on the instance
aliases: ['keypair']
id:
version_added: "1.1"
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
group:
description:
- security group (or list of groups) to use with the instance
aliases: [ 'groups' ]
group_id:
version_added: "1.1"
description:
- security group id (or list of ids) to use with the instance
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used.
If not specified then the value of the EC2_REGION environment variable, if any, is used.
See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
aliases: [ 'aws_region', 'ec2_region' ]
zone:
version_added: "1.2"
description:
- AWS availability zone in which to launch the instance
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
required: true
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC.
Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
default: default
choices: [ "default", "dedicated" ]
spot_price:
version_added: "1.5"
description:
      - Maximum spot price to bid. If not set, a regular on-demand instance is requested. A spot request is made with this maximum bid.
        When it is filled, the instance is started.
spot_type:
version_added: "2.0"
description:
- Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
default: "one-time"
choices: [ "one-time", "persistent" ]
image:
description:
- I(ami) ID to use for the instance
required: true
kernel:
description:
- kernel I(eki) to use for the instance
ramdisk:
description:
- ramdisk I(eri) to use for the instance
wait:
description:
- wait for the instance to reach its desired state before returning. Does not wait for SSH, see 'wait_for_connection' example for details.
type: bool
default: 'no'
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
spot_wait_timeout:
version_added: "1.5"
description:
- how long to wait for the spot instance request to be fulfilled
default: 600
count:
description:
- number of instances to launch
default: 1
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
type: bool
default: 'no'
user_data:
version_added: "0.9"
description:
- opaque blob of data which is made available to the ec2 instance
instance_tags:
version_added: "1.0"
description:
      - a hash/dictionary of tags to add to the new instance, or for starting/stopping instances by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'
placement_group:
version_added: "1.3"
description:
- placement group for the instance when using EC2 Clustered Compute
vpc_subnet_id:
version_added: "1.1"
description:
- the subnet ID in which to launch the instance (VPC)
assign_public_ip:
version_added: "1.5"
description:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
type: bool
private_ip:
version_added: "1.2"
description:
- the private ip address to assign the instance (from the vpc subnet)
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+
instance_ids:
version_added: "1.3"
description:
- "list of instance ids, currently used for states: absent, running, stopped"
aliases: ['instance_id']
source_dest_check:
version_added: "1.6"
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers).
When initially creating an instance the EC2 API defaults this to True.
type: bool
termination_protection:
version_added: "2.0"
description:
- Enable or Disable the Termination Protection
type: bool
default: 'no'
instance_initiated_shutdown_behavior:
version_added: "2.2"
description:
- Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store
images (which require termination on shutdown).
default: 'stop'
choices: [ "stop", "terminate" ]
state:
version_added: "1.3"
description:
- create, terminate, start, stop or restart instances.
The state 'restarted' was added in 2.2
required: false
default: 'present'
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
volumes:
version_added: "1.5"
description:
- a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed
are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str),
encrypted (bool; False), snapshot (str), volume_type (str), volume_size (int, GB), iops (int) - device_type
is deprecated use volume_type, iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
ebs_optimized:
version_added: "1.6"
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
default: 'no'
exact_count:
version_added: "1.5"
description:
- An integer value which indicates how many instances that match the 'count_tag' parameter should be running.
Instances are either created or terminated based on this value.
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running.
This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
that are tagged with "class=webserver". The specified tag must already exist or be passed in as the 'instance_tags' option.
network_interfaces:
version_added: "2.0"
description:
- A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are
for creating a new network interface at launch.)
aliases: ['network_interface']
spot_launch_group:
version_added: "2.1"
description:
- Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group)
author:
- "Tim Gerla (@tgerla)"
- "Lester Wade (@lwade)"
- "Seth Vidal"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- ec2:
key_name: mykey
instance_type: t2.micro
image: ami-123456
wait: yes
group: webserver
count: 3
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Advanced example with tagging and CloudWatch
- ec2:
key_name: mykey
group: databases
instance_type: t2.micro
image: ami-123456
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with ssd gp2 root volume
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/xvda
volume_type: gp2
volume_size: 8
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
count_tag:
Name: dbserver
exact_count: 1
# Multiple groups example
- ec2:
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple instances with additional volume from snapshot
- ec2:
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Dedicated tenancy example
- local_action:
module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
spot_launch_group: report_generators
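# Persistent spot request example (illustrative values; spot_type defaults to
# 'one-time', a persistent request stays open after the instance terminates)
- ec2:
spot_price: 0.24
spot_type: persistent
spot_wait_timeout: 600
keypair: mykey
instance_type: m1.small
image: ami-6e649707
wait: yes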
# Examples using pre-existing network interfaces
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interface: eni-deadbeef
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
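# Termination protection and shutdown behavior example (illustrative;
# the AMI, key and subnet IDs below are placeholders)
- ec2:
key_name: mykey
instance_type: t2.micro
image: ami-123456
wait: yes
termination_protection: yes
instance_initiated_shutdown_behavior: stop
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes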
# Launch instances, run some tasks
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
vars:
keypair: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
ec2:
key_name: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2
- name: Add new instance to host group
add_host:
hostname: "{{ item.public_ip }}"
groupname: launched
with_items: "{{ ec2.instances }}"
- name: Wait for SSH to come up
delegate_to: "{{ item.public_dns_name }}"
wait_for_connection:
delay: 60
timeout: 320
with_items: "{{ ec2.instances }}"
- name: Configure instance(s)
hosts: launched
become: True
gather_facts: True
roles:
- my_awesome_role
- my_awesome_test
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
ec2:
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
# Start a few existing instances, run some tasks
# and stop the instances
- name: Start sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Start the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
roles:
- do_neat_stuff
- do_more_neat_stuff
- name: Stop sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Start stopped instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: running
#
# Restart instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: restarted
#
# Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 instances named "database" with a "dbtype" of "postgres" are running
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
Name: database
dbtype: postgres
exact_count: 5
count_tag:
Name: database
dbtype: postgres
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# count_tag complex argument examples
#
# instances with tag foo
- ec2:
count_tag:
foo:
# instances with tag foo=bar
- ec2:
count_tag:
foo: bar
# instances with tags foo=bar & baz
- ec2:
count_tag:
foo: bar
baz:
# instances with tags foo & bar & baz=bang
- ec2:
count_tag:
- foo
- bar
- baz: bang
'''
import time
import datetime
import traceback
from ast import literal_eval
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect
from ansible.module_utils.six import get_function_code, string_types
from ansible.module_utils._text import to_bytes, to_text
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
from boto import connect_ec2_endpoint
from boto import connect_vpc
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
# get reservations for instances that match tag(s) and are in the desired state
state = module.params.get('state')
if state not in ['running', 'stopped']:
state = None
reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone)
instances = []
for res in reservations:
if hasattr(res, 'instances'):
for inst in res.instances:
if inst.state == 'terminated':
continue
instances.append(inst)
return reservations, instances
def _set_none_to_blank(dictionary):
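# Recursively convert None/empty tag values to empty strings so the
# resulting values are safe to use in EC2 tag filters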
result = dictionary
for k in result:
if isinstance(result[k], dict):
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
# TODO: filters do not work with tags that have underscores
filters = dict()
vpc_subnet_id = module.params.get('vpc_subnet_id')
vpc_id = None
if vpc_subnet_id:
filters.update({"subnet-id": vpc_subnet_id})
if vpc:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
if vpc_id:
filters.update({"vpc-id": vpc_id})
if tags is not None:
if isinstance(tags, str):
try:
tags = literal_eval(tags)
except (ValueError, SyntaxError):
pass
# if not a string type, convert and make sure it's a text string
if isinstance(tags, int):
tags = to_text(tags)
# if string, we only care that a tag of that name exists
if isinstance(tags, str):
filters.update({"tag-key": tags})
# if list, append each item to filters
if isinstance(tags, list):
for x in tags:
if isinstance(x, dict):
x = _set_none_to_blank(x)
filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
else:
filters.update({"tag-key": x})
# if dict, add the key and value to the filter
if isinstance(tags, dict):
tags = _set_none_to_blank(tags)
filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))
# let's check to see if the filters dict is empty; if so, stop
if not filters:
module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags))
if state:
# http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
filters.update({'instance-state-name': state})
if zone:
filters.update({'availability-zone': zone})
if module.params.get('id'):
filters['client-token'] = module.params['id']
results = ec2.get_all_instances(filters=filters)
return results
def get_instance_info(inst):
"""
Retrieves instance information from a boto instance
object and returns it as a dictionary
"""
instance_info = {'id': inst.id,
'ami_launch_index': inst.ami_launch_index,
'private_ip': inst.private_ip_address,
'private_dns_name': inst.private_dns_name,
'public_ip': inst.ip_address,
'dns_name': inst.dns_name,
'public_dns_name': inst.public_dns_name,
'state_code': inst.state_code,
'architecture': inst.architecture,
'image_id': inst.image_id,
'key_name': inst.key_name,
'placement': inst.placement,
'region': inst.placement[:-1],
'kernel': inst.kernel,
'ramdisk': inst.ramdisk,
'launch_time': inst.launch_time,
'instance_type': inst.instance_type,
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
'hypervisor': inst.hypervisor,
'tags': inst.tags,
'groups': dict((group.id, group.name) for group in inst.groups),
}
try:
instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
except AttributeError:
instance_info['virtualization_type'] = None
try:
instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
except AttributeError:
instance_info['ebs_optimized'] = False
try:
bdm_dict = {}
bdm = getattr(inst, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'status': bdm[device_name].status,
'volume_id': bdm[device_name].volume_id,
'delete_on_termination': bdm[device_name].delete_on_termination
}
instance_info['block_device_mapping'] = bdm_dict
except AttributeError:
instance_info['block_device_mapping'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
instance_info['tenancy'] = 'default'
return instance_info
def boto_supports_associate_public_ip_address(ec2):
"""
Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
class. Added in Boto 2.13.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accepts the associate_public_ip_address argument, else False
"""
try:
network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
getattr(network_interface, "associate_public_ip_address")
return True
except AttributeError:
return False
def boto_supports_profile_name_arg(ec2):
"""
Check if Boto library has instance_profile_name argument. instance_profile_name was added in Boto 2.5.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accepts the instance_profile_name argument, else False
"""
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if the installed Boto version is 2.29.0 or later, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_block_device(module, ec2, volume):
# Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
# device_type has been used historically to represent volume_type,
# however ec2_vol uses volume_type, as does the BlockDeviceType, so
# we add handling for either/or but not both
if all(key in volume for key in ['device_type', 'volume_type']):
module.fail_json(msg='device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')
if 'device_type' in volume:
module.deprecate('device_type is deprecated for block devices - use volume_type instead',
version=2.9)
# get whichever one is set, or NoneType if neither are set
volume_type = volume.get('device_type') or volume.get('volume_type')
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if volume_type == 'io1' and 'iops' not in volume:
module.fail_json(msg='io1 volumes must have an iops value set')
if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
size = volume.get('volume_size', snapshot.volume_size)
if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
module.fail_json(msg='IOPS must be at most %d times the volume size' % MAX_IOPS_TO_SIZE_RATIO)
if 'encrypted' in volume:
module.fail_json(msg='You cannot set encryption when creating a volume from a snapshot')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg='Cannot set both ephemeral and snapshot')
if boto_supports_volume_encryption():
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'),
encrypted=volume.get('encrypted', None))
else:
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
def boto_supports_param_in_spot_request(ec2, param):
"""
Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
ec2: authenticated ec2 connection object
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
method = getattr(ec2, 'request_spot_instances')
return param in get_function_code(method).co_varnames
def await_spot_requests(module, ec2, spot_requests, count):
"""
Wait for a group of spot requests to be fulfilled, or fail.
module: Ansible module object
ec2: authenticated ec2 connection object
spot_requests: list of boto.ec2.spotinstancerequest.SpotInstanceRequest objects returned by ec2.request_spot_instances
count: Total number of instances to be created by the spot requests
Returns:
list of instance IDs created by the spot request(s)
"""
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
wait_complete = time.time() + spot_wait_timeout
spot_req_inst_ids = dict()
while time.time() < wait_complete:
reqs = ec2.get_all_spot_instance_requests()
for sirb in spot_requests:
if sirb.id in spot_req_inst_ids:
continue
for sir in reqs:
if sir.id != sirb.id:
continue # this is not our spot instance
if sir.instance_id is not None:
spot_req_inst_ids[sirb.id] = sir.instance_id
elif sir.state == 'open':
continue # still waiting, nothing to do here
elif sir.state == 'active':
continue # Instance is created already, nothing to do here
elif sir.state == 'failed':
module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
sir.id, sir.status.code, sir.fault.code, sir.fault.message))
elif sir.state == 'cancelled':
module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
elif sir.state == 'closed':
# instance is terminating or marked for termination
# this may be intentional on the part of the operator,
# or it may have been terminated by AWS due to capacity,
# price, or group constraints. In this case, we'll fail
# the module if the reason for the state is anything
# other than termination by the user. Codes are documented at
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
if sir.status.code == 'instance-terminated-by-user':
# do nothing, since the user likely did this on purpose
pass
else:
spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
if len(spot_req_inst_ids) < count:
time.sleep(5)
else:
return list(spot_req_inst_ids.values())
module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
def enforce_count(module, ec2, vpc):
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
zone = module.params.get('zone')
# fail here if the exact count was specified without filtering
# on a tag, as this may lead to an undesired removal of instances
if exact_count and count_tag is None:
module.fail_json(msg="you must use the 'count_tag' option with exact_count")
reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)
changed = None
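# check mode is not implemented for this path; the flag below is a
# placeholder that always allows the create/terminate calls to run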
checkmode = False
instance_dict_array = []
changed_instance_ids = None
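# compare the current number of matching instances against exact_count
# and create or terminate the difference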
if len(instances) == exact_count:
changed = False
elif len(instances) < exact_count:
changed = True
to_create = exact_count - len(instances)
if not checkmode:
(instance_dict_array, changed_instance_ids, changed) \
= create_instances(module, ec2, vpc, override_count=to_create)
for inst in instance_dict_array:
instances.append(inst)
elif len(instances) > exact_count:
changed = True
to_remove = len(instances) - exact_count
if not checkmode:
all_instance_ids = sorted([x.id for x in instances])
remove_ids = all_instance_ids[0:to_remove]
instances = [x for x in instances if x.id not in remove_ids]
(changed, instance_dict_array, changed_instance_ids) \
= terminate_instances(module, ec2, remove_ids)
terminated_list = []
for inst in instance_dict_array:
inst['state'] = "terminated"
terminated_list.append(inst)
instance_dict_array = terminated_list
# ensure all instances are dictionaries
all_instances = []
for inst in instances:
if not isinstance(inst, dict):
warn_if_public_ip_assignment_changed(module, inst)
inst = get_instance_info(inst)
all_instances.append(inst)
return (all_instances, instance_dict_array, changed_instance_ids, changed)
def create_instances(module, ec2, vpc, override_count=None):
"""
Creates new instances
module : AnsibleModule object
ec2: authenticated ec2 connection object
Returns:
A list of dictionaries with instance information
about the instances that were launched
"""
key_name = module.params.get('key_name')
id = module.params.get('id')
group_name = module.params.get('group')
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
spot_type = module.params.get('spot_type')
image = module.params.get('image')
if override_count:
count = override_count
else:
count = module.params.get('count')
monitoring = module.params.get('monitoring')
kernel = module.params.get('kernel')
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id')
assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
volumes = module.params.get('volumes')
ebs_optimized = module.params.get('ebs_optimized')
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
source_dest_check = module.boolean(module.params.get('source_dest_check'))
termination_protection = module.boolean(module.params.get('termination_protection'))
network_interfaces = module.params.get('network_interfaces')
spot_launch_group = module.params.get('spot_launch_group')
instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
vpc_id = None
if vpc_subnet_id:
if not vpc:
module.fail_json(msg="region must be specified")
else:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
else:
vpc_id = None
try:
# Here we try to look up the group id from the security group name - if group is set.
if group_name:
if vpc_id:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
else:
grp_details = ec2.get_all_security_groups()
if isinstance(group_name, string_types):
group_name = [group_name]
unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
if len(unmatched) > 0:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
# Now we look up the group name(s) from the given group id(s), verifying that they exist.
elif group_id:
# wrap the group_id in a list if it's not one already
if isinstance(group_id, string_types):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
# Look up any instances that match our run id.
running_instances = []
count_remaining = int(count)
if id is not None:
filter_dict = {'client-token': id, 'instance-state-name': 'running'}
previous_reservations = ec2.get_all_instances(None, filter_dict)
for res in previous_reservations:
for prev_instance in res.instances:
running_instances.append(prev_instance)
count_remaining = count_remaining - len(running_instances)
# min_count and max_count are both set to the count parameter, so the launch request is explicit: we want exactly that many instances, or the request fails.
if count_remaining == 0:
changed = False
else:
changed = True
try:
params = {'image_id': image,
'key_name': key_name,
'monitoring_enabled': monitoring,
'placement': zone,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
'user_data': to_bytes(user_data, errors='surrogate_or_strict')}
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
# 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
if instance_profile_name is not None:
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if assign_public_ip is not None:
if not boto_supports_associate_public_ip_address(ec2):
module.fail_json(
msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
elif not vpc_subnet_id:
module.fail_json(
msg="assign_public_ip only available with vpc_subnet_id")
else:
if private_ip:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
private_ip_address=private_ip,
groups=group_id,
associate_public_ip_address=assign_public_ip)
else:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
groups=group_id,
associate_public_ip_address=assign_public_ip)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
params['network_interfaces'] = interfaces
else:
if network_interfaces:
if isinstance(network_interfaces, string_types):
network_interfaces = [network_interfaces]
interfaces = []
for i, network_interface_id in enumerate(network_interfaces):
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
network_interface_id=network_interface_id,
device_index=i)
interfaces.append(interface)
params['network_interfaces'] = \
boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
else:
params['subnet_id'] = vpc_subnet_id
if vpc_subnet_id:
params['security_group_ids'] = group_id
else:
params['security_groups'] = group_name
if volumes:
bdm = BlockDeviceMapping()
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg='Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, ec2, volume)
params['block_device_map'] = bdm
# check to see if we're using spot pricing first before starting instances
if not spot_price:
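# when assign_public_ip is used, any private_ip has already been attached
# through the network interface specification above, so it must not be
# passed again as private_ip_address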
if assign_public_ip is not None and private_ip:
params.update(
dict(
min_count=count_remaining,
max_count=count_remaining,
client_token=id,
placement_group=placement_group,
)
)
else:
params.update(
dict(
min_count=count_remaining,
max_count=count_remaining,
client_token=id,
placement_group=placement_group,
private_ip_address=private_ip,
)
)
# For ordinary (not spot) instances, we can select 'stop'
# (the default) or 'terminate' here.
params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
try:
res = ec2.run_instances(**params)
except boto.exception.EC2ResponseError as e:
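# instance-store backed AMIs cannot use 'stop' as a shutdown behavior;
# retry with 'terminate' when that combination is rejected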
if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
"InvalidParameterCombination" == e.error_code):
params['instance_initiated_shutdown_behavior'] = 'terminate'
res = ec2.run_instances(**params)
else:
raise
instids = [i.id for i in res.instances]
while True:
try:
ec2.get_all_instances(instids)
break
except boto.exception.EC2ResponseError as e:
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
# there's a race between launching an instance and being able to describe it
continue
else:
module.fail_json(msg=str(e))
# The instances returned through ec2.run_instances above can be in
# terminated state due to idempotency. See commit 7f11c3d for a complete
# explanation.
terminated_instances = [
str(instance.id) for instance in res.instances if instance.state == 'terminated'
]
if terminated_instances:
module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
else:
if private_ip:
module.fail_json(
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
elif placement_group:
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
# You can't tell spot instances to 'stop'; they will always be
# 'terminate'd. For convenience, we'll ignore the latter value.
if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
module.fail_json(
msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
if spot_launch_group and isinstance(spot_launch_group, string_types):
params['launch_group'] = spot_launch_group
params.update(dict(
count=count_remaining,
type=spot_type,
))
# Set spot ValidUntil
# ValidUntil -> (timestamp). The end date of the request, in
# UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
utc_valid_until = (
datetime.datetime.utcnow()
+ datetime.timedelta(seconds=spot_wait_timeout))
params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')
res = ec2.request_spot_instances(spot_price, **params)
# Now we have to do the intermediate waiting
if wait:
instids = await_spot_requests(module, ec2, res, count)
else:
instids = []
except boto.exception.BotoServerError as e:
module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up
num_running = 0
wait_timeout = time.time() + wait_timeout
res_list = ()
while wait_timeout > time.time() and num_running < len(instids):
try:
res_list = ec2.get_all_instances(instids)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidInstanceID.NotFound':
time.sleep(1)
continue
else:
raise
num_running = 0
for res in res_list:
num_running += len([i for i in res.instances if i.state == 'running'])
if len(res_list) <= 0:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if wait and num_running < len(instids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
# We do this after the loop ends so that we end up with one list
for res in res_list:
running_instances.extend(res.instances)
# Enabled by default by AWS
if source_dest_check is False:
for inst in res.instances:
inst.modify_attribute('sourceDestCheck', False)
# Disabled by default by AWS
if termination_protection is True:
for inst in res.instances:
inst.modify_attribute('disableApiTermination', True)
# Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
if instance_tags and instids:
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
instance_dict_array = []
created_instance_ids = []
for inst in running_instances:
inst.update()
d = get_instance_info(inst)
created_instance_ids.append(inst.id)
instance_dict_array.append(d)
return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids):
"""
Terminates a list of instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: a list of ids of the instances to terminate
Returns a dictionary of instance information
about the instances terminated.
"changed" will be set to True when at least one
instance was terminated, False otherwise.
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
terminated_instance_ids = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state == 'running' or inst.state == 'stopped':
terminated_instance_ids.append(inst.id)
instance_dict_array.append(get_instance_info(inst))
try:
ec2.terminate_instances([inst.id])
except EC2ResponseError as e:
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True
# wait here until the instances are 'terminated'
if wait:
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
filters={'instance-state-name': 'terminated'})
try:
num_terminated = sum([len(res.instances) for res in response])
except Exception as e:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if num_terminated < len(terminated_instance_ids):
time.sleep(5)
# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
# Let's get the current state of the instances after terminating them - issue600
instance_dict_array = []
for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
for inst in res.instances:
instance_dict_array.append(get_instance_info(inst))
return (changed, instance_dict_array, terminated_instance_ids)
def startstop_instances(module, ec2, instance_ids, state, instance_tags):
"""
Starts or stops a list of existing instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: a list of ids of the instances to start or stop
instance_tags: A dict of tag keys and values in the form of
{key: value, ... }
state: Intended state ("running" or "stopped")
Returns a dictionary of instance information
about the instances started/stopped.
If the instance was not able to change state,
"changed" will be set to False.
Note that if instance_ids and instance_tags are both non-empty,
this method will process the intersection of the two.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
group_id = module.params.get('group_id')
group_name = module.params.get('group')
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags
if not instance_tags:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# To make an EC2 tag filter, we need to prepend 'tag:' to each key.
# An empty filter does no filtering, so it's safe to pass it to the
# get_all_instances method even if the user did not specify instance_tags
filters = {}
if instance_tags:
for key, value in instance_tags.items():
filters["tag:" + key] = value
if module.params.get('id'):
filters['client-token'] = module.params['id']
# Check that our instances are not already in the desired state
# Check (and eventually change) instance attributes and instance state
existing_instances_array = []
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
warn_if_public_ip_assignment_changed(module, inst)
changed = (check_source_dest_attr(module, inst, ec2) or
check_termination_protection(module, inst) or changed)
# Check security groups and if we're using ec2-vpc; ec2-classic security groups may not be modified
if inst.vpc_id and group_name:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
if isinstance(group_name, string_types):
group_name = [group_name]
unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
if unmatched:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
elif inst.vpc_id and group_id:
if isinstance(group_id, string_types):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_ids = [grp_item.id for grp_item in grp_details]
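# sync the instance's security groups with the requested set when they differ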
if inst.vpc_id and (group_name or group_id):
if set(sg.id for sg in inst.groups) != set(group_ids):
changed = inst.modify_attribute('groupSet', group_ids)
# Check instance state
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
if state == 'running':
inst.start()
else:
inst.stop()
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
existing_instances_array.append(inst.id)
instance_ids = list(set(existing_instances_array + (instance_ids or [])))
# Wait for all the instances to finish starting or stopping
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
instance_dict_array = []
matched_instances = []
for res in ec2.get_all_instances(instance_ids):
for i in res.instances:
if i.state == state:
instance_dict_array.append(get_instance_info(i))
matched_instances.append(i)
if len(matched_instances) < len(instance_ids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
return (changed, instance_dict_array, instance_ids)
def restart_instances(module, ec2, instance_ids, state, instance_tags):
"""
Restarts a list of existing instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: a list of ids of the instances to restart
instance_tags: A dict of tag keys and values in the form of
{key: value, ... }
state: Intended state ("restarted")
Returns a dictionary of instance information
about the instances.
If the instance was not able to change state,
"changed" will be set to False.
Wait will not apply here as this is an OS-level operation.
Note that if instance_ids and instance_tags are both non-empty,
this method will process the intersection of the two.
"""
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags
if not instance_tags:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# To make an EC2 tag filter, we need to prepend 'tag:' to each key.
# An empty filter does no filtering, so it's safe to pass it to the
# get_all_instances method even if the user did not specify instance_tags
filters = {}
if instance_tags:
for key, value in instance_tags.items():
filters["tag:" + key] = value
if module.params.get('id'):
filters['client-token'] = module.params['id']
# Check that our instances are not already in the desired state
# Check (and eventually change) instance attributes and instance state
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
warn_if_public_ip_assignment_changed(module, inst)
changed = (check_source_dest_attr(module, inst, ec2) or
check_termination_protection(module, inst) or changed)
# Check instance state
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
inst.reboot()
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
return (changed, instance_dict_array, instance_ids)
def check_termination_protection(module, inst):
"""
Check the instance disableApiTermination attribute.
module: Ansible module object
inst: EC2 instance object
returns: True if state changed, None otherwise
"""
termination_protection = module.params.get('termination_protection')
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
return True
def check_source_dest_attr(module, inst, ec2):
"""
Check the instance sourceDestCheck attribute.
module: Ansible module object
inst: EC2 instance object
returns: True if state changed, None otherwise
"""
source_dest_check = module.params.get('source_dest_check')
if source_dest_check is not None:
try:
if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
inst.modify_attribute('sourceDestCheck', source_dest_check)
return True
except boto.exception.EC2ResponseError as exc:
# instances with more than one Elastic Network Interface will
# fail, because they have the sourceDestCheck attribute defined
# per-interface
if exc.code == 'InvalidInstanceID':
for interface in inst.interfaces:
if interface.source_dest_check != source_dest_check:
ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
return True
else:
module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
exception=traceback.format_exc())
def warn_if_public_ip_assignment_changed(module, instance):
# This is a non-modifiable attribute.
assign_public_ip = module.params.get('assign_public_ip')
# Check that public ip assignment is the same and warn if not
public_dns_name = getattr(instance, 'public_dns_name', None)
if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False):
module.warn("Unable to modify public ip assignment to {0} for instance {1}. "
"Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
key_name=dict(aliases=['keypair']),
id=dict(),
group=dict(type='list', aliases=['groups']),
group_id=dict(type='list'),
zone=dict(aliases=['aws_zone', 'ec2_zone']),
instance_type=dict(aliases=['type']),
spot_price=dict(),
spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group=dict(),
image=dict(),
kernel=dict(),
count=dict(type='int', default=1),
monitoring=dict(type='bool', default=False),
ramdisk=dict(),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=300),
spot_wait_timeout=dict(default=600),
placement_group=dict(),
user_data=dict(),
instance_tags=dict(type='dict'),
vpc_subnet_id=dict(),
assign_public_ip=dict(type='bool'),
private_ip=dict(),
instance_profile_name=dict(),
instance_ids=dict(type='list', aliases=['instance_id']),
source_dest_check=dict(type='bool', default=None),
termination_protection=dict(type='bool', default=None),
state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']),
exact_count=dict(type='int', default=None),
count_tag=dict(),
volumes=dict(type='list'),
ebs_optimized=dict(type='bool', default=False),
tenancy=dict(default='default'),
network_interfaces=dict(type='list', aliases=['network_interface'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['group', 'group_id'],
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids'],
['network_interfaces', 'assign_public_ip'],
['network_interfaces', 'group'],
['network_interfaces', 'group_id'],
['network_interfaces', 'private_ip'],
['network_interfaces', 'vpc_subnet_id'],
],
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
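# prefer a region-based connection; fall back to an explicit endpoint URL
# when only ec2_url is supplied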
if module.params.get('region') or not module.params.get('ec2_url'):
ec2 = ec2_connect(module)
elif module.params.get('ec2_url'):
ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
if 'region' not in aws_connect_kwargs:
aws_connect_kwargs['region'] = ec2.region
vpc = connect_vpc(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
tagged_instances = []
state = module.params['state']
if state == 'absent':
instance_ids = module.params['instance_ids']
if not instance_ids:
module.fail_json(msg='instance_ids list is required for absent state')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif state in ('running', 'stopped'):
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='instance_ids must be a list of instance ids, or instance_tags a dict of tags: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
elif state == 'restarted':
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='instance_ids must be a list of instance ids, or instance_tags a dict of tags: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
# Always return instances in the same order
if new_instance_ids:
new_instance_ids.sort()
if instance_dict_array:
instance_dict_array.sort(key=lambda x: x['id'])
if tagged_instances:
tagged_instances.sort(key=lambda x: x['id'])
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
if __name__ == '__main__':
main()
| 38.201409
| 156
| 0.627957
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2
short_description: create, terminate, start or stop an instance in ec2
description:
- Creates or terminates ec2 instances.
version_added: "0.9"
options:
key_name:
description:
- key pair to use on the instance
aliases: ['keypair']
id:
version_added: "1.1"
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
group:
description:
- security group (or list of groups) to use with the instance
aliases: [ 'groups' ]
group_id:
version_added: "1.1"
description:
- security group id (or list of ids) to use with the instance
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used.
If not specified then the value of the EC2_REGION environment variable, if any, is used.
See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
aliases: [ 'aws_region', 'ec2_region' ]
zone:
version_added: "1.2"
description:
- AWS availability zone in which to launch the instance
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
required: true
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC.
Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
default: default
choices: [ "default", "dedicated" ]
spot_price:
version_added: "1.5"
description:
- Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid.
When it is filled, the instance is started.
spot_type:
version_added: "2.0"
description:
- Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
default: "one-time"
choices: [ "one-time", "persistent" ]
image:
description:
- I(ami) ID to use for the instance
required: true
kernel:
description:
- kernel I(eki) to use for the instance
ramdisk:
description:
- ramdisk I(eri) to use for the instance
wait:
description:
- wait for the instance to reach its desired state before returning. Does not wait for SSH, see 'wait_for_connection' example for details.
type: bool
default: 'no'
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
spot_wait_timeout:
version_added: "1.5"
description:
- how long to wait for the spot instance request to be fulfilled
default: 600
count:
description:
- number of instances to launch
default: 1
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
type: bool
default: 'no'
user_data:
version_added: "0.9"
description:
- opaque blob of data which is made available to the ec2 instance
instance_tags:
version_added: "1.0"
description:
- a hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'
placement_group:
version_added: "1.3"
description:
- placement group for the instance when using EC2 Clustered Compute
vpc_subnet_id:
version_added: "1.1"
description:
- the subnet ID in which to launch the instance (VPC)
assign_public_ip:
version_added: "1.5"
description:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
type: bool
private_ip:
version_added: "1.2"
description:
- the private ip address to assign the instance (from the vpc subnet)
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+
instance_ids:
version_added: "1.3"
description:
- "list of instance ids, currently used for states: absent, running, stopped"
aliases: ['instance_id']
source_dest_check:
version_added: "1.6"
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers).
When initially creating an instance the EC2 API defaults this to True.
type: bool
termination_protection:
version_added: "2.0"
description:
- Enable or Disable the Termination Protection
type: bool
default: 'no'
instance_initiated_shutdown_behavior:
version_added: "2.2"
description:
- Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store
images (which require termination on shutdown).
default: 'stop'
choices: [ "stop", "terminate" ]
state:
version_added: "1.3"
description:
- create, terminate, start, stop or restart instances.
The state 'restarted' was added in 2.2
required: false
default: 'present'
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
volumes:
version_added: "1.5"
description:
- a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed
are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str),
encrypted (bool; False), snapshot (str), volume_type (str), volume_size (int, GB), iops (int) - device_type
is deprecated use volume_type, iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
ebs_optimized:
version_added: "1.6"
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
default: 'no'
exact_count:
version_added: "1.5"
description:
- An integer value which indicates how many instances that match the 'count_tag' parameter should be running.
Instances are either created or terminated based on this value.
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running.
This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
that are tagged with "class=webserver". The specified tag must already exist or be passed in as the 'instance_tags' option.
network_interfaces:
version_added: "2.0"
description:
- A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are
for creating a new network interface at launch.)
aliases: ['network_interface']
spot_launch_group:
version_added: "2.1"
description:
- Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group)
author:
- "Tim Gerla (@tgerla)"
- "Lester Wade (@lwade)"
- "Seth Vidal"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- ec2:
key_name: mykey
instance_type: t2.micro
image: ami-123456
wait: yes
group: webserver
count: 3
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Advanced example with tagging and CloudWatch
- ec2:
key_name: mykey
group: databases
instance_type: t2.micro
image: ami-123456
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with ssd gp2 root volume
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/xvda
volume_type: gp2
volume_size: 8
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
count_tag:
Name: dbserver
exact_count: 1
# Multiple groups example
- ec2:
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple instances with additional volume from snapshot
- ec2:
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Dedicated tenancy example
- local_action:
module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
spot_launch_group: report_generators
# Examples using pre-existing network interfaces
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interface: eni-deadbeef
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
# Launch instances, runs some tasks
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
vars:
keypair: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
ec2:
key_name: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2
- name: Add new instance to host group
add_host:
hostname: "{{ item.public_ip }}"
groupname: launched
with_items: "{{ ec2.instances }}"
- name: Wait for SSH to come up
delegate_to: "{{ item.public_dns_name }}"
wait_for_connection:
delay: 60
timeout: 320
with_items: "{{ ec2.instances }}"
- name: Configure instance(s)
hosts: launched
become: True
gather_facts: True
roles:
- my_awesome_role
- my_awesome_test
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
ec2:
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
# Start a few existing instances, run some tasks
# and stop the instances
- name: Start sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Start the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
roles:
- do_neat_stuff
- do_more_neat_stuff
- name: Stop sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Start stopped instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: running
#
# Restart instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: restarted
#
# Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
Name: database
dbtype: postgres
exact_count: 5
count_tag:
Name: database
dbtype: postgres
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# count_tag complex argument examples
#
# instances with tag foo
- ec2:
count_tag:
foo:
# instances with tag foo=bar
- ec2:
count_tag:
foo: bar
# instances with tags foo=bar & baz
- ec2:
count_tag:
foo: bar
baz:
# instances with tags foo & bar & baz=bang
- ec2:
count_tag:
- foo
- bar
- baz: bang
'''
import time
import datetime
import traceback
from ast import literal_eval
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect
from ansible.module_utils.six import get_function_code, string_types
from ansible.module_utils._text import to_bytes, to_text
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
from boto import connect_ec2_endpoint
from boto import connect_vpc
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
state = module.params.get('state')
if state not in ['running', 'stopped']:
state = None
reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone)
instances = []
for res in reservations:
if hasattr(res, 'instances'):
for inst in res.instances:
if inst.state == 'terminated':
continue
instances.append(inst)
return reservations, instances
def _set_none_to_blank(dictionary):
result = dictionary
for k in result:
if isinstance(result[k], dict):
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
filters = dict()
vpc_subnet_id = module.params.get('vpc_subnet_id')
vpc_id = None
if vpc_subnet_id:
filters.update({"subnet-id": vpc_subnet_id})
if vpc:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
if vpc_id:
filters.update({"vpc-id": vpc_id})
if tags is not None:
if isinstance(tags, str):
try:
tags = literal_eval(tags)
        except (ValueError, SyntaxError):
            pass
if isinstance(tags, int):
tags = to_text(tags)
# if string, we only care that a tag of that name exists
if isinstance(tags, str):
filters.update({"tag-key": tags})
# if list, append each item to filters
if isinstance(tags, list):
for x in tags:
if isinstance(x, dict):
x = _set_none_to_blank(x)
filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
else:
filters.update({"tag-key": x})
# if dict, add the key and value to the filter
if isinstance(tags, dict):
tags = _set_none_to_blank(tags)
filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))
# lets check to see if the filters dict is empty, if so then stop
if not filters:
module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags))
if state:
# http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
filters.update({'instance-state-name': state})
if zone:
filters.update({'availability-zone': zone})
if module.params.get('id'):
filters['client-token'] = module.params['id']
results = ec2.get_all_instances(filters=filters)
return results
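# Illustrative mapping (not part of the original module) from the accepted
# 'tags' shapes to the EC2 filters built above:
#   tags='foo'                   -> {'tag-key': 'foo'}
#   tags=['foo', {'bar': 'baz'}] -> {'tag-key': 'foo', 'tag:bar': 'baz'}
#   tags={'foo': 'bar'}          -> {'tag:foo': 'bar'}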
def get_instance_info(inst):
instance_info = {'id': inst.id,
'ami_launch_index': inst.ami_launch_index,
'private_ip': inst.private_ip_address,
'private_dns_name': inst.private_dns_name,
'public_ip': inst.ip_address,
'dns_name': inst.dns_name,
'public_dns_name': inst.public_dns_name,
'state_code': inst.state_code,
'architecture': inst.architecture,
'image_id': inst.image_id,
'key_name': inst.key_name,
'placement': inst.placement,
'region': inst.placement[:-1],
'kernel': inst.kernel,
'ramdisk': inst.ramdisk,
'launch_time': inst.launch_time,
'instance_type': inst.instance_type,
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
'hypervisor': inst.hypervisor,
'tags': inst.tags,
'groups': dict((group.id, group.name) for group in inst.groups),
}
try:
instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
except AttributeError:
instance_info['virtualization_type'] = None
try:
instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
except AttributeError:
instance_info['ebs_optimized'] = False
try:
bdm_dict = {}
bdm = getattr(inst, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'status': bdm[device_name].status,
'volume_id': bdm[device_name].volume_id,
'delete_on_termination': bdm[device_name].delete_on_termination
}
instance_info['block_device_mapping'] = bdm_dict
except AttributeError:
instance_info['block_device_mapping'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
instance_info['tenancy'] = 'default'
return instance_info
def boto_supports_associate_public_ip_address(ec2):
try:
network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
getattr(network_interface, "associate_public_ip_address")
return True
except AttributeError:
return False
def boto_supports_profile_name_arg(ec2):
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
def boto_supports_volume_encryption():
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_block_device(module, ec2, volume):
    # Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
# device_type has been used historically to represent volume_type,
# however ec2_vol uses volume_type, as does the BlockDeviceType, so
# we add handling for either/or but not both
if all(key in volume for key in ['device_type', 'volume_type']):
module.fail_json(msg='device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')
if 'device_type' in volume:
module.deprecate('device_type is deprecated for block devices - use volume_type instead',
version=2.9)
# get whichever one is set, or NoneType if neither are set
volume_type = volume.get('device_type') or volume.get('volume_type')
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if volume_type == 'io1' and 'iops' not in volume:
module.fail_json(msg='io1 volumes must have an iops value set')
if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
size = volume.get('volume_size', snapshot.volume_size)
if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
if 'encrypted' in volume:
module.fail_json(msg='You can not set encryption when creating a volume from a snapshot')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg='Cannot set both ephemeral and snapshot')
if boto_supports_volume_encryption():
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'),
encrypted=volume.get('encrypted', None))
else:
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
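# Illustrative example (hypothetical values, not part of the original module):
# a 'volumes' entry like the following yields a gp2 EBS device in the mapping:
#   create_block_device(module, ec2, {'device_name': '/dev/sdb',
#                                     'volume_type': 'gp2', 'volume_size': 100,
#                                     'delete_on_termination': True})
#   # -> BlockDeviceType(size=100, volume_type='gp2', delete_on_termination=True)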
def boto_supports_param_in_spot_request(ec2, param):
method = getattr(ec2, 'request_spot_instances')
return param in get_function_code(method).co_varnames
def await_spot_requests(module, ec2, spot_requests, count):
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
wait_complete = time.time() + spot_wait_timeout
spot_req_inst_ids = dict()
while time.time() < wait_complete:
reqs = ec2.get_all_spot_instance_requests()
for sirb in spot_requests:
if sirb.id in spot_req_inst_ids:
continue
for sir in reqs:
if sir.id != sirb.id:
continue # this is not our spot instance
if sir.instance_id is not None:
spot_req_inst_ids[sirb.id] = sir.instance_id
elif sir.state == 'open':
continue # still waiting, nothing to do here
elif sir.state == 'active':
continue # Instance is created already, nothing to do here
elif sir.state == 'failed':
module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
sir.id, sir.status.code, sir.fault.code, sir.fault.message))
elif sir.state == 'cancelled':
module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
elif sir.state == 'closed':
                    # instance is terminating or marked for termination
                    # this may be intentional on the part of the operator,
                    # or it may have been terminated by AWS due to capacity,
                    # price, or group constraints; in that case, fail the
                    # module unless the request was terminated by the user
if sir.status.code == 'instance-terminated-by-user':
pass
else:
spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
if len(spot_req_inst_ids) < count:
time.sleep(5)
else:
return list(spot_req_inst_ids.values())
module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
def enforce_count(module, ec2, vpc):
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
zone = module.params.get('zone')
if exact_count and count_tag is None:
module.fail_json(msg="you must use the 'count_tag' option with exact_count")
reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)
changed = None
checkmode = False
instance_dict_array = []
changed_instance_ids = None
if len(instances) == exact_count:
changed = False
elif len(instances) < exact_count:
changed = True
to_create = exact_count - len(instances)
if not checkmode:
(instance_dict_array, changed_instance_ids, changed) \
= create_instances(module, ec2, vpc, override_count=to_create)
for inst in instance_dict_array:
instances.append(inst)
elif len(instances) > exact_count:
changed = True
to_remove = len(instances) - exact_count
if not checkmode:
all_instance_ids = sorted([x.id for x in instances])
remove_ids = all_instance_ids[0:to_remove]
instances = [x for x in instances if x.id not in remove_ids]
(changed, instance_dict_array, changed_instance_ids) \
= terminate_instances(module, ec2, remove_ids)
terminated_list = []
for inst in instance_dict_array:
inst['state'] = "terminated"
terminated_list.append(inst)
instance_dict_array = terminated_list
all_instances = []
for inst in instances:
if not isinstance(inst, dict):
warn_if_public_ip_assignment_changed(module, inst)
inst = get_instance_info(inst)
all_instances.append(inst)
return (all_instances, instance_dict_array, changed_instance_ids, changed)
def create_instances(module, ec2, vpc, override_count=None):
key_name = module.params.get('key_name')
id = module.params.get('id')
group_name = module.params.get('group')
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
spot_type = module.params.get('spot_type')
image = module.params.get('image')
if override_count:
count = override_count
else:
count = module.params.get('count')
monitoring = module.params.get('monitoring')
kernel = module.params.get('kernel')
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id')
assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
volumes = module.params.get('volumes')
ebs_optimized = module.params.get('ebs_optimized')
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
source_dest_check = module.boolean(module.params.get('source_dest_check'))
termination_protection = module.boolean(module.params.get('termination_protection'))
network_interfaces = module.params.get('network_interfaces')
spot_launch_group = module.params.get('spot_launch_group')
instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
vpc_id = None
if vpc_subnet_id:
if not vpc:
module.fail_json(msg="region must be specified")
else:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
else:
vpc_id = None
try:
if group_name:
if vpc_id:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
else:
grp_details = ec2.get_all_security_groups()
if isinstance(group_name, string_types):
group_name = [group_name]
unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
if len(unmatched) > 0:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
elif group_id:
if isinstance(group_id, string_types):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
    # Look up any instances that match our run id.
running_instances = []
count_remaining = int(count)
if id is not None:
filter_dict = {'client-token': id, 'instance-state-name': 'running'}
previous_reservations = ec2.get_all_instances(None, filter_dict)
for res in previous_reservations:
for prev_instance in res.instances:
running_instances.append(prev_instance)
count_remaining = count_remaining - len(running_instances)
    # Both min_count and max_count are set to the count parameter. This makes the launch request explicit: we get exactly 'count' instances, or the request fails.
if count_remaining == 0:
changed = False
else:
changed = True
try:
params = {'image_id': image,
'key_name': key_name,
'monitoring_enabled': monitoring,
'placement': zone,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
'user_data': to_bytes(user_data, errors='surrogate_or_strict')}
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
# 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
if instance_profile_name is not None:
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if assign_public_ip is not None:
if not boto_supports_associate_public_ip_address(ec2):
module.fail_json(
msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
elif not vpc_subnet_id:
module.fail_json(
msg="assign_public_ip only available with vpc_subnet_id")
else:
if private_ip:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
private_ip_address=private_ip,
groups=group_id,
associate_public_ip_address=assign_public_ip)
else:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
groups=group_id,
associate_public_ip_address=assign_public_ip)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
params['network_interfaces'] = interfaces
else:
if network_interfaces:
if isinstance(network_interfaces, string_types):
network_interfaces = [network_interfaces]
interfaces = []
for i, network_interface_id in enumerate(network_interfaces):
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
network_interface_id=network_interface_id,
device_index=i)
interfaces.append(interface)
params['network_interfaces'] = \
boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
else:
params['subnet_id'] = vpc_subnet_id
if vpc_subnet_id:
params['security_group_ids'] = group_id
else:
params['security_groups'] = group_name
if volumes:
bdm = BlockDeviceMapping()
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg='Device name must be set for volume')
                # Minimum volume size is 1GB. A volume_size explicitly set to 0
                # is used as a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, ec2, volume)
params['block_device_map'] = bdm
if not spot_price:
if assign_public_ip is not None and private_ip:
params.update(
dict(
min_count=count_remaining,
max_count=count_remaining,
client_token=id,
placement_group=placement_group,
)
)
else:
params.update(
dict(
min_count=count_remaining,
max_count=count_remaining,
client_token=id,
placement_group=placement_group,
private_ip_address=private_ip,
)
)
# For ordinary (not spot) instances, we can select 'stop'
# (the default) or 'terminate' here.
params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
try:
res = ec2.run_instances(**params)
except boto.exception.EC2ResponseError as e:
if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
"InvalidParameterCombination" == e.error_code):
params['instance_initiated_shutdown_behavior'] = 'terminate'
res = ec2.run_instances(**params)
else:
raise
instids = [i.id for i in res.instances]
while True:
try:
ec2.get_all_instances(instids)
break
except boto.exception.EC2ResponseError as e:
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
# there's a race between start and get an instance
continue
else:
module.fail_json(msg=str(e))
terminated_instances = [
str(instance.id) for instance in res.instances if instance.state == 'terminated'
]
if terminated_instances:
module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
else:
if private_ip:
module.fail_json(
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
elif placement_group:
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
            # Spot instances cannot be told to 'stop'; they will always be
            # 'terminate'd. For convenience, we'll ignore the latter value.
if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
module.fail_json(
msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
if spot_launch_group and isinstance(spot_launch_group, string_types):
params['launch_group'] = spot_launch_group
params.update(dict(
count=count_remaining,
type=spot_type,
))
utc_valid_until = (
datetime.datetime.utcnow()
+ datetime.timedelta(seconds=spot_wait_timeout))
params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')
res = ec2.request_spot_instances(spot_price, **params)
if wait:
instids = await_spot_requests(module, ec2, res, count)
else:
instids = []
except boto.exception.BotoServerError as e:
module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))
num_running = 0
wait_timeout = time.time() + wait_timeout
res_list = ()
while wait_timeout > time.time() and num_running < len(instids):
try:
res_list = ec2.get_all_instances(instids)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidInstanceID.NotFound':
time.sleep(1)
continue
else:
raise
num_running = 0
for res in res_list:
num_running += len([i for i in res.instances if i.state == 'running'])
if len(res_list) <= 0:
time.sleep(1)
continue
if wait and num_running < len(instids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
for res in res_list:
running_instances.extend(res.instances)
if source_dest_check is False:
for inst in res.instances:
inst.modify_attribute('sourceDestCheck', False)
if termination_protection is True:
for inst in res.instances:
inst.modify_attribute('disableApiTermination', True)
if instance_tags and instids:
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
instance_dict_array = []
created_instance_ids = []
for inst in running_instances:
inst.update()
d = get_instance_info(inst)
created_instance_ids.append(inst.id)
instance_dict_array.append(d)
return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids):
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
terminated_instance_ids = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state == 'running' or inst.state == 'stopped':
terminated_instance_ids.append(inst.id)
instance_dict_array.append(get_instance_info(inst))
try:
ec2.terminate_instances([inst.id])
except EC2ResponseError as e:
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True
if wait:
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
filters={'instance-state-name': 'terminated'})
try:
num_terminated = sum([len(res.instances) for res in response])
except Exception as e:
time.sleep(1)
continue
if num_terminated < len(terminated_instance_ids):
time.sleep(5)
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
instance_dict_array = []
for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
for inst in res.instances:
instance_dict_array.append(get_instance_info(inst))
return (changed, instance_dict_array, terminated_instance_ids)
def startstop_instances(module, ec2, instance_ids, state, instance_tags):
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
group_id = module.params.get('group_id')
group_name = module.params.get('group')
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
if not instance_tags:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
    # An empty filter does no filtering, so it's safe to pass it to the
    # get_all_instances method even if the user did not specify instance_tags
filters = {}
if instance_tags:
for key, value in instance_tags.items():
filters["tag:" + key] = value
if module.params.get('id'):
filters['client-token'] = module.params['id']
    # Check that our instances are not already in the requested state, and
    # check (and eventually change) instance attributes and instance state
existing_instances_array = []
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
warn_if_public_ip_assignment_changed(module, inst)
changed = (check_source_dest_attr(module, inst, ec2) or
check_termination_protection(module, inst) or changed)
# Check security groups and if we're using ec2-vpc; ec2-classic security groups may not be modified
if inst.vpc_id and group_name:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
if isinstance(group_name, string_types):
group_name = [group_name]
unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
if unmatched:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
elif inst.vpc_id and group_id:
if isinstance(group_id, string_types):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_ids = [grp_item.id for grp_item in grp_details]
if inst.vpc_id and (group_name or group_id):
if set(sg.id for sg in inst.groups) != set(group_ids):
changed = inst.modify_attribute('groupSet', group_ids)
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
if state == 'running':
inst.start()
else:
inst.stop()
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
existing_instances_array.append(inst.id)
instance_ids = list(set(existing_instances_array + (instance_ids or [])))
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
instance_dict_array = []
matched_instances = []
for res in ec2.get_all_instances(instance_ids):
for i in res.instances:
if i.state == state:
instance_dict_array.append(get_instance_info(i))
matched_instances.append(i)
if len(matched_instances) < len(instance_ids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
return (changed, instance_dict_array, instance_ids)
def restart_instances(module, ec2, instance_ids, state, instance_tags):
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
if not instance_tags:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
    # An empty filter does no filtering, so it's safe to pass it to the
    # get_all_instances method even if the user did not specify instance_tags
filters = {}
if instance_tags:
for key, value in instance_tags.items():
filters["tag:" + key] = value
if module.params.get('id'):
filters['client-token'] = module.params['id']
    # Check that our instances are not already in the requested state, and
    # check (and eventually change) instance attributes and instance state
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
warn_if_public_ip_assignment_changed(module, inst)
changed = (check_source_dest_attr(module, inst, ec2) or
check_termination_protection(module, inst) or changed)
# Check instance state
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
inst.reboot()
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
return (changed, instance_dict_array, instance_ids)
def check_termination_protection(module, inst):
termination_protection = module.params.get('termination_protection')
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
return True
def check_source_dest_attr(module, inst, ec2):
source_dest_check = module.params.get('source_dest_check')
if source_dest_check is not None:
try:
if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
inst.modify_attribute('sourceDestCheck', source_dest_check)
return True
except boto.exception.EC2ResponseError as exc:
# instances with more than one Elastic Network Interface will
# fail, because they have the sourceDestCheck attribute defined
# per-interface
if exc.code == 'InvalidInstanceID':
for interface in inst.interfaces:
if interface.source_dest_check != source_dest_check:
ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
return True
else:
module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
exception=traceback.format_exc())
def warn_if_public_ip_assignment_changed(module, instance):
# This is a non-modifiable attribute.
assign_public_ip = module.params.get('assign_public_ip')
# Check that public ip assignment is the same and warn if not
public_dns_name = getattr(instance, 'public_dns_name', None)
if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False):
module.warn("Unable to modify public ip assignment to {0} for instance {1}. "
"Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
key_name=dict(aliases=['keypair']),
id=dict(),
group=dict(type='list', aliases=['groups']),
group_id=dict(type='list'),
zone=dict(aliases=['aws_zone', 'ec2_zone']),
instance_type=dict(aliases=['type']),
spot_price=dict(),
spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group=dict(),
image=dict(),
kernel=dict(),
count=dict(type='int', default='1'),
monitoring=dict(type='bool', default=False),
ramdisk=dict(),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=300),
spot_wait_timeout=dict(default=600),
placement_group=dict(),
user_data=dict(),
instance_tags=dict(type='dict'),
vpc_subnet_id=dict(),
assign_public_ip=dict(type='bool'),
private_ip=dict(),
instance_profile_name=dict(),
instance_ids=dict(type='list', aliases=['instance_id']),
source_dest_check=dict(type='bool', default=None),
termination_protection=dict(type='bool', default=None),
state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']),
exact_count=dict(type='int', default=None),
count_tag=dict(),
volumes=dict(type='list'),
ebs_optimized=dict(type='bool', default=False),
tenancy=dict(default='default'),
network_interfaces=dict(type='list', aliases=['network_interface'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['group_name', 'group_id'],
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids'],
['network_interfaces', 'assign_public_ip'],
['network_interfaces', 'group'],
['network_interfaces', 'group_id'],
['network_interfaces', 'private_ip'],
['network_interfaces', 'vpc_subnet_id'],
],
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if module.params.get('region') or not module.params.get('ec2_url'):
ec2 = ec2_connect(module)
elif module.params.get('ec2_url'):
ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
if 'region' not in aws_connect_kwargs:
aws_connect_kwargs['region'] = ec2.region
vpc = connect_vpc(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
tagged_instances = []
state = module.params['state']
if state == 'absent':
instance_ids = module.params['instance_ids']
if not instance_ids:
module.fail_json(msg='instance_ids list is required for absent state')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif state in ('running', 'stopped'):
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
    elif state == 'restarted':
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
# Always return instances in the same order
if new_instance_ids:
new_instance_ids.sort()
if instance_dict_array:
instance_dict_array.sort(key=lambda x: x['id'])
if tagged_instances:
tagged_instances.sort(key=lambda x: x['id'])
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
if __name__ == '__main__':
main()
# File: azurelinuxagent/common/utils/textutil.py (repo: ezeeyahoo/WALinuxAgent, license: Apache-2.0)
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
import base64
import crypt
import hashlib
import random
import re
import string
import struct
import sys
import zlib
import xml.dom.minidom as minidom
from azurelinuxagent.common.future import ustr
def parse_doc(xml_text):
"""
Parse xml document from string
"""
# The minidom lib has some issue with unicode in python2.
# Encode the string into utf-8 first
xml_text = xml_text.encode('utf-8')
return minidom.parseString(xml_text)
def findall(root, tag, namespace=None):
"""
Get all nodes by tag and namespace under Node root.
"""
if root is None:
return []
if namespace is None:
return root.getElementsByTagName(tag)
else:
return root.getElementsByTagNameNS(namespace, tag)
def find(root, tag, namespace=None):
"""
Get first node by tag and namespace under Node root.
"""
nodes = findall(root, tag, namespace=namespace)
if nodes is not None and len(nodes) >= 1:
return nodes[0]
else:
return None
def gettext(node):
"""
Get node text
"""
if node is None:
return None
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE:
return child.data
return None
def findtext(root, tag, namespace=None):
"""
Get text of node by tag and namespace under Node root.
"""
node = find(root, tag, namespace=namespace)
return gettext(node)
def getattrib(node, attr_name):
"""
Get attribute of xml node
"""
if node is not None:
return node.getAttribute(attr_name)
else:
return None
def unpack(buf, offset, byte_range):
    """
    Unpack bytes into python values.
    """
    result = 0
    for i in byte_range:
        result = (result << 8) | str_to_ord(buf[offset + i])
    return result
def unpack_little_endian(buf, offset, length):
"""
Unpack little endian bytes into python values.
"""
return unpack(buf, offset, list(range(length - 1, -1, -1)))
def unpack_big_endian(buf, offset, length):
"""
Unpack big endian bytes into python values.
"""
return unpack(buf, offset, list(range(0, length)))
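# Worked examples (illustrative, not part of the original module):
#   unpack_big_endian(b'\x01\x02', 0, 2)    -> 0x0102 == 258
#   unpack_little_endian(b'\x01\x02', 0, 2) -> 0x0201 == 513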
def hex_dump3(buf, offset, length):
"""
Dump range of buf in formatted hex.
"""
return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]])
def hex_dump2(buf):
"""
Dump buf in formatted hex.
"""
return hex_dump3(buf, 0, len(buf))
def is_in_range(a, low, high):
"""
    Return True if 'low' <= 'a' <= 'high'
"""
return (a >= low and a <= high)
def is_printable(ch):
"""
Return True if character is displayable.
"""
return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z'))
or is_in_range(ch, str_to_ord('a'), str_to_ord('z'))
or is_in_range(ch, str_to_ord('0'), str_to_ord('9')))
def hex_dump(buffer, size):
"""
    Return a formatted hex dump of 'buffer' of length 'size'.
"""
if size < 0:
size = len(buffer)
result = ""
for i in range(0, size):
if (i % 16) == 0:
result += "%06X: " % i
byte = buffer[i]
if type(byte) == str:
byte = ord(byte.decode('latin1'))
result += "%02X " % byte
if (i & 15) == 7:
result += " "
if ((i + 1) % 16) == 0 or (i + 1) == size:
j = i
while ((j + 1) % 16) != 0:
result += " "
if (j & 7) == 7:
result += " "
j += 1
result += " "
for j in range(i - (i % 16), i + 1):
byte = buffer[j]
if type(byte) == str:
byte = str_to_ord(byte.decode('latin1'))
k = '.'
if is_printable(byte):
k = chr(byte)
result += k
if (i + 1) != size:
result += "\n"
return result
def str_to_ord(a):
"""
Allows indexing into a string or an array of integers transparently.
Generic utility function.
"""
if type(a) == type(b'') or type(a) == type(u''):
a = ord(a)
return a
def compare_bytes(a, b, start, length):
for offset in range(start, start + length):
if str_to_ord(a[offset]) != str_to_ord(b[offset]):
return False
return True
def int_to_ip4_addr(a):
"""
    Format a 32-bit integer as a dotted-quad IPv4 address string (used when building DHCP request strings).
"""
return "%u.%u.%u.%u" % ((a >> 24) & 0xFF,
(a >> 16) & 0xFF,
(a >> 8) & 0xFF,
(a) & 0xFF)
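# Worked example (illustrative, not part of the original module):
#   int_to_ip4_addr(0xC0A80001) -> '192.168.0.1'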
def hexstr_to_bytearray(a):
"""
    Pack a hex string into a binary (byte) string.
"""
b = b""
for c in range(0, len(a) // 2):
b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16))
return b
def set_ssh_config(config, name, val):
found = False
no_match = -1
match_start = no_match
for i in range(0, len(config)):
if config[i].startswith(name) and match_start == no_match:
config[i] = "{0} {1}".format(name, val)
found = True
elif config[i].lower().startswith("match"):
if config[i].lower().startswith("match all"):
# outside match block
match_start = no_match
elif match_start == no_match:
# inside match block
match_start = i
if not found:
if match_start != no_match:
i = match_start
config.insert(i, "{0} {1}".format(name, val))
return config
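# Illustrative example (not part of the original module): a new option is
# inserted before the first 'Match' block so that it applies globally:
#   set_ssh_config(['Port 22', 'Match User foo', 'X11Forwarding no'],
#                  'PasswordAuthentication', 'no')
#   # -> ['Port 22', 'PasswordAuthentication no',
#   #     'Match User foo', 'X11Forwarding no']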
def set_ini_config(config, name, val):
notfound = True
nameEqual = name + '='
length = len(config)
text = "{0}=\"{1}\"".format(name, val)
for i in reversed(range(0, length)):
if config[i].startswith(nameEqual):
config[i] = text
notfound = False
break
if notfound:
config.insert(length - 1, text)
def replace_non_ascii(incoming, replace_char=''):
outgoing = ''
if incoming is not None:
for c in incoming:
if str_to_ord(c) > 128:
outgoing += replace_char
else:
outgoing += c
return outgoing
def remove_bom(c):
"""
    In UTF-8, the BOM is a sequence of three bytes: 0xEF, 0xBB, 0xBF.
"""
if not is_str_none_or_whitespace(c) and \
len(c) > 2 and \
str_to_ord(c[0]) > 128 and \
str_to_ord(c[1]) > 128 and \
str_to_ord(c[2]) > 128:
c = c[3:]
return c
def gen_password_hash(password, crypt_id, salt_len):
collection = string.ascii_letters + string.digits
salt = ''.join(random.choice(collection) for _ in range(salt_len))
salt = "${0}${1}".format(crypt_id, salt)
if sys.version_info[0] == 2:
# if python 2.*, encode to type 'str' to prevent Unicode Encode Error from crypt.crypt
password = password.encode('utf-8')
return crypt.crypt(password, salt)
def get_bytes_from_pem(pem_str):
base64_bytes = ""
for line in pem_str.split('\n'):
if "----" not in line:
base64_bytes += line
return base64_bytes
def compress(s):
"""
Compress a string, and return the base64 encoded result of the compression.
This method returns a string instead of a byte array. It is expected
that this method is called to compress smallish strings, not to compress
the contents of a file. The output of this method is suitable for
embedding in log statements.
"""
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64encode(zlib.compress(bytes(s, 'utf-8'))).decode('utf-8')
return base64.b64encode(zlib.compress(s))
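# Round-trip sketch (illustrative, not part of the original module), Python 3:
#   zlib.decompress(base64.b64decode(compress('hello'))).decode('utf-8')
#   # -> 'hello'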
def b64encode(s):
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8')
return base64.b64encode(s)
def b64decode(s):
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64decode(s).decode('utf-8')
return base64.b64decode(s)
def safe_shlex_split(s):
import shlex
from azurelinuxagent.common.version import PY_VERSION
if PY_VERSION[:2] == (2, 6):
return shlex.split(s.encode('utf-8'))
return shlex.split(s)
def swap_hexstring(s, width=2):
r = len(s) % width
if r != 0:
s = ('0' * (width - (len(s) % width))) + s
return ''.join(reversed(
re.findall(
r'[a-f0-9]{{{0}}}'.format(width),
s,
re.IGNORECASE)))
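# Worked examples (illustrative, not part of the original module): the input is
# zero-padded to a multiple of 'width', then the fixed-width groups reversed:
#   swap_hexstring('0123')  -> '2301'
#   swap_hexstring('3ffab') -> 'abff03'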
def parse_json(json_str):
"""
Parse json string and return a resulting dictionary
"""
# trim null and whitespaces
result = None
if not is_str_empty(json_str):
import json
result = json.loads(json_str.rstrip(' \t\r\n\0'))
return result
def is_str_none_or_whitespace(s):
return s is None or len(s) == 0 or s.isspace()
def is_str_empty(s):
return is_str_none_or_whitespace(s) or is_str_none_or_whitespace(s.rstrip(' \t\r\n\0'))
def hash_strings(string_list):
"""
Compute a cryptographic hash of a list of strings
:param string_list: The strings to be hashed
:return: The cryptographic hash (digest) of the strings in the order provided
"""
sha1_hash = hashlib.sha1()
for item in string_list:
sha1_hash.update(item.encode())
return sha1_hash.digest()
def format_memory_value(unit, value):
units = {'bytes': 1, 'kilobytes': 1024, 'megabytes': 1024*1024, 'gigabytes': 1024*1024*1024}
if unit not in units:
raise ValueError("Unit must be one of {0}".format(units.keys()))
try:
value = float(value)
except TypeError:
raise TypeError('Value must be convertible to a float')
return int(value * units[unit])
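# Worked examples (illustrative, not part of the original module):
#   format_memory_value('kilobytes', 2)     -> 2048
#   format_memory_value('megabytes', '1.5') -> 1572864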
def str_to_encoded_ustr(s, encoding='utf-8'):
"""
    This function takes the string and converts it into the corresponding encoded ustr if it's not already a ustr.
The encoding is utf-8 by default if not specified.
Note: ustr() is a unicode object for Py2 and a str object for Py3.
:param s: The string to convert to ustr
:param encoding: Encoding to use. Utf-8 by default
:return: Returns the corresponding ustr string. Returns None if input is None.
"""
# TODO: Import at the top of the file instead of a local import (using local import here to avoid cyclic dependency)
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if s is None or type(s) is ustr:
        # If it's already a ustr/None then return as is
return s
if PY_VERSION_MAJOR > 2:
try:
# For py3+, str() is unicode by default
if isinstance(s, bytes):
# str.encode() returns bytes which should be decoded to get the str.
return s.decode(encoding)
else:
                # If it's not encoded, just return the string
return ustr(s)
except Exception:
# If some issues in decoding, just return the string
return ustr(s)
# For Py2, explicitly convert the string to unicode with the specified encoding
return ustr(s, encoding=encoding)
# File: var/spack/repos/builtin/packages/mathematica/package.py (repo: xiki-tempula/spack, licenses: ECL-2.0, Apache-2.0, MIT)
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Mathematica(Package):
"""Mathematica: high-powered computation with thousands of Wolfram Language
functions, natural language input, real-world data, mobile support.
Note: A manual download is required for Mathematica.
Spack will search your current directory for the download file.
Alternatively, add this file to a mirror so that Spack can find it.
For instructions on how to set up a mirror, see
http://spack.readthedocs.io/en/latest/mirrors.html"""
homepage = "https://www.wolfram.com/mathematica/"
url = 'file://{0}/Mathematica_12.0.0_LINUX.sh'.format(os.getcwd())
version('12.0.0',
sha256='b9fb71e1afcc1d72c200196ffa434512d208fa2920e207878433f504e58ae9d7',
expand=False)
# Licensing
license_required = True
license_comment = '#'
license_files = ['Configuration/Licensing/mathpass']
license_url = 'https://reference.wolfram.com/language/tutorial/RegistrationAndPasswords.html#857035062'
def install(self, spec, prefix):
sh = which('sh')
sh(self.stage.archive_file, '--', '-auto', '-verbose',
'-targetdir={0}'.format(prefix),
'-execdir={0}'.format(prefix.bin),
'-selinux=y')
# This is what most people would use on a cluster but the installer
# does not symlink it
ws_link_path = os.path.join(prefix.bin, 'wolframscript')
if not os.path.exists(ws_link_path):
ln = which('ln')
ws_path = os.path.join(prefix, 'Executables', 'wolframscript')
ln('-s', ws_path, ws_link_path)
# File: utils/normalizer.py (repo: bibofeng/DeepRL-1, license: Apache-2.0)
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import torch
import numpy as np
class Normalizer:
def __init__(self, o_size):
self.stats = SharedStats(o_size)
def __call__(self, o_):
o = torch.FloatTensor(o_)
self.stats.feed(o)
std = (self.stats.v + 1e-6) ** .5
o = (o - self.stats.m) / std
return o.numpy().reshape(o_.shape)
class StaticNormalizer:
def __init__(self, o_size):
self.offline_stats = SharedStats(o_size)
self.online_stats = SharedStats(o_size)
def __call__(self, o_):
if np.isscalar(o_):
o = torch.FloatTensor([o_])
else:
o = torch.FloatTensor(o_)
self.online_stats.feed(o)
if self.offline_stats.n[0] == 0:
return o_
std = (self.offline_stats.v + 1e-6) ** .5
o = (o - self.offline_stats.m) / std
o = o.numpy()
if np.isscalar(o_):
            o = o.item()  # np.asscalar was deprecated and later removed from numpy
else:
o = o.reshape(o_.shape)
return o
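# Usage sketch (illustrative, not part of the original module): online stats
# are always fed, but normalization uses the offline stats once populated:
#   normalizer = StaticNormalizer(4)
#   o = normalizer(np.zeros(4))  # pass-through while offline stats are empty
#   normalizer.offline_stats.load(normalizer.online_stats)
#   o = normalizer(np.zeros(4))  # now normalized by offline mean/variance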
class SharedStats:
def __init__(self, o_size):
self.m = torch.zeros(o_size)
self.v = torch.zeros(o_size)
self.n = torch.zeros(1)
self.m.share_memory_()
self.v.share_memory_()
self.n.share_memory_()
def feed(self, o):
n = self.n[0]
new_m = self.m * (n / (n + 1)) + o / (n + 1)
self.v.copy_(self.v * (n / (n + 1)) + (o - self.m) * (o - new_m) / (n + 1))
self.m.copy_(new_m)
self.n.add_(1)
def zero(self):
self.m.zero_()
self.v.zero_()
self.n.zero_()
def load(self, stats):
self.m.copy_(stats.m)
self.v.copy_(stats.v)
self.n.copy_(stats.n)
def merge(self, B):
A = self
n_A = self.n[0]
n_B = B.n[0]
n = n_A + n_B
delta = B.m - A.m
m = A.m + delta * n_B / n
v = A.v * n_A + B.v * n_B + delta * delta * n_A * n_B / n
v /= n
self.m.copy_(m)
self.v.copy_(v)
self.n.add_(B.n)
def state_dict(self):
return {'m': self.m.numpy(),
'v': self.v.numpy(),
'n': self.n.numpy()}
def load_state_dict(self, saved):
self.m = torch.FloatTensor(saved['m'])
self.v = torch.FloatTensor(saved['v'])
self.n = torch.FloatTensor(saved['n'])
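# Worked example for SharedStats.merge (illustrative, not part of the original
# module); it combines two running (population) mean/variance estimates:
#   A tracks [1, 3] -> m=2, v=1, n=2;  B tracks [5, 7] -> m=6, v=1, n=2
#   after A.merge(B): m=4, v=5, n=4, matching the stats of [1, 3, 5, 7]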
# File: t5_closed_book_qa/t5_cbqa/preprocessors.py (repo: DagonDD/google-research, license: Apache-2.0)
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""T5 CBQA preprocessors."""
import tensorflow.compat.v1 as tf
def natural_questions_nocontext(
dataset,
prefix='nq question: ',
drop_yes_no=False,
max_tokens=None,
max_answers=None,
):
"""Convert Natural Questions TFDS to open domain with multiple answers.
Examples with no short or yes/no answers are filtered. All short and yes/no
answers (even across annotations) are emitted, so the targets produced by this
preprocessor are invalid in the case of multiple annotations. However, these
should not occur in the train set.
  The function takes the natural_questions TFDS dataset and emits examples of the
form:
{
'inputs': 'nq question: what are the names of the olsen twins'
'targets': 'answer: Mary-Kate answer: Ashley'
}
Args:
dataset: a tf.data.Dataset to process.
prefix: str, prefix to prepend to the inputs.
drop_yes_no: bool, whether to drop yes/no answers, keeping only short
answers.
max_tokens: (Optional) int, the maximum number of tokens (as specified by
NQ) beyond which a short answer is dropped. None are dropped if set to
`None`.
max_answers: (Optional) int, the maximum number of answers to include in the
targets. Will be selected deterministically from the beginning of the
list. All answers are included if set to `None`.
Returns:
a tf.data.Dataset
"""
def nq_map(ex):
"""Map Natural Questions example to text-to-text example."""
inputs = prefix + ex['question']['text']
annotations = ex['annotations']
yes_no_labels = annotations['yes_no_answer']
if drop_yes_no:
yes_no_labels = -1 * tf.ones_like(yes_no_labels)
yes_no_answers = tf.boolean_mask(yes_no_labels, yes_no_labels > -1)
yes_no_answers = tf.where_v2(tf.equal(yes_no_answers, 1), 'yes', 'no')
short_answers = annotations['short_answers']['text'].flat_values
short_answer_starts = annotations['short_answers']['text'].row_starts()
if max_tokens:
start_tokens = annotations['short_answers']['start_token']
end_tokens = annotations['short_answers']['end_token']
dropped_answers = end_tokens - start_tokens > max_tokens
short_answers = tf.boolean_mask(
short_answers, tf.math.logical_not(dropped_answers.values))
# Subtract dropped answers from row starts.
row_drop_count = tf.math.reduce_sum(
tf.cast(dropped_answers, tf.int64), axis=1)
short_answer_starts -= tf.concat(
[[0], tf.math.cumsum(row_drop_count[:-1])], axis=0)
answers = tf.concat([yes_no_answers, short_answers], axis=0)
if max_answers:
answers = answers[:max_answers]
targets = tf.strings.reduce_join('answer: ' + answers, separator=' ')
return {
'inputs': inputs,
'targets': targets,
'short_answers/values': short_answers,
'short_answers/row_starts': short_answer_starts,
'yes_no_answers': yes_no_labels
}
dataset = dataset.map(
nq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset.filter(lambda ex: tf.strings.length(ex['targets']) > 0)
def natural_questions_open(
dataset,
prefix='nq question: '
):
"""Convert Natural Questions Open TFDS to examples.
If there are multiple answers in the input, selects the first one as the
target.
  The function takes the natural_questions_open TFDS dataset and emits examples
of the form:
{
'inputs': 'nq question: What are the names of the Olsen Twins?'
'targets': 'Mary-Kate and Ashley',
'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']
}
Args:
dataset: a tf.data.Dataset to process.
prefix: str, prefix to prepend to the inputs.
Returns:
a tf.data.Dataset
"""
def nq_map(ex):
"""Map Natural Questions example to text-to-text example."""
return {
'inputs': prefix + ex['question'],
'targets': ex['answer'][0],
'answers': ex['answer'],
}
return dataset.map(nq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def trivia_qa_open(
dataset,
prefix='trivia_qa question: '
):
"""Convert TriviaQA dataset to open domain qa examples.
The function takes the trivia_qa TFDS dataset and emits examples of the
form:
{
'inputs': 'trivia_qa question: What are the names of the Olsen Twins?'
'targets': 'Mary-Kate and Ashley',
'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']
}
Args:
dataset: a tf.data.Dataset to process.
prefix: str, prefix to prepend to the inputs.
Returns:
a tf.data.Dataset
"""
def tqa_map(ex):
"""Map TriviaQA example to text-to-text example."""
return {
'inputs': prefix + ex['question'],
'targets': ex['answer']['value'],
'answers': ex['answer']['aliases'],
}
return dataset.map(tqa_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def web_questions_open(
dataset,
prefix='wq question: '
):
"""Convert WebQuestions TFDS to open domain examples.
If there are multiple answers in the input, selects the first one as the
target.
The function takes the web_questions TFDS dataset and emits examples of the
form:
{
'inputs': 'wq question: What are the names of the Olsen Twins?'
'targets': 'Mary-Kate and Ashley',
'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']
}
Args:
dataset: a tf.data.Dataset to process.
prefix: str, prefix to prepend to the inputs.
Returns:
a tf.data.Dataset
"""
def wq_map(ex):
"""Map WebQuestions example to text-to-text example."""
return {
'inputs': prefix + ex['question'],
'targets': ex['answers'][0],
'answers': ex['answers'],
}
return dataset.map(wq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def sample_answer(
dataset,
):
"""Replaces target with sampled answer."""
def samp_map(ex):
answers = tf.random.shuffle(ex['answers'])
return {
'inputs': ex['inputs'],
'targets': answers[0],
'answers': answers,
}
return dataset.map(samp_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
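A minimal eager-mode sanity check for the preprocessors above (the record is hand-built to mimic the natural_questions_open schema, not real TFDS output):

import tensorflow.compat.v1 as tf
tf.enable_eager_execution()

ds = tf.data.Dataset.from_tensor_slices({
    'question': ['what are the names of the olsen twins'],
    'answer': [['Mary-Kate and Ashley', 'Ashley and Mary-Kate']],
})
for ex in natural_questions_open(ds):
    print(ex['inputs'].numpy())   # b'nq question: what are the names of the olsen twins'
    print(ex['targets'].numpy())  # b'Mary-Kate and Ashley'

The same mapped dataset can then be fed through sample_answer to randomize which alias becomes the target.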
| 31.384259
| 80
| 0.682107
|
import tensorflow.compat.v1 as tf
def natural_questions_nocontext(
dataset,
prefix='nq question: ',
drop_yes_no=False,
max_tokens=None,
max_answers=None,
):
def nq_map(ex):
inputs = prefix + ex['question']['text']
annotations = ex['annotations']
yes_no_labels = annotations['yes_no_answer']
if drop_yes_no:
yes_no_labels = -1 * tf.ones_like(yes_no_labels)
yes_no_answers = tf.boolean_mask(yes_no_labels, yes_no_labels > -1)
yes_no_answers = tf.where_v2(tf.equal(yes_no_answers, 1), 'yes', 'no')
short_answers = annotations['short_answers']['text'].flat_values
short_answer_starts = annotations['short_answers']['text'].row_starts()
if max_tokens:
start_tokens = annotations['short_answers']['start_token']
end_tokens = annotations['short_answers']['end_token']
dropped_answers = end_tokens - start_tokens > max_tokens
short_answers = tf.boolean_mask(
short_answers, tf.math.logical_not(dropped_answers.values))
row_drop_count = tf.math.reduce_sum(
tf.cast(dropped_answers, tf.int64), axis=1)
short_answer_starts -= tf.concat(
[[0], tf.math.cumsum(row_drop_count[:-1])], axis=0)
answers = tf.concat([yes_no_answers, short_answers], axis=0)
if max_answers:
answers = answers[:max_answers]
targets = tf.strings.reduce_join('answer: ' + answers, separator=' ')
return {
'inputs': inputs,
'targets': targets,
'short_answers/values': short_answers,
'short_answers/row_starts': short_answer_starts,
'yes_no_answers': yes_no_labels
}
dataset = dataset.map(
nq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset.filter(lambda ex: tf.strings.length(ex['targets']) > 0)
def natural_questions_open(
dataset,
prefix='nq question: '
):
def nq_map(ex):
return {
'inputs': prefix + ex['question'],
'targets': ex['answer'][0],
'answers': ex['answer'],
}
return dataset.map(nq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def trivia_qa_open(
dataset,
prefix='trivia_qa question: '
):
def tqa_map(ex):
return {
'inputs': prefix + ex['question'],
'targets': ex['answer']['value'],
'answers': ex['answer']['aliases'],
}
return dataset.map(tqa_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def web_questions_open(
dataset,
prefix='wq question: '
):
def wq_map(ex):
return {
'inputs': prefix + ex['question'],
'targets': ex['answers'][0],
'answers': ex['answers'],
}
return dataset.map(wq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def sample_answer(
dataset,
):
def samp_map(ex):
answers = tf.random.shuffle(ex['answers'])
return {
'inputs': ex['inputs'],
'targets': answers[0],
'answers': answers,
}
return dataset.map(samp_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
| true
| true
|
f7171d5731043db4e13be2098f930ef1ab4b964b
| 2,388
|
py
|
Python
|
matchms/Fragments.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
matchms/Fragments.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
matchms/Fragments.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
import numpy
class Fragments:
"""
    Stores arrays of intensities and m/z values, with some checks on their internal consistency.
For example
.. testcode::
import numpy as np
from matchms import Fragments
mz = np.array([10, 20, 30], dtype="float")
intensities = np.array([100, 20, 300], dtype="float")
peaks = Fragments(mz=mz, intensities=intensities)
print(peaks[2])
Should output
.. testoutput::
[ 30. 300.]
Attributes
----------
mz:
Numpy array of m/z values.
intensities:
Numpy array of peak intensity values.
"""
def __init__(self, mz=None, intensities=None):
assert isinstance(mz, numpy.ndarray), "Input argument 'mz' should be a numpy.array."
assert isinstance(intensities, numpy.ndarray), "Input argument 'intensities' should be a numpy.array."
assert mz.shape == intensities.shape, "Input arguments 'mz' and 'intensities' should be the same shape."
assert mz.dtype == "float", "Input argument 'mz' should be an array of type float."
assert intensities.dtype == "float", "Input argument 'intensities' should be an array of type float."
self._mz = mz
self._intensities = intensities
assert self._is_sorted(), "mz values are out of order."
def __eq__(self, other):
return \
self.mz.shape == other.mz.shape and \
numpy.allclose(self.mz, other.mz) and \
self.intensities.shape == other.intensities.shape and \
numpy.allclose(self.intensities, other.intensities)
def __len__(self):
return self._mz.size
def __getitem__(self, item):
return numpy.asarray([self.mz[item], self.intensities[item]])
def _is_sorted(self):
return numpy.all(self.mz[:-1] <= self.mz[1:])
def clone(self):
return Fragments(self.mz, self.intensities)
@property
def mz(self):
"""getter method for mz private variable"""
return self._mz.copy()
@property
def intensities(self):
"""getter method for intensities private variable"""
return self._intensities.copy()
@property
def to_numpy(self):
"""getter method to return stacked numpy array of both peak mz and
intensities"""
return numpy.vstack((self.mz, self.intensities)).T
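A short sketch of the guarantees this class enforces (values are illustrative): the mz and intensities properties hand back copies, and unsorted m/z input trips the _is_sorted() assertion.

import numpy as np

peaks = Fragments(mz=np.array([10., 20., 30.]),
                  intensities=np.array([100., 20., 300.]))
view = peaks.mz
view[0] = -1.0                      # mutates only the returned copy
assert peaks.mz[0] == 10.0
try:
    Fragments(mz=np.array([20., 10.]), intensities=np.array([1., 2.]))
except AssertionError as err:
    print(err)                      # mz values are out of order.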
| 29.481481
| 112
| 0.624372
|
import numpy
class Fragments:
def __init__(self, mz=None, intensities=None):
assert isinstance(mz, numpy.ndarray), "Input argument 'mz' should be a numpy.array."
assert isinstance(intensities, numpy.ndarray), "Input argument 'intensities' should be a numpy.array."
assert mz.shape == intensities.shape, "Input arguments 'mz' and 'intensities' should be the same shape."
assert mz.dtype == "float", "Input argument 'mz' should be an array of type float."
assert intensities.dtype == "float", "Input argument 'intensities' should be an array of type float."
self._mz = mz
self._intensities = intensities
assert self._is_sorted(), "mz values are out of order."
def __eq__(self, other):
return \
self.mz.shape == other.mz.shape and \
numpy.allclose(self.mz, other.mz) and \
self.intensities.shape == other.intensities.shape and \
numpy.allclose(self.intensities, other.intensities)
def __len__(self):
return self._mz.size
def __getitem__(self, item):
return numpy.asarray([self.mz[item], self.intensities[item]])
def _is_sorted(self):
return numpy.all(self.mz[:-1] <= self.mz[1:])
def clone(self):
return Fragments(self.mz, self.intensities)
@property
def mz(self):
return self._mz.copy()
@property
def intensities(self):
return self._intensities.copy()
@property
def to_numpy(self):
return numpy.vstack((self.mz, self.intensities)).T
| true
| true
|
f7171db882ba4b1df49ab8686fc4f9a29d574d7c
| 7,255
|
py
|
Python
|
expenses/views/category.py
|
PrzemyslawSarnacki/django-expenses
|
9e07d57e3e2292a318df7c7d8cc4571bb75309d3
|
[
"BSD-3-Clause"
] | 6
|
2018-09-29T23:58:50.000Z
|
2022-03-10T03:40:38.000Z
|
expenses/views/category.py
|
PrzemyslawSarnacki/django-expenses
|
9e07d57e3e2292a318df7c7d8cc4571bb75309d3
|
[
"BSD-3-Clause"
] | 2
|
2021-02-15T10:20:23.000Z
|
2022-03-09T22:32:37.000Z
|
expenses/views/category.py
|
PrzemyslawSarnacki/django-expenses
|
9e07d57e3e2292a318df7c7d8cc4571bb75309d3
|
[
"BSD-3-Clause"
] | 4
|
2020-08-06T12:44:09.000Z
|
2021-04-04T13:45:34.000Z
|
# Django-Expenses
# Copyright © 2018-2021, Chris Warrick.
# All rights reserved.
# See /LICENSE for licensing information.
"""Category management."""
from collections import defaultdict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils.translation import gettext as _
from expenses.forms import CategoryForm
from expenses.models import Category, Expense, ExpenseTemplate
from expenses.utils import revchron
@login_required
def category_list(request):
paginator = Paginator(Category.user_objects(request), settings.EXPENSES_PAGE_SIZE)
page = request.GET.get("page")
categories = paginator.get_page(page)
return render(
request,
"expenses/category_list.html",
{"htmltitle": _("Categories"), "pid": "category_list", "categories": categories,},
)
@login_required
def category_show(request, slug):
category = get_object_or_404(Category, slug=slug, user=request.user)
paginator = Paginator(
revchron(Expense.objects.filter(user=request.user, category=category).select_related("category")),
settings.EXPENSES_PAGE_SIZE,
)
page = request.GET.get("page")
expenses = paginator.get_page(page)
return render(
request, "expenses/category_show.html", {"expenses": expenses, "category": category, "pid": "category_show",}
)
@login_required
def category_show_templates(request, slug):
category = get_object_or_404(Category, slug=slug, user=request.user)
paginator = Paginator(
ExpenseTemplate.objects.filter(user=request.user, category=category)
.order_by("-date_added")
.select_related("category"),
settings.EXPENSES_PAGE_SIZE,
)
page = request.GET.get("page")
templates = paginator.get_page(page)
return render(
request,
"expenses/category_show_templates.html",
{"templates": templates, "category": category, "pid": "category_show_templates",},
)
@login_required
def category_add(request):
form = CategoryForm()
if request.method == "POST":
form = CategoryForm(request.POST)
if form.is_valid():
inst = form.save(commit=False)
inst.user = request.user
inst.save()
form.save_m2m()
return HttpResponseRedirect(reverse("expenses:category_list"))
return render(
request,
"expenses/category_add_edit.html",
{
"form": form,
"form_mode": "add",
"htmltitle": _("Add a category"),
"title": _("Add a category"),
"pid": "category_add",
},
)
@login_required
def category_edit(request, slug):
category = get_object_or_404(Category, slug=slug, user=request.user)
form = CategoryForm(instance=category)
if request.method == "POST":
form = CategoryForm(request.POST, instance=category)
if form.is_valid():
inst = form.save()
return HttpResponseRedirect(inst.get_absolute_url())
return render(
request,
"expenses/category_add_edit.html",
{
"form": form,
"form_mode": "edit",
"htmltitle": _("Edit category %s") % category.name,
"title": _("Edit category"),
"pid": "category_edit",
},
)
@login_required
def category_delete(request, slug):
category = get_object_or_404(Category, slug=slug, user=request.user)
move_succeeded = True
if request.method == "POST":
if category.total_count != 0:
dest = request.POST.get("move_destination")
move_succeeded = category.prepare_deletion(dest, request.user)
if move_succeeded:
category.delete()
messages.add_message(request, messages.SUCCESS, _("%s has been deleted.") % category.name)
return HttpResponseRedirect(reverse("expenses:category_list"))
categories = Category.user_objects(request)
show_del_button = True
    if categories.count() == 1 and category.total_count > 0:
show_del_button = False
return render(
request,
"expenses/category_delete.html",
{
"object": category,
"deletion_failed": not move_succeeded,
"htmltitle": _("Delete category %s") % category.name,
"pid": "category_delete",
"categories": categories,
"show_del_button": show_del_button,
},
)
@login_required
def category_bulk_edit(request):
categories = Category.user_objects(request)
if request.method == "POST":
added_count = 0
changed_count = 0
unchanged_count = 0
failure_count = 0
failure_list = []
for cat in categories:
prefix = "cat_{}_".format(cat.pk)
new_name = request.POST.get(prefix + "name")
new_order = request.POST.get(prefix + "order")
if new_name and new_order and new_order.isnumeric():
# can be changed
new_order = int(new_order)
if cat.name != new_name or cat.order != new_order:
cat.name = new_name
cat.order = new_order
cat.save()
changed_count += 1
else:
unchanged_count += 1
else:
failure_count += 1
failure_list.append(cat.name)
additions = defaultdict(dict)
print(request.POST)
for k, v in request.POST.items():
if k.startswith("add_"):
print(k, v)
_add, aid, key = k.split("_")
additions[aid][key] = v
for k, fields in additions.items():
new_name = fields.get("name")
new_order = fields.get("order")
if new_name and new_order and new_order.isnumeric():
c = Category()
c.name = new_name
c.order = new_order
c.user = request.user
c.save()
added_count += 1
else:
failure_count += 1
failure_list.append("+{}/{}".format(new_name, new_order))
return render(
request,
"expenses/category_bulk_edit_results.html",
{
"htmltitle": _("Edit categories"),
"title": _("Edit categories"),
"pid": "category_bulk_edit_results",
"added_count": added_count,
"changed_count": changed_count,
"unchanged_count": unchanged_count,
"failure_count": failure_count,
"failure_list": failure_list,
},
)
return render(
request,
"expenses/category_bulk_edit.html",
{
"categories": categories,
"htmltitle": _("Edit categories"),
"title": _("Edit categories"),
"pid": "category_bulk_edit",
},
)
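A hypothetical urls.py sketch for wiring these views (the route names are taken from the reverse() calls above; the paths themselves are assumptions, not from the original repo):

from django.urls import path
from expenses.views import category

app_name = "expenses"
urlpatterns = [
    path("categories/", category.category_list, name="category_list"),
    path("categories/add/", category.category_add, name="category_add"),
    path("categories/bulk-edit/", category.category_bulk_edit, name="category_bulk_edit"),
    path("categories/<slug:slug>/", category.category_show, name="category_show"),
    path("categories/<slug:slug>/edit/", category.category_edit, name="category_edit"),
    path("categories/<slug:slug>/delete/", category.category_delete, name="category_delete"),
]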
| 32.533632
| 117
| 0.596141
|
from collections import defaultdict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils.translation import gettext as _
from expenses.forms import CategoryForm
from expenses.models import Category, Expense, ExpenseTemplate
from expenses.utils import revchron
@login_required
def category_list(request):
paginator = Paginator(Category.user_objects(request), settings.EXPENSES_PAGE_SIZE)
page = request.GET.get("page")
categories = paginator.get_page(page)
return render(
request,
"expenses/category_list.html",
{"htmltitle": _("Categories"), "pid": "category_list", "categories": categories,},
)
@login_required
def category_show(request, slug):
category = get_object_or_404(Category, slug=slug, user=request.user)
paginator = Paginator(
revchron(Expense.objects.filter(user=request.user, category=category).select_related("category")),
settings.EXPENSES_PAGE_SIZE,
)
page = request.GET.get("page")
expenses = paginator.get_page(page)
return render(
request, "expenses/category_show.html", {"expenses": expenses, "category": category, "pid": "category_show",}
)
@login_required
def category_show_templates(request, slug):
category = get_object_or_404(Category, slug=slug, user=request.user)
paginator = Paginator(
ExpenseTemplate.objects.filter(user=request.user, category=category)
.order_by("-date_added")
.select_related("category"),
settings.EXPENSES_PAGE_SIZE,
)
page = request.GET.get("page")
templates = paginator.get_page(page)
return render(
request,
"expenses/category_show_templates.html",
{"templates": templates, "category": category, "pid": "category_show_templates",},
)
@login_required
def category_add(request):
form = CategoryForm()
if request.method == "POST":
form = CategoryForm(request.POST)
if form.is_valid():
inst = form.save(commit=False)
inst.user = request.user
inst.save()
form.save_m2m()
return HttpResponseRedirect(reverse("expenses:category_list"))
return render(
request,
"expenses/category_add_edit.html",
{
"form": form,
"form_mode": "add",
"htmltitle": _("Add a category"),
"title": _("Add a category"),
"pid": "category_add",
},
)
@login_required
def category_edit(request, slug):
category = get_object_or_404(Category, slug=slug, user=request.user)
form = CategoryForm(instance=category)
if request.method == "POST":
form = CategoryForm(request.POST, instance=category)
if form.is_valid():
inst = form.save()
return HttpResponseRedirect(inst.get_absolute_url())
return render(
request,
"expenses/category_add_edit.html",
{
"form": form,
"form_mode": "edit",
"htmltitle": _("Edit category %s") % category.name,
"title": _("Edit category"),
"pid": "category_edit",
},
)
@login_required
def category_delete(request, slug):
category = get_object_or_404(Category, slug=slug, user=request.user)
move_succeeded = True
if request.method == "POST":
if category.total_count != 0:
dest = request.POST.get("move_destination")
move_succeeded = category.prepare_deletion(dest, request.user)
if move_succeeded:
category.delete()
messages.add_message(request, messages.SUCCESS, _("%s has been deleted.") % category.name)
return HttpResponseRedirect(reverse("expenses:category_list"))
categories = Category.user_objects(request)
show_del_button = True
    if categories.count() == 1 and category.total_count > 0:
show_del_button = False
return render(
request,
"expenses/category_delete.html",
{
"object": category,
"deletion_failed": not move_succeeded,
"htmltitle": _("Delete category %s") % category.name,
"pid": "category_delete",
"categories": categories,
"show_del_button": show_del_button,
},
)
@login_required
def category_bulk_edit(request):
categories = Category.user_objects(request)
if request.method == "POST":
added_count = 0
changed_count = 0
unchanged_count = 0
failure_count = 0
failure_list = []
for cat in categories:
prefix = "cat_{}_".format(cat.pk)
new_name = request.POST.get(prefix + "name")
new_order = request.POST.get(prefix + "order")
if new_name and new_order and new_order.isnumeric():
new_order = int(new_order)
if cat.name != new_name or cat.order != new_order:
cat.name = new_name
cat.order = new_order
cat.save()
changed_count += 1
else:
unchanged_count += 1
else:
failure_count += 1
failure_list.append(cat.name)
additions = defaultdict(dict)
print(request.POST)
for k, v in request.POST.items():
if k.startswith("add_"):
print(k, v)
_add, aid, key = k.split("_")
additions[aid][key] = v
for k, fields in additions.items():
new_name = fields.get("name")
new_order = fields.get("order")
if new_name and new_order and new_order.isnumeric():
c = Category()
c.name = new_name
c.order = new_order
c.user = request.user
c.save()
added_count += 1
else:
failure_count += 1
failure_list.append("+{}/{}".format(new_name, new_order))
return render(
request,
"expenses/category_bulk_edit_results.html",
{
"htmltitle": _("Edit categories"),
"title": _("Edit categories"),
"pid": "category_bulk_edit_results",
"added_count": added_count,
"changed_count": changed_count,
"unchanged_count": unchanged_count,
"failure_count": failure_count,
"failure_list": failure_list,
},
)
return render(
request,
"expenses/category_bulk_edit.html",
{
"categories": categories,
"htmltitle": _("Edit categories"),
"title": _("Edit categories"),
"pid": "category_bulk_edit",
},
)
| true
| true
|
f7171df7f02b23aa472799d2c6f707b2a0a863d3
| 2,471
|
py
|
Python
|
src/scaling/scaling_exec.py
|
benh/twesos
|
194e1976d474005d807f37e7204ea08766e4b42a
|
[
"BSD-3-Clause"
] | 1
|
2019-02-17T15:56:26.000Z
|
2019-02-17T15:56:26.000Z
|
src/scaling/scaling_exec.py
|
benh/twesos
|
194e1976d474005d807f37e7204ea08766e4b42a
|
[
"BSD-3-Clause"
] | null | null | null |
src/scaling/scaling_exec.py
|
benh/twesos
|
194e1976d474005d807f37e7204ea08766e4b42a
|
[
"BSD-3-Clause"
] | 3
|
2017-07-10T07:28:30.000Z
|
2020-07-25T19:48:07.000Z
|
#!/usr/bin/env python
import mesos
import os
import pickle
import sys
CPUS = 1
MEM = 50*1024*1024
class NestedScheduler(mesos.Scheduler):
def __init__(self, todo, duration, executor):
mesos.Scheduler.__init__(self)
self.tid = 0
self.todo = todo
self.finished = 0
self.duration = duration
self.executor = executor
def getFrameworkName(self, driver):
return "Nested Framework: %d todo at %d secs" % (self.todo, self.duration)
def getExecutorInfo(self, driver):
execPath = os.path.join(os.getcwd(), "nested_exec")
return mesos.ExecutorInfo(execPath, "")
def registered(self, driver, fid):
print "Nested Scheduler Registered!"
def resourceOffer(self, driver, oid, offers):
tasks = []
for offer in offers:
if self.todo != self.tid:
self.tid += 1
pars = {"cpus": "%d" % CPUS, "mem": "%d" % MEM}
task = mesos.TaskDescription(self.tid, offer.slaveId,
"task %d" % self.tid, pars,
pickle.dumps(self.duration))
tasks.append(task)
#msg = mesos.FrameworkMessage(-1, , "")
#executor.sendFrameworkMessage("")
driver.replyToOffer(oid, tasks, {})
def statusUpdate(self, driver, status):
if status.state == mesos.TASK_FINISHED:
self.finished += 1
if self.finished == self.todo:
print "All nested tasks done, stopping scheduler and enclosing executor!"
driver.stop()
self.executor.stop()
class ScalingExecutor(mesos.Executor):
def __init__(self):
mesos.Executor.__init__(self)
self.tid = -1
self.nested_driver = -1
def launchTask(self, driver, task):
self.tid = task.taskId
master, (todo, duration) = pickle.loads(task.arg)
scheduler = NestedScheduler(todo, duration, self)
print "Running here:" + master
self.nested_driver = mesos.MesosSchedulerDriver(scheduler, master)
self.nested_driver.start()
def killTask(self, driver, tid):
if (tid != self.tid):
print "Expecting different task id ... killing anyway!"
if self.nested_driver != -1:
self.nested_driver.stop()
self.nested_driver.join()
driver.sendStatusUpdate(mesos.TaskStatus(tid, mesos.TASK_FINISHED, ""))
def shutdown(self, driver):
    self.killTask(driver, self.tid)
def error(self, driver, code, message):
print "Error: %s" % message
if __name__ == "__main__":
mesos.MesosExecutorDriver(ScalingExecutor()).run()
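A hedged sketch of the task argument launchTask() above expects: the enclosing framework must pickle a (master, (todo, duration)) tuple. The master URL below is a placeholder.

import pickle

master = "mesos://127.0.0.1:5050"           # assumed master URL
arg = pickle.dumps((master, (10, 30)))      # 10 nested tasks, 30 s each
m, (todo, duration) = pickle.loads(arg)
assert m == master and todo == 10 and duration == 30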
| 29.771084
| 79
| 0.647511
|
import mesos
import os
import pickle
import sys
CPUS = 1
MEM = 50*1024*1024
class NestedScheduler(mesos.Scheduler):
def __init__(self, todo, duration, executor):
mesos.Scheduler.__init__(self)
self.tid = 0
self.todo = todo
self.finished = 0
self.duration = duration
self.executor = executor
def getFrameworkName(self, driver):
return "Nested Framework: %d todo at %d secs" % (self.todo, self.duration)
def getExecutorInfo(self, driver):
execPath = os.path.join(os.getcwd(), "nested_exec")
return mesos.ExecutorInfo(execPath, "")
def registered(self, driver, fid):
print "Nested Scheduler Registered!"
def resourceOffer(self, driver, oid, offers):
tasks = []
for offer in offers:
if self.todo != self.tid:
self.tid += 1
pars = {"cpus": "%d" % CPUS, "mem": "%d" % MEM}
task = mesos.TaskDescription(self.tid, offer.slaveId,
"task %d" % self.tid, pars,
pickle.dumps(self.duration))
tasks.append(task)
driver.replyToOffer(oid, tasks, {})
def statusUpdate(self, driver, status):
if status.state == mesos.TASK_FINISHED:
self.finished += 1
if self.finished == self.todo:
print "All nested tasks done, stopping scheduler and enclosing executor!"
driver.stop()
self.executor.stop()
class ScalingExecutor(mesos.Executor):
def __init__(self):
mesos.Executor.__init__(self)
self.tid = -1
self.nested_driver = -1
def launchTask(self, driver, task):
self.tid = task.taskId
master, (todo, duration) = pickle.loads(task.arg)
scheduler = NestedScheduler(todo, duration, self)
print "Running here:" + master
self.nested_driver = mesos.MesosSchedulerDriver(scheduler, master)
self.nested_driver.start()
def killTask(self, driver, tid):
if (tid != self.tid):
print "Expecting different task id ... killing anyway!"
if self.nested_driver != -1:
self.nested_driver.stop()
self.nested_driver.join()
driver.sendStatusUpdate(mesos.TaskStatus(tid, mesos.TASK_FINISHED, ""))
def shutdown(self, driver):
    self.killTask(driver, self.tid)
def error(self, driver, code, message):
print "Error: %s" % message
if __name__ == "__main__":
mesos.MesosExecutorDriver(ScalingExecutor()).run()
| false
| true
|
f7171e3b6b8107097e5ca08c85f785efd7b32b6f
| 1,904
|
py
|
Python
|
st_nodeserver.py
|
Einstein42/st-nodeserver
|
5309bb8bb4d076f981feed752e39e67db3cb9d00
|
[
"MIT"
] | 1
|
2017-04-09T01:49:11.000Z
|
2017-04-09T01:49:11.000Z
|
st_nodeserver.py
|
Einstein42/st-nodeserver
|
5309bb8bb4d076f981feed752e39e67db3cb9d00
|
[
"MIT"
] | null | null | null |
st_nodeserver.py
|
Einstein42/st-nodeserver
|
5309bb8bb4d076f981feed752e39e67db3cb9d00
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
ST Micro Node Server for Polyglot
by Einstein.42(James Milne)
milne.james@gmail.com
'''
import sys
from polyglot.nodeserver_api import SimpleNodeServer, PolyglotConnector
from st_types import STControl
VERSION = "0.0.1"
class STNodeServer(SimpleNodeServer):
''' ST Micro Node Server '''
sensors = []
def setup(self):
self.logger = self.poly.logger
self.logger.info('Config File param: %s', self.poly.configfile)
try:
self.address = self.poly.nodeserver_config['server']['address']
self.port = self.poly.nodeserver_config['server']['port']
except (KeyError, ValueError) as ex:
self.logger.error('Could not find address or port value in config file. Exiting...')
sys.exit()
self.logger.info('Using Leshan Server: {} Port: {}'.format(self.address, self.port))
manifest = self.config.get('manifest', {})
self.controller = STControl(self, 'stcontrol', 'ST Control', True, manifest)
self.controller._discover()
self.update_config()
def poll(self):
if len(self.sensors) >= 1:
for i in self.sensors:
i.update_info()
def long_poll(self):
pass
def report_drivers(self):
if len(self.sensors) >= 1:
for i in self.sensors:
i.report_driver()
def main():
# Setup connection, node server, and nodes
poly = PolyglotConnector()
    # Override shortpoll and longpoll timers to 30/60; polling once per second is unnecessary
nserver = STNodeServer(poly, 30, 60)
poly.connect()
poly.wait_for_config()
poly.logger.info("ST NodeServer Interface version " + VERSION + " created. Initiating setup.")
nserver.setup()
poly.logger.info("Setup completed. Running Server.")
nserver.run()
if __name__ == "__main__":
main()
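A sketch of the configuration shape setup() requires; without a server block carrying address and port, the node server exits. (The values below are placeholders, not from the original repo.)

nodeserver_config = {
    "server": {
        "address": "192.168.1.10",   # assumed Leshan server address
        "port": 5683,                # assumed Leshan CoAP port
    },
}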
| 31.733333
| 98
| 0.628676
|
import sys
from polyglot.nodeserver_api import SimpleNodeServer, PolyglotConnector
from st_types import STControl
VERSION = "0.0.1"
class STNodeServer(SimpleNodeServer):
sensors = []
def setup(self):
self.logger = self.poly.logger
self.logger.info('Config File param: %s', self.poly.configfile)
try:
self.address = self.poly.nodeserver_config['server']['address']
self.port = self.poly.nodeserver_config['server']['port']
except (KeyError, ValueError) as ex:
self.logger.error('Could not find address or port value in config file. Exiting...')
sys.exit()
self.logger.info('Using Leshan Server: {} Port: {}'.format(self.address, self.port))
manifest = self.config.get('manifest', {})
self.controller = STControl(self, 'stcontrol', 'ST Control', True, manifest)
self.controller._discover()
self.update_config()
def poll(self):
if len(self.sensors) >= 1:
for i in self.sensors:
i.update_info()
def long_poll(self):
pass
def report_drivers(self):
if len(self.sensors) >= 1:
for i in self.sensors:
i.report_driver()
def main():
poly = PolyglotConnector()
nserver = STNodeServer(poly, 30, 60)
poly.connect()
poly.wait_for_config()
poly.logger.info("ST NodeServer Interface version " + VERSION + " created. Initiating setup.")
nserver.setup()
poly.logger.info("Setup completed. Running Server.")
nserver.run()
if __name__ == "__main__":
main()
| true
| true
|
f7171ff457f47582df7834c6b68e24319c65475a
| 5,475
|
py
|
Python
|
ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | 3
|
2016-12-01T15:55:11.000Z
|
2016-12-01T15:56:38.000Z
|
ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
class TestTezClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "TEZ/0.4.0.2.1/package"
STACK_VERSION = "2.1"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
classname = "TezClient",
command = "configure",
config_file="default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/etc/tez',
mode = 0755
)
self.assertResourceCalled('Directory', '/etc/tez/conf',
owner = 'tez',
group = 'hadoop',
recursive = True
)
self.assertResourceCalled('XmlConfig', 'tez-site.xml',
owner = 'tez',
group = 'hadoop',
conf_dir = '/etc/tez/conf',
configurations = self.getConfig()['configurations']['tez-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['tez-site'],
mode = 0664
)
self.assertResourceCalled('File', '/etc/tez/conf/tez-env.sh',
owner = 'tez',
content = InlineTemplate(self.getConfig()['configurations']['tez-env']['content']),
mode=0555
)
self.assertNoMoreResources()
@patch("resource_management.libraries.functions.get_hdp_version")
def test_upgrade(self, get_hdp_version_mock):
    get_hdp_version_mock.return_value = "2.2.1.0-2067"
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
classname = "TezClient",
command = "restart",
config_file="client-upgrade.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
# for now, it's enough that hdp-select is confirmed
@patch("resource_management.libraries.functions.get_hdp_version")
def test_upgrade_23(self, get_hdp_version_mock):
    get_hdp_version_mock.return_value = "2.2.1.0-2067"
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
classname = "TezClient",
command = "restart",
config_file="client-upgrade.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
# for now, it's enough that hdp-select is confirmed
def test_pre_upgrade_restart_23(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/client-upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
classname = "TezClient",
command = "pre_upgrade_restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
self.assertNoMoreResources()
self.assertEquals(2, mocks_dict['call'].call_count)
self.assertEquals(2, mocks_dict['checked_call'].call_count)
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'tez', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'tez', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[1][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[1][0][0])
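A standalone sketch of the mock pattern these tests rely on: return_value must be assigned before the code under test invokes the patched function, otherwise the call yields a bare MagicMock instead of the stubbed version string.

from mock.mock import MagicMock

get_hdp_version = MagicMock()
get_hdp_version.return_value = "2.2.1.0-2067"
assert get_hdp_version() == "2.2.1.0-2067"
assert get_hdp_version.call_count == 1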
| 42.773438
| 122
| 0.641461
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
class TestTezClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "TEZ/0.4.0.2.1/package"
STACK_VERSION = "2.1"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
classname = "TezClient",
command = "configure",
config_file="default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/etc/tez',
mode = 0755
)
self.assertResourceCalled('Directory', '/etc/tez/conf',
owner = 'tez',
group = 'hadoop',
recursive = True
)
self.assertResourceCalled('XmlConfig', 'tez-site.xml',
owner = 'tez',
group = 'hadoop',
conf_dir = '/etc/tez/conf',
configurations = self.getConfig()['configurations']['tez-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['tez-site'],
mode = 0664
)
self.assertResourceCalled('File', '/etc/tez/conf/tez-env.sh',
owner = 'tez',
content = InlineTemplate(self.getConfig()['configurations']['tez-env']['content']),
mode=0555
)
self.assertNoMoreResources()
@patch("resource_management.libraries.functions.get_hdp_version")
def test_upgrade(self, get_hdp_version_mock):
    get_hdp_version_mock.return_value = "2.2.1.0-2067"
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
classname = "TezClient",
command = "restart",
config_file="client-upgrade.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
@patch("resource_management.libraries.functions.get_hdp_version")
def test_upgrade_23(self, get_hdp_version_mock):
    get_hdp_version_mock.return_value = "2.2.1.0-2067"
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
classname = "TezClient",
command = "restart",
config_file="client-upgrade.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
# for now, it's enough that hdp-select is confirmed
def test_pre_upgrade_restart_23(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/client-upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
classname = "TezClient",
command = "pre_upgrade_restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
self.assertNoMoreResources()
self.assertEquals(2, mocks_dict['call'].call_count)
self.assertEquals(2, mocks_dict['checked_call'].call_count)
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'tez', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'tez', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[1][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[1][0][0])
| false
| true
|
f7172061b131608ed83712b9290ea4258e0d92ab
| 4,829
|
py
|
Python
|
examples/validate_secure_metadata.py
|
ninchat/ninchat-api
|
44d57210c760798ac403fa367ee2f2f280bb5dd6
|
[
"BSD-2-Clause"
] | 2
|
2017-07-23T12:51:45.000Z
|
2018-12-10T01:12:14.000Z
|
examples/validate_secure_metadata.py
|
ninchat/ninchat-api
|
44d57210c760798ac403fa367ee2f2f280bb5dd6
|
[
"BSD-2-Clause"
] | 1
|
2019-03-25T10:04:25.000Z
|
2019-03-25T10:04:25.000Z
|
examples/validate_secure_metadata.py
|
ninchat/ninchat-api
|
44d57210c760798ac403fa367ee2f2f280bb5dd6
|
[
"BSD-2-Clause"
] | 6
|
2016-06-23T07:24:04.000Z
|
2021-02-02T13:41:06.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2020, Somia Reality Oy
# All rights reserved.
# Installing dependencies:
#
# - Ubuntu/Debian: apt install python3-cryptography python3-jwcrypto
# - Using pip: pip3 install cryptography jwcrypto
from argparse import ArgumentParser
from base64 import b64decode, urlsafe_b64decode, urlsafe_b64encode
from calendar import timegm
from datetime import datetime
from hashlib import sha512
from hmac import compare_digest
from json import dumps, loads
from time import time
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from jwcrypto.jwe import JWE
from jwcrypto.jwk import JWK
MAX_EXPIRE_WINDOW = 10 * 24 * 60 * 60 # 10 days
def assert_secure_metadata(master_key_type, master_key_id, master_key_secret, secure_metadata_str, time_now=None):
if secure_metadata_str.count(".") < 2:
assert master_key_type == "ninchat"
return assert_ninchat_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now)
else:
assert master_key_type == "jwt"
return assert_jwt_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now)
def assert_ninchat_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now=None):
if "." in secure_metadata_str:
key_id, msg_b64 = secure_metadata_str.split(".", 1)
msg_iv = unpadded_urlsafe_b64decode(msg_b64)
else:
key_id, msg_b64 = secure_metadata_str.split("-", 1)
msg_iv = b64decode(msg_b64)
assert key_id == master_key_id
key = b64decode(master_key_secret)
msg_hashed = decrypt_aes_cbc(key, msg_iv)
sha = sha512()
digest = msg_hashed[:sha.digest_size]
msg_padded = msg_hashed[sha.digest_size:]
msg_json = msg_padded.rstrip(b"\0")
sha.update(msg_json)
assert compare_digest(sha.digest(), digest)
msg_json_bytes = msg_json.decode()
msg = loads(msg_json_bytes)
assert_not_expired(msg["expire"], time_now)
assert "user_id" not in msg
return msg["metadata"]
def assert_jwt_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now=None):
jwe = JWE()
jwe.allowed_algs = ["dir", "A256GCM"]
jwe.deserialize(secure_metadata_str)
assert jwe.jose_header["alg"] == "dir"
assert jwe.jose_header["enc"] == "A256GCM"
assert jwe.jose_header["kid"] == master_key_id
key = b64decode(master_key_secret)
jwk = JWK(kty="oct", k=urlsafe_b64encode(key).rstrip(b"=").decode())
jwe.decrypt(jwk)
msg_json_bytes = jwe.payload.decode()
msg = loads(msg_json_bytes)
assert_not_expired(msg["exp"], time_now)
assert "user_id" not in msg
return msg["ninchat.com/metadata"]
def assert_not_expired(expire_time, time_now=None):
if not time_now:
time_now = time()
assert isinstance(expire_time, (int, float))
assert expire_time > time_now
assert expire_time < time_now + MAX_EXPIRE_WINDOW
def decrypt_aes_cbc(key_bytes, iv_ciphertext):
block_len = AES.block_size // 8
assert len(iv_ciphertext) >= 2 * block_len
assert (len(iv_ciphertext) % block_len) == 0
iv = iv_ciphertext[:block_len]
ciphertext = iv_ciphertext[block_len:]
algo = AES(key_bytes)
mode = CBC(iv)
c = Cipher(algo, mode, default_backend())
d = c.decryptor()
plaintext = d.update(ciphertext)
plaintext += d.finalize()
return plaintext
def unpadded_urlsafe_b64decode(unpadded_str):
unpadded_bytes = unpadded_str.encode()
padded_bytes = unpadded_bytes + (b"", None, b"==", b"=")[len(unpadded_bytes) & 3]
return urlsafe_b64decode(padded_bytes)
def main():
time_format = "%Y-%m-%dT%H:%M:%SZ"
time_example = datetime.utcnow().strftime(time_format)
parser = ArgumentParser()
parser.add_argument("--now", metavar="TIME", help="fake timestamp (UTC) for checking expiration (example: {})".format(time_example))
parser.add_argument("master-key-type", help='"ninchat" or "jwt"')
parser.add_argument("master-key-id", help="encryption key id")
parser.add_argument("master-key-secret", help="base64-encoded encryption key (as received from Ninchat)")
parser.add_argument("secure-metadata", help="the string to validate")
args = parser.parse_args()
metadata = assert_secure_metadata(
getattr(args, "master-key-type"),
getattr(args, "master-key-id"),
getattr(args, "master-key-secret"),
getattr(args, "secure-metadata"),
timegm(datetime.strptime(args.now, time_format).utctimetuple()) if args.now else None,
)
print(dumps(metadata, indent=2))
if __name__ == "__main__":
main()
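A companion sketch producing a "jwt"-type token that assert_secure_metadata() above accepts (the key, key id, and metadata payload are illustrative, not real Ninchat credentials):

import json
from base64 import b64encode, urlsafe_b64encode
from os import urandom
from time import time

from jwcrypto.jwe import JWE
from jwcrypto.jwk import JWK

key = urandom(32)                               # illustrative 256-bit key
master_key_id = "example-key-id"
master_key_secret = b64encode(key).decode()

claims = {"exp": int(time()) + 3600,
          "ninchat.com/metadata": {"crm_id": "12345"}}
jwe = JWE(plaintext=json.dumps(claims).encode(),
          protected=json.dumps({"alg": "dir", "enc": "A256GCM",
                                "kid": master_key_id}))
jwe.add_recipient(JWK(kty="oct", k=urlsafe_b64encode(key).rstrip(b"=").decode()))
token = jwe.serialize(compact=True)

assert assert_secure_metadata("jwt", master_key_id, master_key_secret,
                              token) == {"crm_id": "12345"}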
| 34.007042
| 136
| 0.720646
|
from argparse import ArgumentParser
from base64 import b64decode, urlsafe_b64decode, urlsafe_b64encode
from calendar import timegm
from datetime import datetime
from hashlib import sha512
from hmac import compare_digest
from json import dumps, loads
from time import time
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from jwcrypto.jwe import JWE
from jwcrypto.jwk import JWK
MAX_EXPIRE_WINDOW = 10 * 24 * 60 * 60
def assert_secure_metadata(master_key_type, master_key_id, master_key_secret, secure_metadata_str, time_now=None):
if secure_metadata_str.count(".") < 2:
assert master_key_type == "ninchat"
return assert_ninchat_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now)
else:
assert master_key_type == "jwt"
return assert_jwt_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now)
def assert_ninchat_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now=None):
if "." in secure_metadata_str:
key_id, msg_b64 = secure_metadata_str.split(".", 1)
msg_iv = unpadded_urlsafe_b64decode(msg_b64)
else:
key_id, msg_b64 = secure_metadata_str.split("-", 1)
msg_iv = b64decode(msg_b64)
assert key_id == master_key_id
key = b64decode(master_key_secret)
msg_hashed = decrypt_aes_cbc(key, msg_iv)
sha = sha512()
digest = msg_hashed[:sha.digest_size]
msg_padded = msg_hashed[sha.digest_size:]
msg_json = msg_padded.rstrip(b"\0")
sha.update(msg_json)
assert compare_digest(sha.digest(), digest)
msg_json_bytes = msg_json.decode()
msg = loads(msg_json_bytes)
assert_not_expired(msg["expire"], time_now)
assert "user_id" not in msg
return msg["metadata"]
def assert_jwt_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now=None):
jwe = JWE()
jwe.allowed_algs = ["dir", "A256GCM"]
jwe.deserialize(secure_metadata_str)
assert jwe.jose_header["alg"] == "dir"
assert jwe.jose_header["enc"] == "A256GCM"
assert jwe.jose_header["kid"] == master_key_id
key = b64decode(master_key_secret)
jwk = JWK(kty="oct", k=urlsafe_b64encode(key).rstrip(b"=").decode())
jwe.decrypt(jwk)
msg_json_bytes = jwe.payload.decode()
msg = loads(msg_json_bytes)
assert_not_expired(msg["exp"], time_now)
assert "user_id" not in msg
return msg["ninchat.com/metadata"]
def assert_not_expired(expire_time, time_now=None):
if not time_now:
time_now = time()
assert isinstance(expire_time, (int, float))
assert expire_time > time_now
assert expire_time < time_now + MAX_EXPIRE_WINDOW
def decrypt_aes_cbc(key_bytes, iv_ciphertext):
block_len = AES.block_size // 8
assert len(iv_ciphertext) >= 2 * block_len
assert (len(iv_ciphertext) % block_len) == 0
iv = iv_ciphertext[:block_len]
ciphertext = iv_ciphertext[block_len:]
algo = AES(key_bytes)
mode = CBC(iv)
c = Cipher(algo, mode, default_backend())
d = c.decryptor()
plaintext = d.update(ciphertext)
plaintext += d.finalize()
return plaintext
def unpadded_urlsafe_b64decode(unpadded_str):
unpadded_bytes = unpadded_str.encode()
padded_bytes = unpadded_bytes + (b"", None, b"==", b"=")[len(unpadded_bytes) & 3]
return urlsafe_b64decode(padded_bytes)
def main():
time_format = "%Y-%m-%dT%H:%M:%SZ"
time_example = datetime.utcnow().strftime(time_format)
parser = ArgumentParser()
parser.add_argument("--now", metavar="TIME", help="fake timestamp (UTC) for checking expiration (example: {})".format(time_example))
parser.add_argument("master-key-type", help='"ninchat" or "jwt"')
parser.add_argument("master-key-id", help="encryption key id")
parser.add_argument("master-key-secret", help="base64-encoded encryption key (as received from Ninchat)")
parser.add_argument("secure-metadata", help="the string to validate")
args = parser.parse_args()
metadata = assert_secure_metadata(
getattr(args, "master-key-type"),
getattr(args, "master-key-id"),
getattr(args, "master-key-secret"),
getattr(args, "secure-metadata"),
timegm(datetime.strptime(args.now, time_format).utctimetuple()) if args.now else None,
)
print(dumps(metadata, indent=2))
if __name__ == "__main__":
main()
| true
| true
|
f71720a0eccf6342d22032de87fb4c3251492c1b
| 836
|
py
|
Python
|
plotly/validators/cone/_stream.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/cone/_stream.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | null | null | null |
plotly/validators/cone/_stream.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name='stream', parent_name='cone', **kwargs):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Stream',
data_docs="""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
""",
**kwargs
)
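A quick usage sketch of the setting this validator governs (assuming a plotly build where graph_objs exposes Cone; the token value is illustrative):

import plotly.graph_objs as go

cone = go.Cone(stream={"maxpoints": 50, "token": "example-stream-token"})
print(cone.stream.maxpoints)  # 50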
| 34.833333
| 75
| 0.586124
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name='stream', parent_name='cone', **kwargs):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Stream',
data_docs="""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
""",
**kwargs
)
| true
| true
|
f71720bf1e6e85436ed4df37a21e70f073d0583e
| 29,705
|
py
|
Python
|
openaset/distamben/admin.py
|
muntaza/Open-Aset
|
f5eb6770a9f7184e3860a18cd655b35b248a9dd5
|
[
"BSD-2-Clause"
] | null | null | null |
openaset/distamben/admin.py
|
muntaza/Open-Aset
|
f5eb6770a9f7184e3860a18cd655b35b248a9dd5
|
[
"BSD-2-Clause"
] | null | null | null |
openaset/distamben/admin.py
|
muntaza/Open-Aset
|
f5eb6770a9f7184e3860a18cd655b35b248a9dd5
|
[
"BSD-2-Clause"
] | 2
|
2019-02-18T05:25:23.000Z
|
2021-02-01T16:45:23.000Z
|
### $Id: admin.py,v 1.29 2017/12/18 09:12:51 muntaza Exp $
from django.contrib import admin
from umum.models import Provinsi, Kabupaten, LokasiBidang, SKPD, SUBSKPD, KodeBarang, HakTanah, SatuanBarang, KeadaanBarang, SKPenghapusan, MutasiBerkurang, JenisPemanfaatan, AsalUsul, Tahun, GolonganBarang, Tanah, KontrakTanah, PenghapusanTanah, TanahPenghapusan, PemanfaatanTanah, TanahPemanfaatan, HargaTanah, TahunBerkurangUsulHapusTanah, TanahUsulHapus
#### Tanah
from umum.models import TanahDistamben, KontrakTanahDistamben, HargaTanahDistamben, TanahUsulHapusDistamben, TahunBerkurangUsulHapusTanahDistamben
from umum.models import TanahPenghapusanDistamben, TahunBerkurangTanahDistamben, PenghapusanTanahDistamben
from umum.models import SKPDAsalTanahDistamben, SKPDTujuanTanahDistamben, FotoTanahDistamben
from umum.admin import HargaTanahInline, TanahAdmin, KontrakTanahAdmin, HargaTanahAdmin, TahunBerkurangUsulHapusTanahInline, TanahUsulHapusAdmin
from umum.admin import TahunBerkurangTanahInline, PenghapusanTanahInline, TanahPenghapusanAdmin
from umum.admin import SKPDAsalTanahInline, SKPDTujuanTanahInline, FotoTanahInline
from umum.admin import GedungBangunanInline
#### Gedung Bangunan
from gedungbangunan.models import StatusTingkat, StatusBeton, KontrakGedungBangunan, HargaGedungBangunan, GedungBangunan, PenghapusanGedungBangunan, PemanfaatanGedungBangunan, TahunBerkurangGedungBangunan, Ruangan, TahunBerkurangUsulHapusGedung
from gedungbangunan.models import GedungBangunanPemanfaatan, GedungBangunanPenghapusan, GedungBangunanRuangan, GedungBangunanUsulHapus
from gedungbangunan.models import GedungBangunanDistamben, KontrakGedungBangunanDistamben, HargaGedungBangunanDistamben, GedungBangunanRuanganDistamben, GedungBangunanUsulHapusDistamben, TahunBerkurangUsulHapusGedungDistamben
from gedungbangunan.models import GedungBangunanPenghapusanDistamben, TahunBerkurangGedungBangunanDistamben, PenghapusanGedungBangunanDistamben
from gedungbangunan.models import SKPDAsalGedungBangunanDistamben, SKPDTujuanGedungBangunanDistamben, FotoGedungBangunanDistamben
from gedungbangunan.admin import HargaGedungBangunanInline, GedungBangunanAdmin, KontrakGedungBangunanAdmin, HargaGedungBangunanAdmin, RuanganInline, GedungBangunanRuanganAdmin, KDPGedungBangunanAdmin, TahunBerkurangUsulHapusGedungInline, GedungBangunanUsulHapusAdmin
from gedungbangunan.admin import TahunBerkurangGedungBangunanInline, PenghapusanGedungBangunanInline, GedungBangunanPenghapusanAdmin
from gedungbangunan.admin import SKPDAsalGedungBangunanInline, SKPDTujuanGedungBangunanInline, FotoGedungBangunanInline
#### Peralatan Mesin
from peralatanmesin.models import KontrakPeralatanMesin, HargaPeralatanMesin, PeralatanMesin, PenghapusanPeralatanMesin, PemanfaatanPeralatanMesin, TahunBerkurangPeralatanMesin, TahunBerkurangUsulHapusPeralatanMesin
# models that hold the inline relations
from peralatanmesin.models import PeralatanMesinPemanfaatan, PeralatanMesinPenghapusan, PeralatanMesinUsulHapus
from peralatanmesin.models import PeralatanMesinDistamben, KontrakPeralatanMesinDistamben, HargaPeralatanMesinDistamben, PeralatanMesinUsulHapusDistamben, TahunBerkurangUsulHapusPeralatanMesinDistamben
from peralatanmesin.models import PeralatanMesinPenghapusanDistamben, TahunBerkurangPeralatanMesinDistamben, PenghapusanPeralatanMesinDistamben
from peralatanmesin.models import SKPDAsalPeralatanMesinDistamben, SKPDTujuanPeralatanMesinDistamben, FotoPeralatanMesinDistamben
from peralatanmesin.admin import HargaPeralatanMesinInline, PeralatanMesinAdmin, KontrakPeralatanMesinAdmin, HargaPeralatanMesinAdmin, TahunBerkurangUsulHapusPeralatanMesinInline, PeralatanMesinUsulHapusAdmin
from peralatanmesin.admin import TahunBerkurangPeralatanMesinInline, PenghapusanPeralatanMesinInline, PeralatanMesinPenghapusanAdmin
from peralatanmesin.admin import SKPDAsalPeralatanMesinInline, SKPDTujuanPeralatanMesinInline, FotoPeralatanMesinInline
#### Class Tanah
class TahunBerkurangTanahDistambenInline(TahunBerkurangTanahInline):
model = TahunBerkurangTanahDistamben
class PenghapusanTanahDistambenInline(PenghapusanTanahInline):
model = PenghapusanTanahDistamben
class SKPDAsalTanahDistambenInline(SKPDAsalTanahInline):
model = SKPDAsalTanahDistamben
class SKPDTujuanTanahDistambenInline(SKPDTujuanTanahInline):
model = SKPDTujuanTanahDistamben
class FotoTanahDistambenInline(FotoTanahInline):
model = FotoTanahDistamben
class GedungBangunanDistambenInline(GedungBangunanInline):
model = GedungBangunanDistamben
class HargaTanahDistambenInline(HargaTanahInline):
model = HargaTanahDistamben
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak":
kwargs["queryset"] = KontrakTanah.objects.filter(id_skpd__exact=17)
return super(HargaTanahDistambenInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusTanahDistambenInline(TahunBerkurangUsulHapusTanahInline):
model = TahunBerkurangUsulHapusTanahDistamben
class TanahDistambenAdmin(TanahAdmin):
inlines = [HargaTanahDistambenInline,
SKPDAsalTanahDistambenInline,
FotoTanahDistambenInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=17)
return super(TanahDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class TanahUsulHapusDistambenAdmin(TanahUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusTanahDistambenInline,
SKPDAsalTanahDistambenInline,
FotoTanahDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=3)
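# Kontrak* (contract) admins hang directly off an SKPD, so they filter
# SKPD.id itself rather than going through SUBSKPD like the asset admins.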
class KontrakTanahDistambenAdmin(KontrakTanahAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=17)
return super(KontrakTanahDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=17)
class HargaTanahDistambenAdmin(HargaTanahAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=17)
tanah_qs = Tanah.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_tanah__in=tanah_qs)
class TanahPenghapusanDistambenAdmin(TanahPenghapusanAdmin):
inlines = [PenghapusanTanahDistambenInline, TahunBerkurangTanahDistambenInline,
SKPDAsalTanahDistambenInline,
SKPDTujuanTanahDistambenInline,
FotoTanahDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
### Register Tanah Distamben
admin.site.register(TanahDistamben, TanahDistambenAdmin)
admin.site.register(TanahUsulHapusDistamben, TanahUsulHapusDistambenAdmin)
admin.site.register(KontrakTanahDistamben, KontrakTanahDistambenAdmin)
admin.site.register(HargaTanahDistamben, HargaTanahDistambenAdmin)
admin.site.register(TanahPenghapusanDistamben, TanahPenghapusanDistambenAdmin)
from gedungbangunan.models import KDPGedungBangunanDistamben
#### Class Gedung dan Bangunan (buildings)
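# The building admins filter id_golongan_barang == 3 and the KDP admins use
# 6; reading the class names, 6 presumably marks Konstruksi Dalam Pengerjaan
# (construction in progress) and 3 finished buildings.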
class TahunBerkurangGedungBangunanDistambenInline(TahunBerkurangGedungBangunanInline):
model = TahunBerkurangGedungBangunanDistamben
class PenghapusanGedungBangunanDistambenInline(PenghapusanGedungBangunanInline):
model = PenghapusanGedungBangunanDistamben
class SKPDAsalGedungBangunanDistambenInline(SKPDAsalGedungBangunanInline):
model = SKPDAsalGedungBangunanDistamben
class SKPDTujuanGedungBangunanDistambenInline(SKPDTujuanGedungBangunanInline):
model = SKPDTujuanGedungBangunanDistamben
class FotoGedungBangunanDistambenInline(FotoGedungBangunanInline):
model = FotoGedungBangunanDistamben
class HargaGedungBangunanDistambenInline(HargaGedungBangunanInline):
model = HargaGedungBangunanDistamben
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_gedung_bangunan":
kwargs["queryset"] = KontrakGedungBangunan.objects.filter(id_skpd__exact=17)
return super(HargaGedungBangunanDistambenInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusGedungDistambenInline(TahunBerkurangUsulHapusGedungInline):
model = TahunBerkurangUsulHapusGedungDistamben
class GedungBangunanDistambenAdmin(GedungBangunanAdmin):
inlines = [HargaGedungBangunanDistambenInline,
SKPDAsalGedungBangunanDistambenInline,
FotoGedungBangunanDistambenInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=17)
return super(GedungBangunanDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class KDPGedungBangunanDistambenAdmin(KDPGedungBangunanAdmin):
inlines = [HargaGedungBangunanDistambenInline,
SKPDAsalGedungBangunanDistambenInline,
FotoGedungBangunanDistambenInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=17)
return super(KDPGedungBangunanDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=6).filter(id_mutasi_berkurang__exact=5)
class GedungBangunanRuanganDistambenAdmin(GedungBangunanRuanganAdmin):
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class GedungBangunanUsulHapusDistambenAdmin(GedungBangunanUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusGedungDistambenInline,
SKPDAsalGedungBangunanDistambenInline,
FotoGedungBangunanDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=3)
class KontrakGedungBangunanDistambenAdmin(KontrakGedungBangunanAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=17)
return super(KontrakGedungBangunanDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=17)
class HargaGedungBangunanDistambenAdmin(HargaGedungBangunanAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=17)
gedung_bangunan_qs = GedungBangunan.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_gedung_bangunan__in=gedung_bangunan_qs)
class GedungBangunanPenghapusanDistambenAdmin(GedungBangunanPenghapusanAdmin):
inlines = [PenghapusanGedungBangunanDistambenInline, TahunBerkurangGedungBangunanDistambenInline,
SKPDAsalGedungBangunanDistambenInline,
SKPDTujuanGedungBangunanDistambenInline,
FotoGedungBangunanDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
### Register GedungBangunan Distamben
admin.site.register(GedungBangunanDistamben, GedungBangunanDistambenAdmin)
admin.site.register(KDPGedungBangunanDistamben, KDPGedungBangunanDistambenAdmin)
admin.site.register(GedungBangunanRuanganDistamben, GedungBangunanRuanganDistambenAdmin)
admin.site.register(GedungBangunanUsulHapusDistamben, GedungBangunanUsulHapusDistambenAdmin)
admin.site.register(KontrakGedungBangunanDistamben, KontrakGedungBangunanDistambenAdmin)
admin.site.register(HargaGedungBangunanDistamben, HargaGedungBangunanDistambenAdmin)
admin.site.register(GedungBangunanPenghapusanDistamben, GedungBangunanPenghapusanDistambenAdmin)
#### Class Peralatan Mesin
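# In PeralatanMesinDistambenAdmin below, room (id_ruangan) choices are scoped
# to SKPD 17 by traversing id_gedung_bangunan -> id_sub_skpd -> id_skpd,
# since rooms are only reachable through their buildings.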
class TahunBerkurangPeralatanMesinDistambenInline(TahunBerkurangPeralatanMesinInline):
model = TahunBerkurangPeralatanMesinDistamben
class PenghapusanPeralatanMesinDistambenInline(PenghapusanPeralatanMesinInline):
model = PenghapusanPeralatanMesinDistamben
class SKPDAsalPeralatanMesinDistambenInline(SKPDAsalPeralatanMesinInline):
model = SKPDAsalPeralatanMesinDistamben
class SKPDTujuanPeralatanMesinDistambenInline(SKPDTujuanPeralatanMesinInline):
model = SKPDTujuanPeralatanMesinDistamben
class FotoPeralatanMesinDistambenInline(FotoPeralatanMesinInline):
model = FotoPeralatanMesinDistamben
class HargaPeralatanMesinDistambenInline(HargaPeralatanMesinInline):
model = HargaPeralatanMesinDistamben
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_peralatan_mesin":
kwargs["queryset"] = KontrakPeralatanMesin.objects.filter(id_skpd__exact=17)
return super(HargaPeralatanMesinDistambenInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusPeralatanMesinDistambenInline(TahunBerkurangUsulHapusPeralatanMesinInline):
model = TahunBerkurangUsulHapusPeralatanMesinDistamben
class PeralatanMesinDistambenAdmin(PeralatanMesinAdmin):
inlines = [HargaPeralatanMesinDistambenInline,
SKPDAsalPeralatanMesinDistambenInline,
FotoPeralatanMesinDistambenInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=17)
if db_field.name == "id_ruangan":
kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__id_skpd__exact=17)
return super(PeralatanMesinDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class PeralatanMesinUsulHapusDistambenAdmin(PeralatanMesinUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusPeralatanMesinDistambenInline,
SKPDAsalPeralatanMesinDistambenInline,
FotoPeralatanMesinDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=3)
class KontrakPeralatanMesinDistambenAdmin(KontrakPeralatanMesinAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=17)
return super(KontrakPeralatanMesinDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=17)
class HargaPeralatanMesinDistambenAdmin(HargaPeralatanMesinAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=17)
peralatan_mesin_qs = PeralatanMesin.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_peralatan_mesin__in=peralatan_mesin_qs)
class PeralatanMesinPenghapusanDistambenAdmin(PeralatanMesinPenghapusanAdmin):
inlines = [PenghapusanPeralatanMesinDistambenInline, TahunBerkurangPeralatanMesinDistambenInline,
SKPDAsalPeralatanMesinDistambenInline,
SKPDTujuanPeralatanMesinDistambenInline,
FotoPeralatanMesinDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
### Register PeralatanMesin Distamben
admin.site.register(PeralatanMesinDistamben, PeralatanMesinDistambenAdmin)
admin.site.register(PeralatanMesinUsulHapusDistamben, PeralatanMesinUsulHapusDistambenAdmin)
admin.site.register(KontrakPeralatanMesinDistamben, KontrakPeralatanMesinDistambenAdmin)
admin.site.register(HargaPeralatanMesinDistamben, HargaPeralatanMesinDistambenAdmin)
admin.site.register(PeralatanMesinPenghapusanDistamben, PeralatanMesinPenghapusanDistambenAdmin)
#### Jalan, Irigasi, dan Jaringan (roads, irrigation, and networks)
from jalanirigasijaringan.models import KontrakJalanIrigasiJaringan, HargaJalanIrigasiJaringan, JalanIrigasiJaringan, PenghapusanJalanIrigasiJaringan, PemanfaatanJalanIrigasiJaringan, TahunBerkurangJalanIrigasiJaringan, TahunBerkurangUsulHapusJalanIrigasiJaringan
from jalanirigasijaringan.models import JalanIrigasiJaringanPemanfaatan, JalanIrigasiJaringanPenghapusan, JalanIrigasiJaringanUsulHapus
from jalanirigasijaringan.models import JalanIrigasiJaringanDistamben, KontrakJalanIrigasiJaringanDistamben, HargaJalanIrigasiJaringanDistamben, KDPJalanIrigasiJaringanDistamben, JalanIrigasiJaringanUsulHapusDistamben, TahunBerkurangUsulHapusJalanIrigasiJaringanDistamben
from jalanirigasijaringan.models import JalanIrigasiJaringanPenghapusanDistamben, TahunBerkurangJalanIrigasiJaringanDistamben, PenghapusanJalanIrigasiJaringanDistamben
from jalanirigasijaringan.models import SKPDAsalJalanIrigasiJaringanDistamben, SKPDTujuanJalanIrigasiJaringanDistamben, FotoJalanIrigasiJaringanDistamben
from jalanirigasijaringan.admin import HargaJalanIrigasiJaringanInline, JalanIrigasiJaringanAdmin, KontrakJalanIrigasiJaringanAdmin, HargaJalanIrigasiJaringanAdmin, KDPJalanIrigasiJaringanAdmin, TahunBerkurangUsulHapusJalanIrigasiJaringanInline, JalanIrigasiJaringanUsulHapusAdmin
from jalanirigasijaringan.admin import TahunBerkurangJalanIrigasiJaringanInline, PenghapusanJalanIrigasiJaringanInline, JalanIrigasiJaringanPenghapusanAdmin
from jalanirigasijaringan.admin import SKPDAsalJalanIrigasiJaringanInline, SKPDTujuanJalanIrigasiJaringanInline, FotoJalanIrigasiJaringanInline
#### Class Jalan, Irigasi dan Jaringan
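# Same scoping pattern again: the road/irrigation/network admins filter
# id_golongan_barang == 4 and their KDP variant uses 6.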
class TahunBerkurangJalanIrigasiJaringanDistambenInline(TahunBerkurangJalanIrigasiJaringanInline):
model = TahunBerkurangJalanIrigasiJaringanDistamben
class PenghapusanJalanIrigasiJaringanDistambenInline(PenghapusanJalanIrigasiJaringanInline):
model = PenghapusanJalanIrigasiJaringanDistamben
class SKPDAsalJalanIrigasiJaringanDistambenInline(SKPDAsalJalanIrigasiJaringanInline):
model = SKPDAsalJalanIrigasiJaringanDistamben
class SKPDTujuanJalanIrigasiJaringanDistambenInline(SKPDTujuanJalanIrigasiJaringanInline):
model = SKPDTujuanJalanIrigasiJaringanDistamben
class FotoJalanIrigasiJaringanDistambenInline(FotoJalanIrigasiJaringanInline):
model = FotoJalanIrigasiJaringanDistamben
class HargaJalanIrigasiJaringanDistambenInline(HargaJalanIrigasiJaringanInline):
model = HargaJalanIrigasiJaringanDistamben
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_jalan_irigasi_jaringan":
kwargs["queryset"] = KontrakJalanIrigasiJaringan.objects.filter(id_skpd__exact=17)
return super(HargaJalanIrigasiJaringanDistambenInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusJalanIrigasiJaringanDistambenInline(TahunBerkurangUsulHapusJalanIrigasiJaringanInline):
model = TahunBerkurangUsulHapusJalanIrigasiJaringanDistamben
class JalanIrigasiJaringanDistambenAdmin(JalanIrigasiJaringanAdmin):
inlines = [HargaJalanIrigasiJaringanDistambenInline,
SKPDAsalJalanIrigasiJaringanDistambenInline,
FotoJalanIrigasiJaringanDistambenInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=17)
return super(JalanIrigasiJaringanDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=4).filter(id_mutasi_berkurang__exact=5)
class JalanIrigasiJaringanUsulHapusDistambenAdmin(JalanIrigasiJaringanUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusJalanIrigasiJaringanDistambenInline,
SKPDAsalJalanIrigasiJaringanDistambenInline,
FotoJalanIrigasiJaringanDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=4).filter(id_mutasi_berkurang__exact=3)
class KDPJalanIrigasiJaringanDistambenAdmin(KDPJalanIrigasiJaringanAdmin):
inlines = [HargaJalanIrigasiJaringanDistambenInline,
SKPDAsalJalanIrigasiJaringanDistambenInline,
FotoJalanIrigasiJaringanDistambenInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=17)
return super(KDPJalanIrigasiJaringanDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=6).filter(id_mutasi_berkurang__exact=5)
class KontrakJalanIrigasiJaringanDistambenAdmin(KontrakJalanIrigasiJaringanAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=17)
return super(KontrakJalanIrigasiJaringanDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=17)
class HargaJalanIrigasiJaringanDistambenAdmin(HargaJalanIrigasiJaringanAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=17)
jalan_irigasi_jaringan_qs = JalanIrigasiJaringan.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_jalan_irigasi_jaringan__in=jalan_irigasi_jaringan_qs)
class JalanIrigasiJaringanPenghapusanDistambenAdmin(JalanIrigasiJaringanPenghapusanAdmin):
inlines = [PenghapusanJalanIrigasiJaringanDistambenInline, TahunBerkurangJalanIrigasiJaringanDistambenInline,
SKPDAsalJalanIrigasiJaringanDistambenInline,
SKPDTujuanJalanIrigasiJaringanDistambenInline,
FotoJalanIrigasiJaringanDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
### Register JalanIrigasiJaringan Distamben
admin.site.register(JalanIrigasiJaringanDistamben, JalanIrigasiJaringanDistambenAdmin)
admin.site.register(JalanIrigasiJaringanUsulHapusDistamben, JalanIrigasiJaringanUsulHapusDistambenAdmin)
admin.site.register(KDPJalanIrigasiJaringanDistamben, KDPJalanIrigasiJaringanDistambenAdmin)
admin.site.register(KontrakJalanIrigasiJaringanDistamben, KontrakJalanIrigasiJaringanDistambenAdmin)
admin.site.register(HargaJalanIrigasiJaringanDistamben, HargaJalanIrigasiJaringanDistambenAdmin)
admin.site.register(JalanIrigasiJaringanPenghapusanDistamben, JalanIrigasiJaringanPenghapusanDistambenAdmin)
#### Aset Tetap Lainnya (other fixed assets)
from atl.models import KontrakATL, HargaATL, ATL, PenghapusanATL, PemanfaatanATL, TahunBerkurangATL, TahunBerkurangUsulHapusATL
from atl.models import ATLPemanfaatan, ATLPenghapusan, ATLUsulHapus
from atl.models import ATLDistamben, KontrakATLDistamben, HargaATLDistamben, ATLUsulHapusDistamben, TahunBerkurangUsulHapusATLDistamben
from atl.models import ATLPenghapusanDistamben, TahunBerkurangATLDistamben, PenghapusanATLDistamben
from atl.models import SKPDAsalATLDistamben, SKPDTujuanATLDistamben, FotoATLDistamben
from atl.admin import HargaATLInline, ATLAdmin, KontrakATLAdmin, HargaATLAdmin, TahunBerkurangUsulHapusATLInline, ATLUsulHapusAdmin
from atl.admin import TahunBerkurangATLInline, PenghapusanATLInline, ATLPenghapusanAdmin
from atl.admin import SKPDAsalATLInline, SKPDTujuanATLInline, FotoATLInline
#### Class Aset Tetap Lainnya
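# ATL follows the same pattern; note that its UsulHapus admin additionally
# filters id_golongan_barang == 5, which the plain ATL admin does not.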
class TahunBerkurangATLDistambenInline(TahunBerkurangATLInline):
model = TahunBerkurangATLDistamben
class PenghapusanATLDistambenInline(PenghapusanATLInline):
model = PenghapusanATLDistamben
class SKPDAsalATLDistambenInline(SKPDAsalATLInline):
model = SKPDAsalATLDistamben
class SKPDTujuanATLDistambenInline(SKPDTujuanATLInline):
model = SKPDTujuanATLDistamben
class FotoATLDistambenInline(FotoATLInline):
model = FotoATLDistamben
class HargaATLDistambenInline(HargaATLInline):
model = HargaATLDistamben
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_atl":
kwargs["queryset"] = KontrakATL.objects.filter(id_skpd__exact=17)
return super(HargaATLDistambenInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusATLDistambenInline(TahunBerkurangUsulHapusATLInline):
model = TahunBerkurangUsulHapusATLDistamben
class ATLDistambenAdmin(ATLAdmin):
inlines = [HargaATLDistambenInline,
SKPDAsalATLDistambenInline,
FotoATLDistambenInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=17)
if db_field.name == "id_ruangan":
kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__id_skpd__exact=17)
return super(ATLDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class ATLUsulHapusDistambenAdmin(ATLUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusATLDistambenInline,
SKPDAsalATLDistambenInline,
FotoATLDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=5).filter(id_mutasi_berkurang__exact=3)
class KontrakATLDistambenAdmin(KontrakATLAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=17)
return super(KontrakATLDistambenAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=17)
class HargaATLDistambenAdmin(HargaATLAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=17)
atl_qs = ATL.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_atl__in=atl_qs)
class ATLPenghapusanDistambenAdmin(ATLPenghapusanAdmin):
inlines = [PenghapusanATLDistambenInline, TahunBerkurangATLDistambenInline,
SKPDAsalATLDistambenInline,
SKPDTujuanATLDistambenInline,
FotoATLDistambenInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=17)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
### Register ATL Distamben
admin.site.register(ATLDistamben, ATLDistambenAdmin)
admin.site.register(ATLUsulHapusDistamben, ATLUsulHapusDistambenAdmin)
admin.site.register(KontrakATLDistamben, KontrakATLDistambenAdmin)
admin.site.register(HargaATLDistamben, HargaATLDistambenAdmin)
admin.site.register(ATLPenghapusanDistamben, ATLPenghapusanDistambenAdmin)
| 42.254623
| 357
| 0.809291
|
f71720ecd6a39740e6edfa1bbf6de6885bb541e1
| 1,132
|
py
|
Python
|
skpr/examples/coded_diffraction.py
|
PhilippPelz/scikit-pr-open
|
50833b13160b6afe0a743d63d560bddeee2c18b5
|
[
"MIT"
] | null | null | null |
skpr/examples/coded_diffraction.py
|
PhilippPelz/scikit-pr-open
|
50833b13160b6afe0a743d63d560bddeee2c18b5
|
[
"MIT"
] | null | null | null |
skpr/examples/coded_diffraction.py
|
PhilippPelz/scikit-pr-open
|
50833b13160b6afe0a743d63d560bddeee2c18b5
|
[
"MIT"
] | 1
|
2020-11-11T06:51:46.000Z
|
2020-11-11T06:51:46.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 11 10:11:21 2017
@author: philipp
"""
from skpr.core.engines import CodedMeasurementEngine
from skpr.core.models import FarfieldCodedMeasurementNet
from skpr.core.parameters import *
from skpr.inout.h5rw import h5read
from skpr.nn import modules as M
from skpr.util import *
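# Rough flow, for orientation: read a simulated coded-diffraction dataset
# from HDF5, register the measured intensities 'I' and the ground-truth
# object 'ob' on the parameter tree, pick the far-field coded-measurement
# model with a truncated Poisson likelihood, then fit for 500 epochs.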
p = get_default_parameters()
f = h5read('/home/pelzphil/projects/cd_sim.h5')
p.y = th.from_numpy(f['I'])
p.model = FarfieldCodedMeasurementNet
p.object.solution = th.from_numpy(f['ob'].astype(np.complex64))
p.object.update_start = 0
p.object.margins = 0
p.loss.function = M.TruncatedFarFieldPoissonLikelihood
p.loss.parameters.a_h = 10
gradient_mask_radius = 37
gradient_mask_falloff = 1
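# Note: these two mask variables stay module-level and are never attached to
# p, so they have no effect in this script as written.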
p.optimizer.object.type = th.optim.SGD
p.optimizer.object.parameters.lr = 1e-1 + 0j
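# The learning rate is written as a complex number (1e-1 + 0j), presumably so
# SGD updates stay in the complex dtype of the object being reconstructed.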
p.logging.level = INFO
p.logging.log_reconstruction_parameters = False
p.logging.log_object_progress = False
p.logging.log_probe_progress = False
p.logging.log_error_progress = True
p.logging.print_summary = True
p.logging.print_report = True
p.epochs = 500
eng = CodedMeasurementEngine(p)
eng.fit()
| 22.196078
| 63
| 0.770318
|
f71720f87d0474311aa36f05f3fea5fd99145fd6
| 861
|
py
|
Python
|
src/solutions/template.py
|
etillison3350/advent-of-code-2020
|
01fcb1ad79ccfc29dbdd0ef9839e7cf5ea28c156
|
[
"MIT"
] | null | null | null |
src/solutions/template.py
|
etillison3350/advent-of-code-2020
|
01fcb1ad79ccfc29dbdd0ef9839e7cf5ea28c156
|
[
"MIT"
] | null | null | null |
src/solutions/template.py
|
etillison3350/advent-of-code-2020
|
01fcb1ad79ccfc29dbdd0ef9839e7cf5ea28c156
|
[
"MIT"
] | null | null | null |
from helpers.executor import Executor
from helpers.util import *
import itertools
from itertools import *
import re
from re import *
import numpy as np
from typing import Any, Callable, Generator, Sequence
day, year = None, None # TODO: Update day and year for current day
split_seq = '\n'
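# split_seq is passed to Executor.execute below; judging by its name, it is
# the delimiter used to split the raw puzzle input into records ('\n' gives
# one record per line).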
class Solution(Executor):
def solve(self, r: Sequence[str], print: Callable[..., None]) -> Generator[Any, None, None]:
yield self._solve_part1(r, print)
yield self._solve_part2(r, print)
def _solve_part1(self, r: Sequence[str], print: Callable[..., None]) -> Any:
# TODO
return None
def _solve_part2(self, r: Sequence[str], print: Callable[..., None]) -> Any:
# TODO
return None
if __name__ == '__main__':
solution = Solution(year, day)
solution.execute(split_seq, use_cached_test_cases=True)
| 26.090909
| 96
| 0.674797
|
f7172172e00faa7405c4c67e0bc74c55f26a1ecc
| 1,684
|
py
|
Python
|
pythonlibs/mantis/fanbei/smarthome/constants.py
|
adoggie/Tibet.6
|
3c53060edafd80b9c4dafa10699a68d86a410c66
|
[
"MIT"
] | 22
|
2019-10-28T07:28:12.000Z
|
2022-03-19T15:36:41.000Z
|
pythonlibs/mantis/fanbei/smarthome/constants.py
|
adoggie/Tibet.6
|
3c53060edafd80b9c4dafa10699a68d86a410c66
|
[
"MIT"
] | 1
|
2019-11-07T04:54:14.000Z
|
2019-11-07T07:12:48.000Z
|
pythonlibs/mantis/fanbei/smarthome/constants.py
|
adoggie/Tibet.6
|
3c53060edafd80b9c4dafa10699a68d86a410c66
|
[
"MIT"
] | 13
|
2019-10-28T07:29:07.000Z
|
2021-11-03T06:53:12.000Z
|
#coding:utf-8
# DeviceActiveListKeyHash = 'blue_earth.device.active.list' # stores the ids of all online devices {a:Time,b:Time}
#
DeviceCommandQueue = 'smartbox.device.command.queue.{device_type}.{device_id}'
#
# DeviceSequence = 'blue_earth.device.sequence'
DeviceChannelPub = 'smartbox.device.channel.pub.{device_id}' # channel that fans out all raw device data after it is read
DeviceAppChannelPub = 'smartbox.device.app.channel.pub.{device_id}' # device-monitoring app channel; every front-end system subscribes to it and pushes monitoring info to the front-end app
# DeviceChannelPubIoT = 'smartbox.device_channel_iot.{device_id}' # publish channel for pushing to 绿城+ (Greentown+)
DeviceChannelPubIoT = '{device_id}' # publish channel for pushing to 绿城+ (Greentown+)
DeviceChannelPubTraverseDown = 'smartbox.down.pub.{device_id}' # channel for sending control commands down to a device
DeviceChannelPubTraverseUp = 'smartbox.up.pub.{device_id}' # channel that distributes a device's upstream messages
# DevicePositionLastest = 'blue_earth.device.position.lastest.{device_id}' # the device's current coordinates and runtime info
#
# DevicePositionRequestTimeKey = 'blue_earth.device.position.request.time.{}' # time the positioning request command was sent to the device
#
# DeviceLandingServerKey = 'blue_earth.device.landing_server.{}' # records the server the device connected to {url,landing_time}
#
# DeviceShareCodeCreateTimeKey = 'blue_earth.device.share_code.create_time.{}' # creation time of the share code
#
MaxLiveTimeDeviceLandingServerKey = 60*8
DeviceAccessHttpAPI = 'smartbox.device.api_server.{}' # records the server the device connected to {url,landing_time}
# DeviceActiveListKeyHash = 'smartbox.active_device_list' # stores the mapping between all online devices and their access servers
DeviceServerRel = 'smartbox.device_server_rel' # stores the mapping between all online devices and their access servers
SensorStatusHash = 'smartbox.sensor.status.{device_id}.{sensor_type}.{sensor_id}' # key parts: {device_id}_{sensor_type}_{sensor_id}
DeviceStatusHash = 'smartbox.device.status.{device_id}'
AppRequestAuthCodeWidthIdsPrefix = 'smartbox.authcode.ids.'
AppRequestAuthCodePrefix = 'smartbox.authcode.data.'
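# Usage sketch (illustrative only, not part of the original module): the keys
# are plain str.format templates, e.g.
#   DeviceCommandQueue.format(device_type='relay', device_id='abc123')
#   -> 'smartbox.device.command.queue.relay.abc123'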
| 43.179487
| 121
| 0.785629
|
f7172286b14ddc62ce23b4339aa80a3cd495b4ef
| 27,950
|
py
|
Python
|
example_scripts/basic_project_stats.py
|
vrooje/Data-digging
|
ae4ee1de0df0d2686115510ac35f5960d5cfaf08
|
[
"MIT"
] | 40
|
2016-05-17T10:53:28.000Z
|
2022-02-08T05:02:21.000Z
|
example_scripts/basic_project_stats.py
|
vrooje/Data-digging
|
ae4ee1de0df0d2686115510ac35f5960d5cfaf08
|
[
"MIT"
] | 29
|
2016-03-17T03:17:03.000Z
|
2020-10-01T17:27:59.000Z
|
example_scripts/basic_project_stats.py
|
vrooje/Data-digging
|
ae4ee1de0df0d2686115510ac35f5960d5cfaf08
|
[
"MIT"
] | 34
|
2015-12-04T15:18:12.000Z
|
2021-08-13T13:35:49.000Z
|
# Python 2.7.9 (default, Apr 5 2015, 22:21:35)
# the full environment I used to test this is in basic_project_stats.yml
import sys
# file with raw classifications (csv)
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
try:
classfile_in = sys.argv[1]
except IndexError:
print("\nUsage: %s classifications_infile" % sys.argv[0])
print(" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.\n")
print(" Optional inputs:")
print(" workflow_id=N")
print(" specify the program should only consider classifications from workflow id N")
print(" workflow_version=M")
print(" specify the program should only consider classifications from workflow version M")
print(" (note the program will only consider the major version, i.e. the integer part)")
print(" outfile_csv=filename.csv")
print(" if you want the program to save a sub-file with only classification info from the workflow specified, give the filename here")
print(" --time_elapsed")
print(" specify the program should compute classification durations and total classification work effort")
print(" --remove_duplicates")
print(" remove duplicate classifications (subject-user pairs) before analysis.")
print(" memory-intensive for big files; probably best to pair with outfile_csv so you save the output.")
print(" --keep_nonlive")
print(" by default the program ignores classifications made while the project wasn't 'Live'; setting this will keep them in.")
print(" --keep_allcols")
print(" by default the program only keeps columns required for stats; use this with a specified outfile_csv to save all columns, including annotations. (If you're not using outfile_csv this will just waste memory.)")
print("\nAll output will be to stdout (about 1-2 paragraphs' worth).\n")
sys.exit(0)
import numpy as np # works in 1.10.1
import pandas as pd # works in 0.13.1
import datetime
import dateutil.parser
import json, ujson
import gc
# default value is not to care about workflow ID or version
workflow_id = -1
workflow_version = -1
# by default we won't worry about computing how much time effort the volunteers cumulatively spent
time_elapsed = False
# by default we won't write the subset of classifications we used to a new csv file
output_csv = False
# by default we'll ignore the possibility of duplicate classifications
# note duplicates are relatively rare, usually <2% of all classifications
# the Zooniverse has squashed several bugs related to this, but some still
# happen client-side and there's nothing we can do about that.
remove_duplicates = False
# by default, restrict the analysis to "Live" classifications
keep_nonlive = False
# by default, don't keep every column of the classifications when writing to an outfile
keep_allcols = False
# check for other command-line arguments
if len(sys.argv) > 2:
# if there are additional arguments, loop through them
for i_arg, argstr in enumerate(sys.argv[2:]):
arg = argstr.split('=')
if arg[0] == "workflow_id":
workflow_id = int(arg[1])
elif arg[0] == "workflow_version":
workflow_version = float(arg[1])
elif (arg[0] == "outfile_csv") | (arg[0] == "outfile"):
outfile_csv = arg[1]
output_csv = True
elif arg[0] == "--keep_allcols":
keep_allcols = True
elif arg[0] == "--time_elapsed":
time_elapsed = True
elif arg[0] == "--remove_duplicates":
remove_duplicates = True
elif arg[0] == "--keep_nonlive":
keep_nonlive = True
# columns currently in an exported Panoptes classification file:
# classification_id,user_name,user_id,user_ip,workflow_id,workflow_name,workflow_version,created_at,gold_standard,expert,metadata,annotations,subject_data,subject_ids
# classification_id identifies the specific classification - should be unique for each row in this file
# user_name is either their registered name or "not-logged-in"+their hashed IP
# user_id is their numeric Zooniverse ID or blank if they're unregistered
# user_ip is a hashed version of their IP
# workflow_id is the numeric ID of this workflow, which you can find in the project builder URL for managing the workflow:
# https://www.zooniverse.org/lab/[project_id]/workflow/[workflow_id]/
# workflow_name is the name you gave your workflow (for sanity checks)
# workflow_version is [bigchangecount].[smallchangecount] and is probably pretty big
# created_at is the date the entry for the classification was recorded
# gold_standard is 1 if this classification was done in gold standard mode
# expert is 1 if this classification was done in expert mode... I think
# metadata (json) is the data the browser sent along with the classification.
# Includes browser information, language, started_at and finished_at
# note started_at and finished_at are perhaps the easiest way to calculate the length of a classification
# (the duration elapsed between consecutive created_at by the same user is another way)
# the difference here is back-end vs front-end
# annotations (json) contains the actual classification information
# which for this analysis we will ignore completely, for now
# subject_data is cross-matched from the subjects table and is for convenience in data reduction
# subject_ids has just the subject ids in the given classification
# here we will ignore this too, except to count subjects once.
# we'll also ignore classification_id, user_ip, workflow information, gold_standard, and expert.
#
# Print out the input parameters just as a sanity check
print("Computing project stats using:")
print(" infile: %s" % classfile_in)
#################################################################################
#################################################################################
#################################################################################
# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient
#
# The Gini coefficient measures inequality in distributions of things.
# It was originally conceived for economics (e.g. where is the wealth in a country?
# in the hands of many citizens or a few?), but it's just as applicable to many
# other fields. In this case we'll use it to see how classifications are
# distributed among classifiers.
# G = 0 is a completely even distribution (everyone does the same number of
# classifications), and ~1 is uneven (~all the classifications are done
# by one classifier).
# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 2015) are
# in the range of 0.7-0.9.
# That range is generally indicative of a project with a loyal core group of
# volunteers who contribute the bulk of the classification effort, but balanced
# out by a regular influx of new classifiers trying out the project, from which
# you continue to draw to maintain a core group of prolific classifiers.
# Once your project is fairly well established, you can compare it to past Zooniverse
# projects to see how you're doing.
# If your G is << 0.7, you may be having trouble recruiting classifiers into a loyal
# group of volunteers. People are trying it, but not many are staying.
# If your G is > 0.9, it's a little more complicated. If your total classification
# count is lower than you'd like it to be, you may be having trouble recruiting
# classifiers to the project, such that your classification counts are
# dominated by a few people.
# But if you have G > 0.9 and plenty of classifications, this may be a sign that your
# loyal users are -really- committed, so a very high G is not necessarily a bad thing.
#
# Of course the Gini coefficient is a simplified measure that doesn't always capture
# subtle nuances and so forth, but it's still a useful broad metric.
def gini(list_of_values):
sorted_list = sorted(list_of_values)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(list_of_values) / 2
return (fair_area - area) / fair_area
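# Quick sanity check of gini(), added for illustration (toy inputs, not project
# data): a perfectly even distribution gives G = 0, while one classifier doing
# all the work among 100 users gives G close to 1 (0.99 for this input).
assert abs(gini([1] * 100)) < 1e-9
assert abs(gini([0] * 99 + [1]) - 0.99) < 1e-9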
#################################################################################
#################################################################################
#################################################################################
def get_duplicate_ids(grp):
# groupbys and dfs have slightly different indexing and just NOPE
#thegrp = pd.DataFrame(grp)
thegrp = grp
if len(thegrp) == 1:
return
else:
# we have a duplicate set, so return the details
return thegrp
def get_live_project(meta_json):
try:
return meta_json['live_project']
except:
# apparently some subject metadata doesn't have this? dunno?
return False
def get_live_project_incl_missing(meta_json):
try:
return meta_json['live_project']
except:
return -1
# Begin the main stuff
print("Reading classifications from %s" % classfile_in)
#classifications = pd.read_csv(classfile_in)
# the above will work but uses a LOT of memory for projects with > 1 million
# classifications. Nothing here uses the actual classification data so don't read it
'''
If you are using this code on an older project, where the data export is from
before subject_ids were exported as their own column, change "subject_id" below
to "subject_data", and then when you define the groupby "by_subject" and count
subjects, you'll need to use subject_data instead of subject_ids.
Apologies for doing this, but subject_data contains the whole manifest so for
big projects with big catalogs it can take up a lot of memory, so we don't want to
use it if we don't have to.
'''
cols_keep = ["classification_id", "user_name", "user_id", "user_ip", "workflow_id", "workflow_version", "created_at", "metadata", "subject_ids"]
if not keep_allcols:
try:
classifications = pd.read_csv(classfile_in, usecols=cols_keep)
except:
print("Some columns missing from classifications infile, reading without specifying columns (uses more memory)... ")
classifications = pd.read_csv(classfile_in)
else:
try:
classifications = pd.read_csv(classfile_in, low_memory=False)
except:
classifications = pd.read_csv(classfile_in)
cols_used = classifications.columns.tolist()
cols_out = classifications.columns.tolist()
if 'created_day' not in cols_used:
    cols_used.append('created_day')
if 'meta_json' not in cols_used:
    cols_used.append('meta_json')
n_class_raw = len(classifications)
# now restrict classifications to a particular workflow id/version if requested
if (workflow_id > 0) | (workflow_version > 0):
# only keep the stuff that matches these workflow properties
if (workflow_id > 0):
print("Considering only workflow id %d" % workflow_id)
in_workflow = classifications.workflow_id == workflow_id
else:
# the workflow id wasn't specified, so just make an array of true
in_workflow = np.array([True for q in classifications.workflow_id])
if (workflow_version > 0):
classifications['version_int'] = [int(q) for q in classifications.workflow_version]
print("Considering only major workflow version %d" % int(workflow_version))
# we only care about the major workflow version, not the minor version
in_version = classifications.version_int == int(workflow_version)
else:
in_version = np.array([True for q in classifications.workflow_version])
if (sum(in_workflow & in_version) == 0):
print("ERROR: your combination of workflow_id and workflow_version does not exist!\nIgnoring workflow id/version request and computing stats for ALL classifications instead.")
#classifications = classifications_all
else:
# select the subset of classifications
classifications = classifications[in_workflow & in_version]
del in_workflow
del in_version
else:
# just use everything
#classifications = classifications_all
workflow_ids = classifications.workflow_id.unique()
# this takes too much CPU time just for a print statement. Just use float versions
#classifications['version_int'] = [int(q) for q in classifications.workflow_version]
version_ints = classifications.workflow_version.unique()
print("Considering all classifications in workflow ids:")
print(workflow_ids)
print(" and workflow_versions:")
print(version_ints)
# Remove classifications collected before the project went Live
# note: it makes logical sense to do this *before* we extract the classifications
# from the workflow we care about, *but* the meta_json setting step (which we
# need in order to extract Live project status) can take a while (up to ~minutes)
# and adds to memory usage, so I'd rather do it after we've already culled
# the table of potentially a lot of unused rows.
# OTOH culling duplicates takes more time and memory than culling unused workflow
# versions, so wait to do that until after we've removed non-Live classifications
# first, extract the metadata column into a json we can read entries for
#
# ujson is quite a bit faster than json but seems to use a bit more memory as it works
classifications['meta_json'] = [ujson.loads(q) for q in classifications.metadata]
if keep_nonlive:
print("Retaining all non-live classifications in analysis.")
else:
# would that we could just do q['live_project'] but if that tag is missing for
# any classifications (which it is in some cases) it crashes
classifications['live_project'] = [get_live_project(q) for q in classifications.meta_json]
# if this line gives you an error you've read in this boolean as a string
# so need to convert "True" --> True and "False" --> False
class_live = classifications[classifications.live_project].copy()
n_class_thiswf = len(classifications)
n_live = sum(classifications.live_project)
n_notlive = n_class_thiswf - n_live
print(" Removing %d non-live classifications..." % n_notlive)
# don't make a slice but also save memory
classifications = pd.DataFrame(class_live)
del class_live
gc.collect()
# if we've been asked to remove duplicates, do that now
if remove_duplicates:
'''
a duplicate can be that the classification id is submitted twice by the client
but it can also be that the classifier classified the same subject twice in different classification_ids.
So identify duplicates based on username + subject id + workflow info, not based on classification_id.
'''
subj_classifications = classifications.groupby('user_name subject_ids workflow_id workflow_version'.split())
n_class = len(classifications)
# just take the first of each of the groups
classifications_nodups = subj_classifications.head(1)
n_class_nodup = len(classifications_nodups)
n_dups = n_class - n_class_nodup
if n_dups == 0:
print("Searched for duplicate classifications; none found.")
else:
duplicate_outfile = classfile_in.replace(".csv", "_duplicated_only.csv")
if duplicate_outfile == classfile_in:
duplicate_outfile += "_duplicated_only.csv"
print("Found %d duplicate classifications (%.2f percent of total)." % (n_dups, float(n_dups)/float(n_class)*100.0))
# get the duplicate classifications and save them before we remove them
#class_dups = pd.DataFrame(subj_classifications.apply(get_duplicate_ids))
# if you want to keep a record of everything with just the dups flagged,
# this is your thing
#dups_flagged = pd.merge(classifications, classifications_nodups['classification_id subject_id'.split()], how='outer', on='classification_id', suffixes=('', '_2'), indicator=True)
# if you just need something that has only the dups in it, here you go
dups_only = classifications[~classifications.isin(classifications_nodups)].dropna(how='all')
# dups_only has the duplicates only - not the original classification in each set
# i.e. if classifications 123, 456, and 789 are all from the same user
# classifying the same subject, dups_only will only contain classifications
# 456 and 789. When we save the duplicate classifications we want to save
# the initial classification (that was later duplicated) as well, so we
# need to retrieve those.
# I don't see a really easy way to do it based on the groupby we already did
# (subj_classifications)
# so let's just define what identifies the duplicate (user_name + subject_ids)
# and pick them out.
# even for a reasonably big dataset this is relatively fast (seconds, not minutes)
try:
dups_only['user_subj_pair'] = dups_only['user_name']+'_'+dups_only['subject_ids'].astype(int).astype(str)+'_'+dups_only['workflow_id'].astype(str)+'v'+dups_only['workflow_version'].astype(str)
except:
dups_only['user_subj_pair'] = dups_only['user_name']+'_'+dups_only['subject_ids'].astype(str)+'_'+dups_only['workflow_id'].astype(str)+'v'+dups_only['workflow_version'].astype(str)
# n_dup_pairs tracks unique user-subject pairs that were duplicated
dup_pairs = dups_only['user_subj_pair'].unique()
n_dup_pairs = len(dup_pairs)
try:
classifications['user_subj_pair'] = classifications['user_name']+'_'+classifications['subject_ids'].astype(int).astype(str)+'_'+classifications['workflow_id'].astype(str)+'v'+classifications['workflow_version'].astype(str)
except:
classifications['user_subj_pair'] = classifications['user_name']+'_'+classifications['subject_ids'].astype(str)+'_'+classifications['workflow_id'].astype(str)+'v'+classifications['workflow_version'].astype(str)
# this keeps things that are any part of a duplicate set, including first
is_a_dup = classifications['user_subj_pair'].isin(dup_pairs)
class_dups = classifications[is_a_dup].copy()
# counts any classification that is any part of a duplicate set
n_partofdup = len(class_dups)
class_dups.to_csv(duplicate_outfile)
#print(class_dups.head(3))
# now throw away the duplicates (but keep the first of each set) from
# the main classifications table
classifications = pd.DataFrame(classifications_nodups)
del class_dups
del is_a_dup
print("Duplicates removed from analysis (%d unique user-subject-workflow groups)." % n_dup_pairs)
del subj_classifications
del classifications_nodups
gc.collect()
classifications['created_day'] = [q[:10] for q in classifications.created_at]
first_class_day = min(classifications.created_day).replace(' ', '')
last_class_day = max(classifications.created_day).replace(' ', '')
# save processing time and memory in the groupby.apply(); only keep the columns we're going to use or want to save
if output_csv:
if not keep_allcols:
# if we'll be writing to a file at the end of this we need to save a few extra columns
cols_used = ["classification_id", "user_name", "user_id", "user_ip", "created_at", "created_day", "metadata", "meta_json", "subject_ids", "workflow_id", "workflow_version"]
else:
if not keep_allcols:
cols_used = ["classification_id", "user_name", "user_id", "user_ip", "created_at", "created_day", "meta_json", "subject_ids"]
classifications = classifications[cols_used]
# an explicit gc.collect() (which clears internal free lists such as PyInt's) helps free some active memory
gc.collect()
# grab the subject counts
n_subj_tot = len(classifications.subject_ids.unique())
by_subject = classifications.groupby('subject_ids')
subj_class = by_subject.created_at.aggregate('count')
# basic stats on how classified the subjects are
subj_class_mean = np.mean(subj_class)
subj_class_med = np.median(subj_class)
subj_class_min = np.min(subj_class)
subj_class_max = np.max(subj_class)
# free up some memory - note calling this does take CPU time but
# can free up GBs of active memory for big classification files
del by_subject
gc.collect()
# index by created_at as a timeseries
# note: this means things might not be uniquely indexed
# but it makes a lot of things easier and faster.
# update: it's not really needed in the main bit, but will do it on each group later.
#classifications.set_index('created_at_ts', inplace=True)
# get some user information
all_users = classifications.user_name.unique()
by_user = classifications.groupby('user_name')
# also count IP addresses
n_ip = len(classifications.user_ip.unique())
# get total classification and user counts
n_class_tot = len(classifications)
n_users_tot = len(all_users)
unregistered = [q.startswith("not-logged-in") for q in all_users]
n_unreg = sum(unregistered)
n_reg = n_users_tot - n_unreg
is_unreg_class = [q.startswith("not-logged-in") for q in classifications.user_name]
n_unreg_class = sum(is_unreg_class)
n_reg_class = n_class_tot - n_unreg_class
# for the leaderboard, which I recommend project builders never make public because
# Just Say No to gamification
# But it's still interesting to see who your most prolific classifiers are, and
# e.g. whether they're also your most prolific Talk users
nclass_byuser = by_user.created_at.aggregate('count')
nclass_byuser_ranked = nclass_byuser.copy()
nclass_byuser_ranked.sort_values(inplace=True, ascending=False)
# rename the columns properly so they'll print as useful csv headers
nclass_byuser_ranked.name = 'user_name'
nc = pd.DataFrame(nclass_byuser_ranked)
nc.columns = ['n_class']
# write this to a file, so you don't have to re-calculate it later
nclass_byuser_outfile = classfile_in.replace(".csv", "_nclass_byuser_ranked.csv")
# don't accidentally overwrite the classifications file just because someone
# renamed it to not end in .csv
if nclass_byuser_outfile == classfile_in:
nclass_byuser_outfile = "project_nclass_byuser_ranked.csv"
nc.to_csv(nclass_byuser_outfile)
# very basic stats
nclass_med = np.median(nclass_byuser)
nclass_mean = np.mean(nclass_byuser)
# Gini coefficient - see the comments above the gini() function for more notes
nclass_gini = gini(nclass_byuser)
print("\nOverall:\n\n%d classifications of %d subjects by %d classifiers," % (n_class_tot,n_subj_tot,n_users_tot))
print("%d logged in and %d not logged in, from %d unique IP addresses." % (n_reg,n_unreg,n_ip))
print("%d classifications were from logged-in users, %d from not-logged-in users.\n" % (n_reg_class, n_unreg_class))
print("That's %.2f classifications per subject on average (median = %.1f)." % (subj_class_mean, subj_class_med))
print("The most classified subject has %d classifications; the least-classified subject has %d.\n" % (subj_class_max,subj_class_min))
print("Median number of classifications per user: %.2f" %nclass_med)
print("Mean number of classifications per user: %.2f" % nclass_mean)
print("\nTop 10 most prolific classifiers:")
print(nclass_byuser_ranked.head(10))
print("\n\nGini coefficient for classifications by user: %.2f" % nclass_gini)
print("\nClassifications were collected between %s and %s." % (first_class_day, last_class_day))
print("The highest classification id considered here is %d.\n" % max(classifications.classification_id))
# if the input specified we should compute total time spent by classifiers, compute it
if time_elapsed:
# free up some memory
# do this inside the if because if we're not computing times then the program
# is about to end so this memory will be freed up anyway
del unregistered
del by_user
gc.collect()
classifications['started_at_str'] = [q['started_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]
classifications['finished_at_str'] = [q['finished_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]
sa_temp = classifications['started_at_str']
fa_temp = classifications['finished_at_str']
#print("Creating timeseries...")#,datetime.datetime.now().strftime('%H:%M:%S.%f')
try:
classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S.%f')
except Exception as the_error:
print("Oops:\n%s" % the_error)
try:
classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S %Z')
except Exception as the_error:
print("Oops:\n%s" % the_error)
classifications['started_at'] = pd.to_datetime(sa_temp)
try:
classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S.%f')
except Exception as the_error:
print("Oops:\n%s" % the_error)
try:
classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S %Z')
except Exception as the_error:
print("Oops:\n%s" % the_error)
classifications['finished_at'] = pd.to_datetime(fa_temp)
# we did all that above so that this would only take one line and be quite fast
classifications['class_t_length'] = (classifications.finished_at - classifications.started_at)
# throw away absurd time counts: accept lengths between 0 < dt < 30 minutes
# anything outside that is either a wrongly reported time or the user walked away from their computer
ok_times = (classifications.class_t_length > np.timedelta64(0, 's')) & (classifications.class_t_length < np.timedelta64(30, 'm'))
# how many turned out to be okay?
n_t_ok = sum(ok_times)
# compute total times
time_spent_classifying = np.sum(classifications['class_t_length'][ok_times])
days_spent_classifying = time_spent_classifying / np.timedelta64(1, 'D')
frac_good_durations = float(n_t_ok)/float(n_class_tot)
print("Based on %d classifications (%.1f percent) where we can probably\ntrust the classification durations, the classifiers spent a total of %.2f days\n(or %.2f years) classifying in the project.\n" % (n_t_ok, frac_good_durations*100., days_spent_classifying, days_spent_classifying / 365.))
mean_t_class = np.mean(classifications['class_t_length'][ok_times])
median_t_class = np.median(classifications['class_t_length'][ok_times])
human_effort_extrap = float(n_class_tot)*float(mean_t_class / np.timedelta64(1, 'D')) / 365. # in years
print("Mean classification length: %8.1f seconds" % float(mean_t_class / np.timedelta64(1, 's')))
print("Median classification length: %6.1f seconds" % float(median_t_class / np.timedelta64(1, 's')))
print("\nIf we use the mean to extrapolate and include the %.1f percent of\nclassifications where the reported duration had an error, that means\nthe total time spent is equivalent to %.2f years of human effort, or\n%.2f years of FTE (1 person working 40 hours/week, no holiday.)\n" % ((1-frac_good_durations)*100., human_effort_extrap, human_effort_extrap * (24.*7.)/40.))
if output_csv:
# free up what memory we can before doing this (matters for big files)
if time_elapsed:
del ok_times
del sa_temp
del fa_temp
del nclass_byuser
del all_users
del subj_class
gc.collect()
if keep_allcols:
classifications[cols_out].to_csv(outfile_csv)
else:
classifications.to_csv(outfile_csv)
print("File with used subset of classification info written to %s ." % outfile_csv)
print("File with ranked list of user classification counts written to %s ." % nclass_byuser_outfile)
if remove_duplicates:
if (n_dups > 0):
print("Saved info for all classifications that have duplicates to %s ." % duplicate_outfile)
#end
avg_line_length: 46.505824 | max_line_length: 377 | alphanum_fraction: 0.712916
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71722c909f61d25a111b3cedd33fb84627eb888 | size: 29,871 | ext: py | lang: Python
max_stars_repo: zlalanne/msp430-webcontrol @ a4e8f84942c3e16fa447907d2cfff4587013d6b3, path: msp430backend/msp430_ws/server_protocol.py, licenses: ["BSD-3-Clause"], stars: null
max_issues_repo: zlalanne/msp430-webcontrol @ a4e8f84942c3e16fa447907d2cfff4587013d6b3, path: msp430backend/msp430_ws/server_protocol.py, licenses: ["BSD-3-Clause"], issues: null
max_forks_repo: zlalanne/msp430-webcontrol @ a4e8f84942c3e16fa447907d2cfff4587013d6b3, path: msp430backend/msp430_ws/server_protocol.py, licenses: ["BSD-3-Clause"], forks: null
import requests
import json
import sys
import re
from twisted.internet.error import ConnectionDone
from twisted.internet import protocol, threads, reactor
from twisted.protocols.basic import LineReceiver
from twisted.python import log
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol, HttpException
# the httpstatus constants used in MSP430ServerProtocol.onConnect live in a
# separate autobahn module (import path assumed for the autobahn version in use)
from autobahn import httpstatus
import settings
import common_protocol
import buffer
import msp430_data.interface
import msp430_data.utility
class WebServerProtocol(LineReceiver):
def __init__(self):
self.client = None
def lineReceived(self, line):
line = line.strip()
if self.client is None:
if self.debug:
log.msg("WebServerProtocol.lineReceived - No Client type")
# TODO: This is an untested way to kill the connection. Need
# to test.
self.transport.loseConnection()
else:
self.client.dataReceived(line)
def connectionLost(self, reason=ConnectionDone):
if self.client is None:
if self.debug:
log.msg("WebServerProtocol.connectionClose - No Client type")
return
self.client.connectionLost(reason)
def connectionMade(self):
if self.transport.getPeer().host == settings.SITE_SERVER:
self.client = WebServerClient(self)
else:
self.client = MSP430Client(self)
self.client.connectionMade()
def register_msp430(self):
"""This sends a http request to the django appliaction. This effictevely
enters the MSP430 into the database of the django application. It then
tells the websocket server to alert the user that a new MSP430 has
come online"""
# Need to send MAC and IP of MSP430
payload = {}
payload['mac'] = self.client.mac
payload['ip'] = self.client.protocol.transport.getPeer().host
payload['iface'] = self.client.iface
data = {'json': payload}
try:
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
response = requests.post("http://%s/tcp_comm/register/" % settings.SITE_SERVER_ADDRESS, data=json.dumps(data),
headers=headers)
except:
pass
# TODO: Need to validate response
# Notify Browsers
reactor.callFromThread(self.factory.ws_factory.register_msp430_wsite, self)
def disconnect_msp430(self):
payload = {}
payload['mac'] = self.client.mac
data = {'json':payload}
try:
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
response = requests.post("http://%s/tcp_comm/disconnect/" % settings.SITE_SERVER_ADDRESS, data=json.dumps(data),
headers=headers)
except:
pass
# Notify Browsers
reactor.callFromThread(self.factory.ws_factory.disconnect_msp430_wsite, self)
class WebServerClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
def connectionMade(self):
pass
def connectionLost(self, reason=ConnectionDone):
pass
def dataReceived(self, data):
data = data.strip()
try:
            msp430s = json.loads(data)["json"]
            log.msg(msp430s)
        except:
            if self.protocol.factory.ws_factory.debug:
                log.msg('WebServerClient - Error parsing msp430 configs %s' % sys.exc_info())
            return 'error'
        # Delegate each config request to the WS factory
        for msp430 in msp430s:
            if self.protocol.factory.ws_factory.debug:
                log.msg('WebServerClient - Received config for MSP430 %s' % msp430['mac'])
            self.protocol.factory.ws_factory.config_msp430(msp430)
class ServerState(common_protocol.State):
def __init__(self, client):
self.client = client
def activated(self):
if self.client.protocol.debug:
log.msg("%s.activated()" % self.__class__.__name__)
def deactivated(self):
if self.client.protocol.debug:
log.msg("%s.deactivated()" % self.__class__.__name__)
class WebSocketClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
    def onMessage(self, msg):
        try:
            state = self.state_stack.pop_wr()
        except IndexError:
            if self.protocol.factory.debug:
                log.msg("%s.onMessage - Received a message in an unknown state, ignored" % self.__class__.__name__)
            return
        state.onMessage(msg)
def onClose(self, wasClean, code, reason):
pass
def onOpen(self):
pass
class UserClient(WebSocketClient):
"""User client related protocol"""
def __init__(self, protocol):
WebSocketClient.__init__(self, protocol)
self.associated_msp430 = None
self.streaming_buffer_read = None
self.streaming_buffer_write = None
self.ackcount = 0
self.paused = True
def register_to_msp430(self, msp430_mac):
# Notify factory we want to unregister if registered first
self.ackcount = 0
if self.associated_msp430 is not None:
self.protocol.factory.unregister_user_to_msp430(self, self.associated_msp430)
msp430 = self.protocol.factory.get_msp430(msp430_mac)
if msp430:
self.streaming_buffer_read = buffer.UpdateDict()
self.streaming_buffer_write = buffer.UpdateDict()
self.associated_msp430 = msp430
self.protocol.factory.register_user_to_msp430(self, self.associated_msp430)
# begin streaming
self.resume_streaming()
def resume_streaming(self):
self.paused = False
self.copy_and_send()
def pause_streaming(self):
self.paused = True
def copy_and_send(self):
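        # Flow control: ackcount acts as a credit window. Each WRITE_DATA
        # message sent costs one credit (ackcount -= 1), and the client's
        # ACK_DATA messages refund credits in onMessage; once 10 messages are
        # outstanding, or streaming is paused, polling stops until
        # resume_streaming() kicks off copy_and_send() again.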
if self.ackcount <= -10 or self.paused:
return
# copy buffers
self.protocol.factory.copy_msp430_buffers(self.associated_msp430,
self.streaming_buffer_read,
self.streaming_buffer_write)
if len(self.streaming_buffer_read) > 0 or len(self.streaming_buffer_write) > 0:
msg = {'cmd':common_protocol.ServerCommands.WRITE_DATA}
msg['read'] = self.streaming_buffer_read
msg['write'] = self.streaming_buffer_write
self.ackcount -= 1
self.protocol.sendMessage(json.dumps(msg))
# keep polling until we run out of data
reactor.callLater(0, self.copy_and_send)
else:
# when there's new data resume will be called
self.pause_streaming()
def unregister_to_msp430(self):
self.pause_streaming()
if self.associated_msp430 is not None:
self.associated_msp430 = None
def notifyMSP430State(self, msp430, state):
if state == 'config':
if self.associated_msp430 is not msp430:
return
msg = {'cmd':common_protocol.ServerCommands.MSP430_STATE_CHANGE, 'msp430_mac':msp430.client.mac, 'msp430_state':state}
self.protocol.sendMessage(json.dumps(msg))
    def onMessage(self, msg):
        try:
            msg = json.loads(msg)
        except:
            if self.protocol.debug:
                log.msg('UserState.onMessage - JSON error, dropping')
            self.protocol.failConnection()
            return
if msg['cmd'] == common_protocol.UserClientCommands.CONNECT_MSP430:
mac = msg['msp430_mac']
self.register_to_msp430(mac)
elif msg['cmd'] == common_protocol.UserClientCommands.ACK_DATA:
ackcount = msg['ack_count']
self.ackcount += ackcount
if self.ackcount > -10:
self.copy_and_send()
elif msg['cmd'] == common_protocol.UserClientCommands.WRITE_DATA:
port = msg['iface_port']
value = msg['value']
if self.associated_msp430 is not None:
self.associated_msp430.write_interface_data(port, value)
def onClose(self, wasClean, code, reason):
self.protocol.factory.disconnect_user(self)
def onOpen(self):
self.protocol.factory.register_user(self)
class TCPClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
def connectionMade(self):
pass
def connectionLost(self, reason=ConnectionDone):
pass
    def dataReceived(self, data):
        try:
            state = self.state_stack.pop_wr()
        except IndexError:
            if self.protocol.factory.debug:
                log.msg("%s.dataReceived - Received data in an unknown state, ignored" % self.__class__.__name__)
            return
        state.dataReceived(data)
class MSP430Client(TCPClient):
def __init__(self, protocol):
TCPClient.__init__(self, protocol)
def connectionMade(self):
self.push_state(MSP430RegisterState(self))
    def connectionLost(self, reason=ConnectionDone):
        # If we're registered, remove ourselves from the active client list
        if hasattr(self, 'mac'):
            self.protocol.factory.ws_factory.disconnect_msp430(self)
def copy_buffers(self, read_buffer, write_buffer):
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430StreamState):
for key, value in state.read_data_buffer_eq.iteritems():
read_buffer[key] = value
for key, value in state.write_data_buffer_eq.iteritems():
write_buffer[key] = value
return True
return False
def pause_streaming(self):
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430StreamState):
state.pause_streaming()
return True
return False
def resume_streaming(self):
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430StreamState):
state.resume_streaming()
return True
return False
def write_interface_data(self, key, data):
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430StreamState):
state.write_interface_data(key, data)
return True
return False
def config_io(self, reads, writes):
"""
read/writes are lsts of dicts with the following:
'ch_port': integer or boolean (check cls req)
'equation': empty, or python style math
'cls_name': class name as string, ex) 'ADC'
Returns True/False for success
"""
# check the state of the MSP430 client
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430ConfigState):
# ready to be configured
# MSP430 was waiting for config
pass
elif isinstance(state, MSP430StreamState):
# MSP430 is being re-configured
state.drop_to_config(reads, writes)
# config has to be delegated
return True
else:
# MSP430 can't be put into a config state, fail
return False
state = self.current_state()
# delegate the job to the config state
return state.config_io(reads, writes)
"""MSP430 client related protocol and states"""
class MSP430RegisterState(ServerState):
def __init__(self, client):
super(MSP430RegisterState, self).__init__(client)
self.registered = False
self.re_message_count = 0
def dataReceived(self, data):
if self.re_message_count == 0 and not self.registered:
# MSP430 is requesting to register
# TODO: Add some MSP430 authentication here
if data == common_protocol.ServerCommands.REGISTER:
self.re_message_count += 1
if self.client.protocol.debug:
log.msg("MSP430Client.dataReceived - Registration Request")
self.client.protocol.sendLine(common_protocol.ServerCommands.ACK)
else:
self.client.protocol.sendLine(common_protocol.ServerCommands.NACK)
elif self.re_message_count == 1 and not self.registered:
self.client.protocol.sendLine(common_protocol.ServerCommands.ACK)
def interface_desc(ifaces):
# List of classes that resemble I/O. Creating a struct based on
# their names, docstring, choices and I/O type to send to django
# application.
ret = []
for cls in ifaces:
name = cls.__name__
desc = msp430_data.utility.trim(cls.__doc__)
choices = []
for choice_pin, choice_desc in cls.IO_CHOICES:
choice = {}
choice['s'] = choice_pin
choice['d'] = choice_desc
choices.append(choice)
ret.append({'name':name, 'desc':desc, 'choices':choices, 'io_type':cls.IO_TYPE})
return ret
self.client.iface = {}
self.client.interfaces = msp430_data.interface.get_interface_desc()
for key in self.client.interfaces.iterkeys():
self.client.iface[key] = interface_desc(self.client.interfaces[key])
self.client.mac = data
self.registered = True
self.re_message_count = 0
if self.client.protocol.debug:
log.msg("MSP430Client.dataReceived - Successful Registration")
self.client.push_state(MSP430ConfigState(self.client))
# Add to dictionary of clients in the WS factory
self.client.protocol.factory.ws_factory.register_msp430(self.client)
else:
# TODO: Something went wrong
return
class MSP430ConfigState(ServerState):
"""In this state, the MSP430 is waiting to be configured.
Server is not required to configure the MSP430 immediately.
"""
def __init__(self, client):
super(MSP430ConfigState, self).__init__(client)
def dataReceived(self, data):
if data == common_protocol.MSP430ClientCommands.CONFIG_OK:
log.msg('MSP430ConfigState - MSP430 was configured')
self.client.push_state(MSP430StreamState(self.client,
reads=self.config_reads,
writes=self.config_writes,
interfaces=self.config_interfaces,
mapping=self.config_mapping
))
elif data == common_protocol.MSP430ClientCommands.CONFIG_FAIL:
if self.client.protocol.debug:
log.msg('MSP430ConfigState - MSP430 failed to configure')
# TODO: Notify web server
def config_io(self, reads, writes):
"""
read/writes are lsts of dicts with the following:
'ch_port': integer or boolean (check cls req)
'equation': empty, or python style math
'cls_name': class name as string, ex) 'ADC'
Returns True/False for success
"""
self.display_reads = reads
self.display_writes = writes
# Format IO to store on the server
def format_io(io_collection):
# Duplicate equations allowed, duplicate instances not allowed
instanced_io_dict = {}
for io in io_collection:
cls_str = io['cls_name']
ch_port = io['ch_port']
equation = io['equation']
key = 'cls:%s, port:%s' % (cls_str, ch_port)
if key not in instanced_io_dict:
io_new_dict = {'cls_name':cls_str, 'ch_port':ch_port}
io_new_dict['equations'] = [equation]
instanced_io_dict[key] = io_new_dict
else:
# we can have more then one equation per instance
existing_instance = instanced_io_dict[key]
equations = existing_instance['equations']
if equation not in equations:
equations.append(equation)
return instanced_io_dict
# Format IO to give to the msp430
def format_io_msp430(io_collection):
config_mapping = {}
msp430_configs = {}
i = 0
for key, io in io_collection:
cls = getattr(msp430_data.interface, io['cls_name'])
msp430_configs[str(i)] = {'pin': io['ch_port'], 'opcode': cls.IO_OPCODE}
config_mapping[str(i)] = io
i += 1
return msp430_configs, config_mapping
self.config_reads = format_io(reads)
self.config_writes = format_io(writes)
self.config_interfaces, self.config_mapping = format_io_msp430(self.config_reads.items() + self.config_writes.items())
if self.client.protocol.debug:
log.msg('MSP430ConfigState - Pushing configs to remote MSP430')
msg = {'cmd':common_protocol.ServerCommands.CONFIG,
'payload':self.config_interfaces}
self.client.protocol.sendLine(json.dumps(msg))
class MSP430StreamState(ServerState):
""" In this state the MSP430 has been configured and is streaming data"""
def __init__(self, client, reads, writes, interfaces, mapping):
super(MSP430StreamState, self).__init__(client)
# Read/Write configs is used to communicate with the web
# Interface config is used to communicate to the msp430
self.config_reads = reads
self.config_writes = writes
self.config_interfaces = interfaces
self.config_mapping = mapping
# Buffers for storing the evaluated data
self.write_data_buffer_eq = {}
self.read_data_buffer_eq = {}
self.datamsgcount_ack = 0
    def evaluate_eq(self, eq, value):
        if eq != '':
            # TODO: fix security - eval() on a user-supplied equation string is
            # unsafe; see the restricted-eval sketch after this class
            x = value
            new_value = eval(eq)
        else:
            new_value = value
        return new_value
def deactivated(self):
super(MSP430StreamState, self).deactivated()
self.client.protocol.factory.ws_factory.notify_clients_msp430_state_change(self.client.protocol, state='drop_stream')
def activated(self):
super(MSP430StreamState, self).activated()
self.client.protocol.factory.ws_factory.notify_clients_msp430_state_change(self.client.protocol, state='stream')
def dataReceived(self, data):
try:
data = json.loads(data)
except ValueError:
log.msg("MSP430StreamState.dataReceived - Problem with JSON structure")
log.msg(data)
return
if data['cmd'] == common_protocol.MSP430ClientCommands.DROP_TO_CONFIG_OK:
# Order here is important, pop first!
self.client.pop_state()
self.client.current_state().config_io(self.delegate_config_reads, self.delegate_config_writes)
if data['cmd'] == common_protocol.MSP430ClientCommands.DATA:
self.datamsgcount_ack += 1
interfaces = data['interfaces']
for key, value in interfaces.iteritems():
interface = self.config_mapping[key]
cls_name = interface["cls_name"]
pin = interface["ch_port"]
cls = getattr(msp430_data.interface, cls_name)
# Convert from raw data string to correct data type
new_value = cls.parse_input(value)
# Evaluate equation
if msp430_data.interface.IWrite in cls.__bases__:
for eq in interface["equations"]:
new_key = "cls:{}, port:{}, eq:{}".format(cls_name, int(pin), eq)
self.write_data_buffer_eq[new_key] = {"calculated" : self.evaluate_eq(eq, new_value),
"real": new_value}
elif msp430_data.interface.IRead in cls.__bases__:
for eq in interface["equations"]:
new_key = "cls:{}, port:{}, eq:{}".format(cls_name, int(pin), eq)
self.read_data_buffer_eq[new_key] = self.evaluate_eq(eq, new_value)
# Notify factory to update listening clients
if self.datamsgcount_ack >= 5:
data = {'cmd':common_protocol.ServerCommands.ACK_DATA, 'count':self.datamsgcount_ack}
self.client.protocol.sendLine(json.dumps(data, sort_keys=True))
self.datamsgcount_ack = 0
# Notify factory of new data event
self.client.protocol.factory.ws_factory.msp430_new_data_event(self.client)
def resume_streaming(self):
# Starting to stream again, reset the ack count
self.datamsgcount_ack = 0
msg = {'cmd':common_protocol.ServerCommands.RESUME_STREAMING}
self.client.protocol.sendLine(json.dumps(msg))
def pause_streaming(self):
msg = {'cmd':common_protocol.ServerCommands.PAUSE_STREAMING}
self.client.protocol.sendLine(json.dumps(msg))
def write_interface_data(self, key, value):
# Get the class and port from the key
match = re.match(r'cls:([A-Za-z0-9_]+),\ port:(\d+)', key)
try:
cls_name = match.group(1)
pin = match.group(2)
except:
# TODO: add correct exception
return
cls = getattr(msp430_data.interface, cls_name)
payload = {'opcode': cls.IO_OPCODE, 'pin' : pin, 'value' : str(value)}
msg = {'cmd':common_protocol.ServerCommands.WRITE_DATA,
'payload': payload}
self.client.protocol.sendLine(json.dumps(msg))
def drop_to_config(self, reads, writes):
# Drop remote MSP430 to config state
msg = {'cmd':common_protocol.ServerCommands.DROP_TO_CONFIG}
self.client.protocol.sendLine(json.dumps(msg))
self.delegate_config_reads = reads
self.delegate_config_writes = writes
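# Illustration only: a narrower alternative to the bare eval() used in
# MSP430StreamState.evaluate_eq above. This helper is an assumption about how
# one might shrink the attack surface; it is not part of the original code,
# and an emptied __builtins__ still does not make eval() safe against a truly
# hostile equation string (a real fix would parse the expression, e.g. via ast).
def evaluate_eq_restricted(eq, value):
    if eq == '':
        return value
    # expose only 'x' to the equation, e.g. 'x * 0.0048 + 1'
    return eval(eq, {'__builtins__': {}}, {'x': value})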
class MSP430ServerProtocol(WebSocketServerProtocol):
"""Base server protocol, instantiates child protocols"""
def __init__(self):
self.client = None
def onConnect(self, connectionRequest):
"""Connection to WebSocket Protocol"""
def user(headers):
if self.debug:
log.msg("MSP430ServerProtocol.onConnect - User connected")
return UserClient(self)
# MSP430 won't connect via WebSocket so only option is the client
paths = {
'/':user,
}
if connectionRequest.path not in paths:
raise HttpException(httpstatus.HTTP_STATUS_CODE_NOT_FOUND[0],
httpstatus.HTTP_STATUS_CODE_NOT_FOUND[1])
self.client = paths[connectionRequest.path](connectionRequest.headers)
    def onMessage(self, msg, binary):
        """Message received from client"""
        if self.client is None:
            if self.debug:
                log.msg("MSP430ServerProtocol.onMessage - No Client type")
            self.failConnection()
            return
        self.client.onMessage(msg)
def onOpen(self):
WebSocketServerProtocol.onOpen(self)
if self.client is not None:
self.client.onOpen()
def onClose(self, wasClean, code, reason):
"""Connect closed, cleanup"""
# base logs
WebSocketServerProtocol.onClose(self, wasClean, code, reason)
if self.client is None:
if self.debug:
log.msg("MSP430ServerProtocol.onClose - No Client type")
return
self.client.onClose(wasClean, code, reason)
class MSP430SocketServerFactory(WebSocketServerFactory):
"""Manages every MSP430 connected to the server."""
def __init__(self, *args, **kwargs):
WebSocketServerFactory.__init__(self, *args, **kwargs)
# safari
self.allowHixie76 = True
# Identify MSP430's by their macs
# Identify user by peerstr
self.msp430_clients = {}
self.user_client = {}
# Key MSP430 mac, value list of user clients
self.msp430_clients_registered_users = {}
def register_user_to_msp430(self, client, msp430):
if len(self.msp430_clients_registered_users[msp430.mac]) == 0:
# MSP430 wasn't streaming, start streaming!
msp430.resume_streaming()
if client not in self.msp430_clients_registered_users[msp430.mac]:
self.msp430_clients_registered_users[msp430.mac].append(client)
if self.debug:
log.msg('MSP430SocketServerFactory.register_user_to_msp430 msp430:%s user:%s' %
(msp430.mac, client.protocol.peerstr))
def unregister_user_to_msp430(self, client, msp430):
client.unregister_to_msp430()
if msp430 is None:
return
if msp430.mac in self.msp430_clients_registered_users:
if client in self.msp430_clients_registered_users[msp430.mac]:
self.msp430_clients_registered_users[msp430.mac].remove(client)
if self.debug:
log.msg('msp430SocketServerFactory.unregister_user_to_msp430 msp430:%s user:%s' %
(msp430.mac, client.protocol.peerstr))
if msp430.mac not in self.msp430_clients_registered_users or len(self.msp430_clients_registered_users[msp430.mac]) == 0:
# Pause streaming
msp430.pause_streaming()
def msp430_new_data_event(self, msp430):
# resume streaming on any MSP430s waiting for new data
for client in self.msp430_clients_registered_users[msp430.mac]:
client.resume_streaming()
def copy_msp430_buffers(self, msp430, read_buffer, write_buffer):
msp430.copy_buffers(read_buffer, write_buffer)
def get_msp430(self, msp430_mac):
if msp430_mac in self.msp430_clients:
return self.msp430_clients[msp430_mac]
return None
def notify_clients_msp430_state_change(self, msp430, state='offline'):
for peerstr, user in self.user_client.iteritems():
user.notifyMSP430State(msp430, state)
def register_user(self, user):
if user.protocol.peerstr not in self.user_client:
self.user_client[user.protocol.peerstr] = user
if self.debug:
log.msg('MSP430SocketServerFactory.register_user %s' % user.protocol.peerstr)
def disconnect_user(self, user):
if self.debug:
log.msg('MSP430SocketServerFactory.disconnect_user %s' % user.protocol.peerstr)
del self.user_client[user.protocol.peerstr]
self.unregister_user_to_msp430(user, user.associated_msp430)
def register_msp430(self, msp430):
# This is called when the MSP430 has been authenticated with the WS server
# register on the site server
msp430.protocol.register_msp430()
# register locally to the factory
self.msp430_clients[msp430.mac] = msp430
self.msp430_clients_registered_users[msp430.mac] = []
if self.debug:
log.msg("MSP430SocketServerFactory.register_msp430 - %s registered, %d msp430" % (msp430.mac, len(self.msp430_clients)))
def register_msp430_wsite(self, msp430):
"""Called after MSP430 has been registed to the website"""
self.notify_clients_msp430_state_change(msp430, state='online')
def disconnect_msp430(self, msp430):
if hasattr(msp430, 'mac'):
if self.debug:
log.msg("MSP430SocketServerFactory.disconnect_msp430 - %s msp430 disconnected" % (msp430.mac,))
reactor.callInThread(msp430.protocol.disconnect_msp430)
try:
del self.msp430_clients[msp430.mac]
del self.msp430_clients_registered_users[msp430.mac]
except KeyError:
log.msg(self.msp430_clients)
def disconnect_msp430_wsite(self, msp430):
"""Called after MSP430 has been disconnected from web server"""
self.notify_clients_msp430_state_change(msp430, state='offline')
def config_msp430(self, configs):
"""
Not thread safe
configs:
dict with the following keys:
'read': lst of port configs
'write: lst of port configs
'mac': '00:00:...'
port config dict with the following keys:
'ch_port': integer or boolean (check cls req)
'equation': empty, or python style math
'cls_name': class name as string, ex) 'ADC'
Return: True/False for success
"""
# Check if MSP430 is actually an active client
mac = configs['mac']
if mac not in self.msp430_clients:
return False
msp430_client = self.msp430_clients[mac]
return msp430_client.config_io(reads=configs['read'], writes=configs['write'])
| 35.773653
| 132
| 0.614174
|
import requests
import json
import sys
import re
from twisted.internet.error import ConnectionDone
from twisted.internet import protocol, threads, reactor
from twisted.protocols.basic import LineReceiver
from twisted.python import log
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol, HttpException
from autobahn import httpstatus  # provides the (code, reason) tuples used in onConnect
import settings
import common_protocol
import buffer
import msp430_data.interface
import msp430_data.utility
class WebServerProtocol(LineReceiver):
def __init__(self):
self.client = None
def lineReceived(self, line):
line = line.strip()
if self.client is None:
if self.debug:
log.msg("WebServerProtocol.lineReceived - No Client type")
self.transport.loseConnection()
else:
self.client.dataReceived(line)
def connectionLost(self, reason=ConnectionDone):
if self.client is None:
if self.debug:
log.msg("WebServerProtocol.connectionClose - No Client type")
return
self.client.connectionLost(reason)
def connectionMade(self):
if self.transport.getPeer().host == settings.SITE_SERVER:
self.client = WebServerClient(self)
else:
self.client = MSP430Client(self)
self.client.connectionMade()
def register_msp430(self):
payload = {}
payload['mac'] = self.client.mac
payload['ip'] = self.client.protocol.transport.getPeer().host
payload['iface'] = self.client.iface
data = {'json': payload}
try:
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
response = requests.post("http://%s/tcp_comm/register/" % settings.SITE_SERVER_ADDRESS, data=json.dumps(data),
headers=headers)
except:
pass
reactor.callFromThread(self.factory.ws_factory.register_msp430_wsite, self)
def disconnect_msp430(self):
payload = {}
payload['mac'] = self.client.mac
data = {'json':payload}
try:
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
response = requests.post("http://%s/tcp_comm/disconnect/" % settings.SITE_SERVER_ADDRESS, data=json.dumps(data),
headers=headers)
except:
pass
reactor.callFromThread(self.factory.ws_factory.disconnect_msp430_wsite, self)
class WebServerClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
def connectionMade(self):
pass
def connectionLost(self, reason=ConnectionDone):
pass
def dataReceived(self, data):
data = data.strip()
try:
msp430s = json.loads(data)["json"]
log.msg(msp430s)
            for msp430 in msp430s:
                if self.protocol.factory.ws_factory.debug:
                    log.msg('WebServerClient - Received config for MSP430 %s' % msp430['mac'])
        except:
            if self.protocol.factory.ws_factory.debug:
                log.msg('WebServerClient - Error parsing msp430 configs %s' % sys.exc_info())
            return 'error'
        # Apply each parsed config
        for msp430 in msp430s:
            self.protocol.factory.ws_factory.config_msp430(msp430)
class ServerState(common_protocol.State):
def __init__(self, client):
self.client = client
def activated(self):
if self.client.protocol.debug:
log.msg("%s.activated()" % self.__class__.__name__)
def deactivated(self):
if self.client.protocol.debug:
log.msg("%s.deactivated()" % self.__class__.__name__)
class WebSocketClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
def onMessage(self, msg):
try:
state = self.state_stack.pop_wr()
        except IndexError:
            if self.protocol.factory.debug:
                log.msg("%s.onMessage - Received a message in an unknown state, ignored" % self.__class__.__name__)
            return
        state.onMessage(msg)
def onClose(self, wasClean, code, reason):
pass
def onOpen(self):
pass
class UserClient(WebSocketClient):
def __init__(self, protocol):
WebSocketClient.__init__(self, protocol)
self.associated_msp430 = None
self.streaming_buffer_read = None
self.streaming_buffer_write = None
self.ackcount = 0
self.paused = True
def register_to_msp430(self, msp430_mac):
self.ackcount = 0
if self.associated_msp430 is not None:
self.protocol.factory.unregister_user_to_msp430(self, self.associated_msp430)
msp430 = self.protocol.factory.get_msp430(msp430_mac)
if msp430:
self.streaming_buffer_read = buffer.UpdateDict()
self.streaming_buffer_write = buffer.UpdateDict()
self.associated_msp430 = msp430
self.protocol.factory.register_user_to_msp430(self, self.associated_msp430)
self.resume_streaming()
def resume_streaming(self):
self.paused = False
self.copy_and_send()
def pause_streaming(self):
self.paused = True
def copy_and_send(self):
if self.ackcount <= -10 or self.paused:
return
self.protocol.factory.copy_msp430_buffers(self.associated_msp430,
self.streaming_buffer_read,
self.streaming_buffer_write)
if len(self.streaming_buffer_read) > 0 or len(self.streaming_buffer_write) > 0:
msg = {'cmd':common_protocol.ServerCommands.WRITE_DATA}
msg['read'] = self.streaming_buffer_read
msg['write'] = self.streaming_buffer_write
self.ackcount -= 1
self.protocol.sendMessage(json.dumps(msg))
reactor.callLater(0, self.copy_and_send)
else:
self.pause_streaming()
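    # Flow-control sketch, reconstructed from the logic above: every
    # WRITE_DATA message sent decrements ackcount; the browser acknowledges
    # batches with ACK_DATA, adding ack_count back. Once ackcount reaches -10
    # the copy_and_send() loop stalls until acks (or a resume) arrive.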
def unregister_to_msp430(self):
self.pause_streaming()
if self.associated_msp430 is not None:
self.associated_msp430 = None
def notifyMSP430State(self, msp430, state):
if state == 'config':
if self.associated_msp430 is not msp430:
return
msg = {'cmd':common_protocol.ServerCommands.MSP430_STATE_CHANGE, 'msp430_mac':msp430.client.mac, 'msp430_state':state}
self.protocol.sendMessage(json.dumps(msg))
def onMessage(self, msg):
try:
msg = json.loads(msg)
except:
if self.protocol.debug:
log.msg('UserState.onMessage - JSON error, dropping')
            self.protocol.failConnection()
            return
if msg['cmd'] == common_protocol.UserClientCommands.CONNECT_MSP430:
mac = msg['msp430_mac']
self.register_to_msp430(mac)
elif msg['cmd'] == common_protocol.UserClientCommands.ACK_DATA:
ackcount = msg['ack_count']
self.ackcount += ackcount
if self.ackcount > -10:
self.copy_and_send()
elif msg['cmd'] == common_protocol.UserClientCommands.WRITE_DATA:
port = msg['iface_port']
value = msg['value']
if self.associated_msp430 is not None:
self.associated_msp430.write_interface_data(port, value)
def onClose(self, wasClean, code, reason):
self.protocol.factory.disconnect_user(self)
def onOpen(self):
self.protocol.factory.register_user(self)
class TCPClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
def connectionMade(self):
pass
def connectionLost(self, reason=ConnectionDone):
pass
def dataReceived(self, data):
try:
state = self.state_stack.pop_wr()
        except IndexError:
            if self.protocol.factory.debug:
                log.msg("%s.dataReceived - Received data in an unknown state, ignored" % self.__class__.__name__)
            return
        state.dataReceived(data)
class MSP430Client(TCPClient):
def __init__(self, protocol):
TCPClient.__init__(self, protocol)
def connectionMade(self):
self.push_state(MSP430RegisterState(self))
def connectionLost(self, reason=ConnectionDone):
# If we're registered remove ourselves from active client list
if hasattr(self, 'mac'):
self.protocol.factory.ws_factory.disconnect_msp430(self)
def copy_buffers(self, read_buffer, write_buffer):
try:
state = self.current_state()
except IndexError:
return False
if isinstance(state, MSP430StreamState):
for key, value in state.read_data_buffer_eq.iteritems():
read_buffer[key] = value
for key, value in state.write_data_buffer_eq.iteritems():
write_buffer[key] = value
return True
return False
def pause_streaming(self):
try:
state = self.current_state()
except IndexError:
return False
if isinstance(state, MSP430StreamState):
state.pause_streaming()
return True
return False
def resume_streaming(self):
try:
state = self.current_state()
except IndexError:
return False
if isinstance(state, MSP430StreamState):
state.resume_streaming()
return True
return False
def write_interface_data(self, key, data):
try:
state = self.current_state()
except IndexError:
return False
if isinstance(state, MSP430StreamState):
state.write_interface_data(key, data)
return True
return False
def config_io(self, reads, writes):
try:
state = self.current_state()
except IndexError:
return False
if isinstance(state, MSP430ConfigState):
pass
elif isinstance(state, MSP430StreamState):
state.drop_to_config(reads, writes)
return True
else:
return False
state = self.current_state()
# delegate the job to the config state
return state.config_io(reads, writes)
class MSP430RegisterState(ServerState):
def __init__(self, client):
super(MSP430RegisterState, self).__init__(client)
self.registered = False
self.re_message_count = 0
def dataReceived(self, data):
if self.re_message_count == 0 and not self.registered:
# MSP430 is requesting to register
# TODO: Add some MSP430 authentication here
if data == common_protocol.ServerCommands.REGISTER:
self.re_message_count += 1
if self.client.protocol.debug:
log.msg("MSP430Client.dataReceived - Registration Request")
self.client.protocol.sendLine(common_protocol.ServerCommands.ACK)
else:
self.client.protocol.sendLine(common_protocol.ServerCommands.NACK)
elif self.re_message_count == 1 and not self.registered:
self.client.protocol.sendLine(common_protocol.ServerCommands.ACK)
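            # Handshake sketch: message 1 must be the REGISTER command (ACKed
            # above); this second message is treated as the MSP430's MAC
            # address, and its supported interfaces are described below.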
def interface_desc(ifaces):
                # Build a structure describing each I/O class (its name,
                # docstring, choices and I/O type) to send to the Django
                # application.
ret = []
for cls in ifaces:
name = cls.__name__
desc = msp430_data.utility.trim(cls.__doc__)
choices = []
for choice_pin, choice_desc in cls.IO_CHOICES:
choice = {}
choice['s'] = choice_pin
choice['d'] = choice_desc
choices.append(choice)
ret.append({'name':name, 'desc':desc, 'choices':choices, 'io_type':cls.IO_TYPE})
return ret
self.client.iface = {}
self.client.interfaces = msp430_data.interface.get_interface_desc()
for key in self.client.interfaces.iterkeys():
self.client.iface[key] = interface_desc(self.client.interfaces[key])
self.client.mac = data
self.registered = True
self.re_message_count = 0
if self.client.protocol.debug:
log.msg("MSP430Client.dataReceived - Successful Registration")
self.client.push_state(MSP430ConfigState(self.client))
# Add to dictionary of clients in the WS factory
self.client.protocol.factory.ws_factory.register_msp430(self.client)
else:
# TODO: Something went wrong
return
class MSP430ConfigState(ServerState):
def __init__(self, client):
super(MSP430ConfigState, self).__init__(client)
def dataReceived(self, data):
if data == common_protocol.MSP430ClientCommands.CONFIG_OK:
log.msg('MSP430ConfigState - MSP430 was configured')
self.client.push_state(MSP430StreamState(self.client,
reads=self.config_reads,
writes=self.config_writes,
interfaces=self.config_interfaces,
mapping=self.config_mapping
))
elif data == common_protocol.MSP430ClientCommands.CONFIG_FAIL:
if self.client.protocol.debug:
log.msg('MSP430ConfigState - MSP430 failed to configure')
# TODO: Notify web server
def config_io(self, reads, writes):
self.display_reads = reads
self.display_writes = writes
# Format IO to store on the server
def format_io(io_collection):
# Duplicate equations allowed, duplicate instances not allowed
instanced_io_dict = {}
for io in io_collection:
cls_str = io['cls_name']
ch_port = io['ch_port']
equation = io['equation']
key = 'cls:%s, port:%s' % (cls_str, ch_port)
if key not in instanced_io_dict:
io_new_dict = {'cls_name':cls_str, 'ch_port':ch_port}
io_new_dict['equations'] = [equation]
instanced_io_dict[key] = io_new_dict
else:
                    # we can have more than one equation per instance
existing_instance = instanced_io_dict[key]
equations = existing_instance['equations']
if equation not in equations:
equations.append(equation)
return instanced_io_dict
# Format IO to give to the msp430
def format_io_msp430(io_collection):
config_mapping = {}
msp430_configs = {}
i = 0
for key, io in io_collection:
cls = getattr(msp430_data.interface, io['cls_name'])
msp430_configs[str(i)] = {'pin': io['ch_port'], 'opcode': cls.IO_OPCODE}
config_mapping[str(i)] = io
i += 1
return msp430_configs, config_mapping
self.config_reads = format_io(reads)
self.config_writes = format_io(writes)
self.config_interfaces, self.config_mapping = format_io_msp430(self.config_reads.items() + self.config_writes.items())
if self.client.protocol.debug:
log.msg('MSP430ConfigState - Pushing configs to remote MSP430')
msg = {'cmd':common_protocol.ServerCommands.CONFIG,
'payload':self.config_interfaces}
self.client.protocol.sendLine(json.dumps(msg))
class MSP430StreamState(ServerState):
def __init__(self, client, reads, writes, interfaces, mapping):
super(MSP430StreamState, self).__init__(client)
        # The read/write configs are used to communicate with the web;
        # the interface config is used to communicate with the msp430
self.config_reads = reads
self.config_writes = writes
self.config_interfaces = interfaces
self.config_mapping = mapping
# Buffers for storing the evaluated data
self.write_data_buffer_eq = {}
self.read_data_buffer_eq = {}
self.datamsgcount_ack = 0
def evaluate_eq(self, eq, value):
if eq != '':
# TODO: fix security
x = value
new_value = eval(eq)
else:
new_value = value
return new_value
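    # For example (hypothetical equation and value): evaluate_eq('x * 0.5 + 1', 10)
    # binds x = 10, eval()s the string and returns 6.0; an empty equation
    # passes the raw value through unchanged.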
def deactivated(self):
super(MSP430StreamState, self).deactivated()
self.client.protocol.factory.ws_factory.notify_clients_msp430_state_change(self.client.protocol, state='drop_stream')
def activated(self):
super(MSP430StreamState, self).activated()
self.client.protocol.factory.ws_factory.notify_clients_msp430_state_change(self.client.protocol, state='stream')
def dataReceived(self, data):
try:
data = json.loads(data)
except ValueError:
log.msg("MSP430StreamState.dataReceived - Problem with JSON structure")
log.msg(data)
return
if data['cmd'] == common_protocol.MSP430ClientCommands.DROP_TO_CONFIG_OK:
# Order here is important, pop first!
self.client.pop_state()
self.client.current_state().config_io(self.delegate_config_reads, self.delegate_config_writes)
if data['cmd'] == common_protocol.MSP430ClientCommands.DATA:
self.datamsgcount_ack += 1
interfaces = data['interfaces']
for key, value in interfaces.iteritems():
interface = self.config_mapping[key]
cls_name = interface["cls_name"]
pin = interface["ch_port"]
cls = getattr(msp430_data.interface, cls_name)
# Convert from raw data string to correct data type
new_value = cls.parse_input(value)
# Evaluate equation
if msp430_data.interface.IWrite in cls.__bases__:
for eq in interface["equations"]:
new_key = "cls:{}, port:{}, eq:{}".format(cls_name, int(pin), eq)
self.write_data_buffer_eq[new_key] = {"calculated" : self.evaluate_eq(eq, new_value),
"real": new_value}
elif msp430_data.interface.IRead in cls.__bases__:
for eq in interface["equations"]:
new_key = "cls:{}, port:{}, eq:{}".format(cls_name, int(pin), eq)
self.read_data_buffer_eq[new_key] = self.evaluate_eq(eq, new_value)
# Notify factory to update listening clients
if self.datamsgcount_ack >= 5:
data = {'cmd':common_protocol.ServerCommands.ACK_DATA, 'count':self.datamsgcount_ack}
self.client.protocol.sendLine(json.dumps(data, sort_keys=True))
self.datamsgcount_ack = 0
# Notify factory of new data event
self.client.protocol.factory.ws_factory.msp430_new_data_event(self.client)
def resume_streaming(self):
# Starting to stream again, reset the ack count
self.datamsgcount_ack = 0
msg = {'cmd':common_protocol.ServerCommands.RESUME_STREAMING}
self.client.protocol.sendLine(json.dumps(msg))
def pause_streaming(self):
msg = {'cmd':common_protocol.ServerCommands.PAUSE_STREAMING}
self.client.protocol.sendLine(json.dumps(msg))
def write_interface_data(self, key, value):
# Get the class and port from the key
match = re.match(r'cls:([A-Za-z0-9_]+),\ port:(\d+)', key)
try:
cls_name = match.group(1)
pin = match.group(2)
        except AttributeError:
            # match is None when the key does not have the expected shape
            return
cls = getattr(msp430_data.interface, cls_name)
payload = {'opcode': cls.IO_OPCODE, 'pin' : pin, 'value' : str(value)}
msg = {'cmd':common_protocol.ServerCommands.WRITE_DATA,
'payload': payload}
self.client.protocol.sendLine(json.dumps(msg))
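    # The key parsed above is expected to look like (hypothetical values)
    # 'cls:ADC, port:3' or 'cls:ADC, port:3, eq:x*2'; the regex only consumes
    # the class name and the port, ignoring any equation suffix.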
def drop_to_config(self, reads, writes):
# Drop remote MSP430 to config state
msg = {'cmd':common_protocol.ServerCommands.DROP_TO_CONFIG}
self.client.protocol.sendLine(json.dumps(msg))
self.delegate_config_reads = reads
self.delegate_config_writes = writes
class MSP430ServerProtocol(WebSocketServerProtocol):
def __init__(self):
self.client = None
def onConnect(self, connectionRequest):
def user(headers):
if self.debug:
log.msg("MSP430ServerProtocol.onConnect - User connected")
return UserClient(self)
        # MSP430s don't connect via WebSocket, so a user client is the only option
paths = {
'/':user,
}
if connectionRequest.path not in paths:
raise HttpException(httpstatus.HTTP_STATUS_CODE_NOT_FOUND[0],
httpstatus.HTTP_STATUS_CODE_NOT_FOUND[1])
self.client = paths[connectionRequest.path](connectionRequest.headers)
def onMessage(self, msg, binary):
if self.client is None:
if self.debug:
log.msg("MSP430ServerProtocol.onMessage - No Client type")
            self.failConnection()
            return
        self.client.onMessage(msg)
def onOpen(self):
WebSocketServerProtocol.onOpen(self)
if self.client is not None:
self.client.onOpen()
def onClose(self, wasClean, code, reason):
WebSocketServerProtocol.onClose(self, wasClean, code, reason)
if self.client is None:
if self.debug:
log.msg("MSP430ServerProtocol.onClose - No Client type")
return
self.client.onClose(wasClean, code, reason)
class MSP430SocketServerFactory(WebSocketServerFactory):
def __init__(self, *args, **kwargs):
WebSocketServerFactory.__init__(self, *args, **kwargs)
self.allowHixie76 = True
# Identify user by peerstr
self.msp430_clients = {}
self.user_client = {}
# Key MSP430 mac, value list of user clients
self.msp430_clients_registered_users = {}
def register_user_to_msp430(self, client, msp430):
if len(self.msp430_clients_registered_users[msp430.mac]) == 0:
# MSP430 wasn't streaming, start streaming!
msp430.resume_streaming()
if client not in self.msp430_clients_registered_users[msp430.mac]:
self.msp430_clients_registered_users[msp430.mac].append(client)
if self.debug:
log.msg('MSP430SocketServerFactory.register_user_to_msp430 msp430:%s user:%s' %
(msp430.mac, client.protocol.peerstr))
def unregister_user_to_msp430(self, client, msp430):
client.unregister_to_msp430()
if msp430 is None:
return
if msp430.mac in self.msp430_clients_registered_users:
if client in self.msp430_clients_registered_users[msp430.mac]:
self.msp430_clients_registered_users[msp430.mac].remove(client)
if self.debug:
                log.msg('MSP430SocketServerFactory.unregister_user_to_msp430 msp430:%s user:%s' %
(msp430.mac, client.protocol.peerstr))
if msp430.mac not in self.msp430_clients_registered_users or len(self.msp430_clients_registered_users[msp430.mac]) == 0:
msp430.pause_streaming()
def msp430_new_data_event(self, msp430):
for client in self.msp430_clients_registered_users[msp430.mac]:
client.resume_streaming()
def copy_msp430_buffers(self, msp430, read_buffer, write_buffer):
msp430.copy_buffers(read_buffer, write_buffer)
def get_msp430(self, msp430_mac):
if msp430_mac in self.msp430_clients:
return self.msp430_clients[msp430_mac]
return None
def notify_clients_msp430_state_change(self, msp430, state='offline'):
for peerstr, user in self.user_client.iteritems():
user.notifyMSP430State(msp430, state)
def register_user(self, user):
if user.protocol.peerstr not in self.user_client:
self.user_client[user.protocol.peerstr] = user
if self.debug:
log.msg('MSP430SocketServerFactory.register_user %s' % user.protocol.peerstr)
def disconnect_user(self, user):
if self.debug:
log.msg('MSP430SocketServerFactory.disconnect_user %s' % user.protocol.peerstr)
del self.user_client[user.protocol.peerstr]
self.unregister_user_to_msp430(user, user.associated_msp430)
def register_msp430(self, msp430):
msp430.protocol.register_msp430()
self.msp430_clients[msp430.mac] = msp430
self.msp430_clients_registered_users[msp430.mac] = []
if self.debug:
log.msg("MSP430SocketServerFactory.register_msp430 - %s registered, %d msp430" % (msp430.mac, len(self.msp430_clients)))
def register_msp430_wsite(self, msp430):
self.notify_clients_msp430_state_change(msp430, state='online')
def disconnect_msp430(self, msp430):
if hasattr(msp430, 'mac'):
if self.debug:
log.msg("MSP430SocketServerFactory.disconnect_msp430 - %s msp430 disconnected" % (msp430.mac,))
reactor.callInThread(msp430.protocol.disconnect_msp430)
try:
del self.msp430_clients[msp430.mac]
del self.msp430_clients_registered_users[msp430.mac]
except KeyError:
log.msg(self.msp430_clients)
def disconnect_msp430_wsite(self, msp430):
self.notify_clients_msp430_state_change(msp430, state='offline')
def config_msp430(self, configs):
mac = configs['mac']
if mac not in self.msp430_clients:
return False
msp430_client = self.msp430_clients[mac]
return msp430_client.config_io(reads=configs['read'], writes=configs['write'])
| true
| true
|
f71723d3940a1fbf9bf3aa6a991155af7fdbb8a9
| 1,520
|
py
|
Python
|
solfasol/publications/migrations/0011_auto_20201008_1533.py
|
rekognize/solfasol
|
c960c3364c753d75161242eccac4f085d800c843
|
[
"MIT"
] | null | null | null |
solfasol/publications/migrations/0011_auto_20201008_1533.py
|
rekognize/solfasol
|
c960c3364c753d75161242eccac4f085d800c843
|
[
"MIT"
] | 1
|
2020-06-18T13:08:47.000Z
|
2020-06-18T13:08:47.000Z
|
solfasol/publications/migrations/0011_auto_20201008_1533.py
|
Solfasol/solfasol
|
c960c3364c753d75161242eccac4f085d800c843
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-10-08 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('publications', '0010_auto_20201007_2250'),
]
operations = [
migrations.AddField(
model_name='publication',
name='email',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='facebook',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='icon',
field=models.ImageField(blank=True, null=True, upload_to='publications/icon/'),
),
migrations.AddField(
model_name='publication',
name='instagram',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='patreon',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='twitter',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='youtube',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| 31.020408
| 91
| 0.571711
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('publications', '0010_auto_20201007_2250'),
]
operations = [
migrations.AddField(
model_name='publication',
name='email',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='facebook',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='icon',
field=models.ImageField(blank=True, null=True, upload_to='publications/icon/'),
),
migrations.AddField(
model_name='publication',
name='instagram',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='patreon',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='twitter',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='publication',
name='youtube',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| true
| true
|
f71723fdc10ac4d26d6a68387acb900d9d2c358c
| 674
|
py
|
Python
|
pipeline/python/ion/reports/parseBeadfind.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 125
|
2015-01-22T05:43:23.000Z
|
2022-03-22T17:15:59.000Z
|
pipeline/python/ion/reports/parseBeadfind.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 59
|
2015-02-10T09:13:06.000Z
|
2021-11-11T02:32:38.000Z
|
pipeline/python/ion/reports/parseBeadfind.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 98
|
2015-01-17T01:25:10.000Z
|
2022-03-18T17:29:42.000Z
|
# Copyright (C) 2010 Ion Torrent Systems, Inc. All Rights Reserved
import sys
def parseLog(logText):
metrics = {}
# Get Headings for beadfind
for line in logText:
if "=" in line:
name = line.strip().split("=")
key = name[0].strip()
value = name[1].strip()
metrics[key] = value
return metrics
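# Example (hypothetical log line): "Bead Wells = 12345" yields
# {'Bead Wells': '12345'}; every value is kept as a string.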
def generateMetrics(beadPath):
f = open(beadPath, "r")
beadRaw = f.readlines()
f.close()
data = parseLog(beadRaw)
return data
if __name__ == "__main__":
f = open(sys.argv[1], "r")
processParams = f.readlines()
f.close()
data = parseLog(processParams)
print(data)
| 21.0625
| 66
| 0.581602
|
import sys
def parseLog(logText):
metrics = {}
for line in logText:
if "=" in line:
name = line.strip().split("=")
key = name[0].strip()
value = name[1].strip()
metrics[key] = value
return metrics
def generateMetrics(beadPath):
f = open(beadPath, "r")
beadRaw = f.readlines()
f.close()
data = parseLog(beadRaw)
return data
if __name__ == "__main__":
f = open(sys.argv[1], "r")
processParams = f.readlines()
f.close()
data = parseLog(processParams)
print(data)
| true
| true
|
f71724f0b63a8c3aadbf9ee4f9fbfb6ac8561c54
| 4,900
|
py
|
Python
|
temboo/core/Library/NPR/StoryFinder/GetListByID.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/NPR/StoryFinder/GetListByID.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/NPR/StoryFinder/GetListByID.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetListByID
# Retrieves a list of NPR categories from a specified list type ID.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetListByID(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetListByID Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetListByID, self).__init__(temboo_session, '/Library/NPR/StoryFinder/GetListByID')
def new_input_set(self):
return GetListByIDInputSet()
def _make_result_set(self, result, path):
return GetListByIDResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetListByIDChoreographyExecution(session, exec_id, path)
class GetListByIDInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetListByID
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ChildrenOf(self, value):
"""
Set the value of the ChildrenOf input for this Choreo. ((optional, integer) Returns only items which are assigned to the given topic ID. For example, if Id=3006 and ChildrenOf=1008 only recent series which are assigned to "Arts & Life" are returned.)
"""
super(GetListByIDInputSet, self)._set_input('ChildrenOf', value)
def set_HideChildren(self, value):
"""
Set the value of the HideChildren input for this Choreo. ((optional, boolean) If set to "1", returns only topics which are not subtopics of another topic.)
"""
super(GetListByIDInputSet, self)._set_input('HideChildren', value)
def set_Id(self, value):
"""
Set the value of the Id input for this Choreo. ((required, integer) The id of the list type you want to retrieve. For example, the list type id for Music Genres is 3218).)
"""
super(GetListByIDInputSet, self)._set_input('Id', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are xml (the default), and json.)
"""
super(GetListByIDInputSet, self)._set_input('ResponseFormat', value)
def set_StoryCountAll(self, value):
"""
Set the value of the StoryCountAll input for this Choreo. ((optional, integer) Returns only items with at least this number of associated stories.)
"""
super(GetListByIDInputSet, self)._set_input('StoryCountAll', value)
def set_StoryCountMonth(self, value):
"""
Set the value of the StoryCountMonth input for this Choreo. ((optional, integer) Returns only items with at least this number of associated stories published in the last month.)
"""
super(GetListByIDInputSet, self)._set_input('StoryCountMonth', value)
def set_StoryCountToday(self, value):
"""
Set the value of the StoryCountToday input for this Choreo. ((optional, integer) Returns only items with at least this number of associated stories published today.)
"""
super(GetListByIDInputSet, self)._set_input('StoryCountToday', value)
class GetListByIDResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetListByID Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from NPR.)
"""
return self._output.get('Response', None)
class GetListByIDChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetListByIDResultSet(response, path)
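# A minimal usage sketch (assumed, not part of this module; the credentials
# are placeholders and Id 3218 is the Music Genres list type mentioned in the
# set_Id docstring):
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT', 'APP_NAME', 'APP_KEY_VALUE')
#   choreo = GetListByID(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Id('3218')
#   inputs.set_ResponseFormat('json')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())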
| 43.362832
| 258
| 0.686939
| true
| true
|
|
f71725ad870be675205ea0d7cbcda78635c972b3
| 3,720
|
py
|
Python
|
CodonSubstitution/build/biopython/Tests/test_SCOP_Raf.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | 1
|
2015-10-07T20:34:29.000Z
|
2015-10-07T20:34:29.000Z
|
CodonSubstitution/build/biopython/Tests/test_SCOP_Raf.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | null | null | null |
CodonSubstitution/build/biopython/Tests/test_SCOP_Raf.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | null | null | null |
"""Unit test for Raf"""
import unittest
from Bio.SCOP import Raf
class RafTests(unittest.TestCase):
rafLine = "101m_ 0.01 38 010301 111011 0 153 0 mm 1 vv 2 ll 3 ss 4 ee 5 gg 6 ee 7 ww 8 qq 9 ll 10 vv 11 ll 12 hh 13 vv 14 ww 15 aa 16 kk 17 vv 18 ee 19 aa 20 dd 21 vv 22 aa 23 gg 24 hh 25 gg 26 qq 27 dd 28 ii 29 ll 30 ii 31 rr 32 ll 33 ff 34 kk 35 ss 36 hh 37 pp 38 ee 39 tt 40 ll 41 ee 42 kk 43 ff 44 dd 45 rr 46 vv 47 kk 48 hh 49 ll 50 kk 51 tt 52 ee 53 aa 54 ee 55 mm 56 kk 57 aa 58 ss 59 ee 60 dd 61 ll 62 kk 63 kk 64 hh 65 gg 66 vv 67 tt 68 vv 69 ll 70 tt 71 aa 72 ll 73 gg 74 aa 75 ii 76 ll 77 kk 78 kk 79 kk 80 gg 81 hh 82 hh 83 ee 84 aa 85 ee 86 ll 87 kk 88 pp 89 ll 90 aa 91 qq 92 ss 93 hh 94 aa 95 tt 96 kk 97 hh 98 kk 99 ii 100 pp 101 ii 102 kk 103 yy 104 ll 105 ee 106 ff 107 ii 108 ss 109 ee 110 aa 111 ii 112 ii 113 hh 114 vv 115 ll 116 hh 117 ss 118 rr 119 hh 120 pp 121 gg 122 nn 123 ff 124 gg 125 aa 126 dd 127 aa 128 qq 129 gg 130 aa 131 mm 132 nn 133 kk 134 aa 135 ll 136 ee 137 ll 138 ff 139 rr 140 kk 141 dd 142 ii 143 aa 144 aa 145 kk 146 yy 147 kk 148 ee 149 ll 150 gg 151 yy 152 qq 153 gg"
rafLine2 = "101mA 0.01 38 010301 111011 0 153 0 mm 1 vv 2 ll 3 ss 4 ee 5 gg 6Aee 7Aww 8Aqq"
rafLine3 = "101mB 0.01 38 010301 111011 0 153 90 mm 91 vv 92 ll 939ss 94 ee 95 gg"
def testParse(self):
"""Can we parse a RAF record?"""
r = Raf.SeqMap(self.rafLine)
self.assertEqual(r.pdbid, "101m")
self.assertEqual(r.pdb_datestamp, "010301")
self.assertEqual(r.flags, "111011")
i = r.index("143")
res = r.res[i]
self.assertEqual(res.chainid, "_")
self.assertEqual(res.resid, "143")
self.assertEqual(res.seqres, "A")
self.assertEqual(res.atom, "A")
r = Raf.SeqMap(self.rafLine2)
res = r.res[r.index("6A", chainid="A")]
self.assertEqual(res.resid, "6A")
self.assertEqual(res.atom, "E")
def testSeqMapAdd(self):
r2 = Raf.SeqMap(self.rafLine2)
r3 = Raf.SeqMap(self.rafLine3)
l = len(r2.res) + len(r3.res)
r2 += r3
self.assertEqual(len(r2.res), l)
r2.extend(r2)
self.assertEqual(len(r2.res), l*2)
r4 = r2 + r2
self.assertEqual(len(r4.res), l*4)
r4.append(Raf.Res())
self.assertEqual(len(r4.res), (l*4)+1)
def testSeqMapSlice(self):
r = Raf.SeqMap(self.rafLine)
r = r[ r.index("124"): r.index("135")+1]
self.assertEqual(len(r.res), 12)
def testSeqMapIndex(self):
filename = ("./SCOP/raftest.txt")
index = Raf.SeqMapIndex(filename)
r = index.getSeqMap("103m")
self.assertEqual(r.pdbid, "103m")
self.assertEqual(len(r.res), 154)
self.assertEqual(r.pdb_datestamp, "010301")
self.assertEqual(r.flags, "111011")
r = index.getSeqMap("103m 1-10")
self.assertEqual(r.pdbid, "103m",)
self.assertEqual(len(r.res), 10)
self.assertEqual(r.pdb_datestamp, "010301")
self.assertEqual(r.flags, "111011")
r = index.getSeqMap("104l A:")
self.assertEqual(r.pdbid, "104l")
r = index.getSeqMap("104l A:112-113")
self.assertEqual(r.pdbid, "104l")
self.assertEqual(len(r.res), 2)
r = index.getSeqMap("104l A:112-113,B:146-148")
self.assertEqual(r.pdbid, "104l")
self.assertEqual(len(r.res), 5)
if __name__=='__main__':
runner = unittest.TextTestRunner(verbosity = 2)
unittest.main(testRunner=runner)
| 39.157895
| 1,132
| 0.583871
|
import unittest
from Bio.SCOP import Raf
class RafTests(unittest.TestCase):
rafLine = "101m_ 0.01 38 010301 111011 0 153 0 mm 1 vv 2 ll 3 ss 4 ee 5 gg 6 ee 7 ww 8 qq 9 ll 10 vv 11 ll 12 hh 13 vv 14 ww 15 aa 16 kk 17 vv 18 ee 19 aa 20 dd 21 vv 22 aa 23 gg 24 hh 25 gg 26 qq 27 dd 28 ii 29 ll 30 ii 31 rr 32 ll 33 ff 34 kk 35 ss 36 hh 37 pp 38 ee 39 tt 40 ll 41 ee 42 kk 43 ff 44 dd 45 rr 46 vv 47 kk 48 hh 49 ll 50 kk 51 tt 52 ee 53 aa 54 ee 55 mm 56 kk 57 aa 58 ss 59 ee 60 dd 61 ll 62 kk 63 kk 64 hh 65 gg 66 vv 67 tt 68 vv 69 ll 70 tt 71 aa 72 ll 73 gg 74 aa 75 ii 76 ll 77 kk 78 kk 79 kk 80 gg 81 hh 82 hh 83 ee 84 aa 85 ee 86 ll 87 kk 88 pp 89 ll 90 aa 91 qq 92 ss 93 hh 94 aa 95 tt 96 kk 97 hh 98 kk 99 ii 100 pp 101 ii 102 kk 103 yy 104 ll 105 ee 106 ff 107 ii 108 ss 109 ee 110 aa 111 ii 112 ii 113 hh 114 vv 115 ll 116 hh 117 ss 118 rr 119 hh 120 pp 121 gg 122 nn 123 ff 124 gg 125 aa 126 dd 127 aa 128 qq 129 gg 130 aa 131 mm 132 nn 133 kk 134 aa 135 ll 136 ee 137 ll 138 ff 139 rr 140 kk 141 dd 142 ii 143 aa 144 aa 145 kk 146 yy 147 kk 148 ee 149 ll 150 gg 151 yy 152 qq 153 gg"
rafLine2 = "101mA 0.01 38 010301 111011 0 153 0 mm 1 vv 2 ll 3 ss 4 ee 5 gg 6Aee 7Aww 8Aqq"
rafLine3 = "101mB 0.01 38 010301 111011 0 153 90 mm 91 vv 92 ll 939ss 94 ee 95 gg"
def testParse(self):
r = Raf.SeqMap(self.rafLine)
self.assertEqual(r.pdbid, "101m")
self.assertEqual(r.pdb_datestamp, "010301")
self.assertEqual(r.flags, "111011")
i = r.index("143")
res = r.res[i]
self.assertEqual(res.chainid, "_")
self.assertEqual(res.resid, "143")
self.assertEqual(res.seqres, "A")
self.assertEqual(res.atom, "A")
r = Raf.SeqMap(self.rafLine2)
res = r.res[r.index("6A", chainid="A")]
self.assertEqual(res.resid, "6A")
self.assertEqual(res.atom, "E")
def testSeqMapAdd(self):
r2 = Raf.SeqMap(self.rafLine2)
r3 = Raf.SeqMap(self.rafLine3)
l = len(r2.res) + len(r3.res)
r2 += r3
self.assertEqual(len(r2.res), l)
r2.extend(r2)
self.assertEqual(len(r2.res), l*2)
r4 = r2 + r2
self.assertEqual(len(r4.res), l*4)
r4.append(Raf.Res())
self.assertEqual(len(r4.res), (l*4)+1)
def testSeqMapSlice(self):
r = Raf.SeqMap(self.rafLine)
r = r[ r.index("124"): r.index("135")+1]
self.assertEqual(len(r.res), 12)
def testSeqMapIndex(self):
filename = ("./SCOP/raftest.txt")
index = Raf.SeqMapIndex(filename)
r = index.getSeqMap("103m")
self.assertEqual(r.pdbid, "103m")
self.assertEqual(len(r.res), 154)
self.assertEqual(r.pdb_datestamp, "010301")
self.assertEqual(r.flags, "111011")
r = index.getSeqMap("103m 1-10")
self.assertEqual(r.pdbid, "103m",)
self.assertEqual(len(r.res), 10)
self.assertEqual(r.pdb_datestamp, "010301")
self.assertEqual(r.flags, "111011")
r = index.getSeqMap("104l A:")
self.assertEqual(r.pdbid, "104l")
r = index.getSeqMap("104l A:112-113")
self.assertEqual(r.pdbid, "104l")
self.assertEqual(len(r.res), 2)
r = index.getSeqMap("104l A:112-113,B:146-148")
self.assertEqual(r.pdbid, "104l")
self.assertEqual(len(r.res), 5)
if __name__=='__main__':
runner = unittest.TextTestRunner(verbosity = 2)
unittest.main(testRunner=runner)
| true
| true
|
f71726d73754bd88715d19bd42c1ca917206a9ec
| 526
|
py
|
Python
|
human_services/phone_at_location/migrations/0004_auto_20191230_2338.py
|
pg-irc/pathways-backend
|
05a8c4e750523d2d081b030a248c5444d1ed7992
|
[
"BSD-3-Clause"
] | 12
|
2017-08-30T18:21:00.000Z
|
2021-12-09T04:04:17.000Z
|
human_services/phone_at_location/migrations/0004_auto_20191230_2338.py
|
pg-irc/pathways-backend
|
05a8c4e750523d2d081b030a248c5444d1ed7992
|
[
"BSD-3-Clause"
] | 424
|
2017-08-08T18:32:14.000Z
|
2022-03-30T21:42:51.000Z
|
human_services/phone_at_location/migrations/0004_auto_20191230_2338.py
|
pg-irc/pathways-backend
|
05a8c4e750523d2d081b030a248c5444d1ed7992
|
[
"BSD-3-Clause"
] | 7
|
2017-09-29T21:14:37.000Z
|
2019-12-30T21:07:37.000Z
|
# Generated by Django 2.2.4 on 2019-12-30 23:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('phone_at_location', '0003_auto_20180910_1743'),
]
operations = [
migrations.AlterField(
model_name='phoneatlocation',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phone_numbers', to='locations.Location'),
),
]
| 26.3
| 136
| 0.6673
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('phone_at_location', '0003_auto_20180910_1743'),
]
operations = [
migrations.AlterField(
model_name='phoneatlocation',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phone_numbers', to='locations.Location'),
),
]
| true
| true
|
f717275ca2c8359dcf62adea379cace6a5b31975
| 29,190
|
py
|
Python
|
zipline/pipeline/term.py
|
welly87/zipline
|
dbdfa8ed86417f954e95bd7468e144589f2cd482
|
[
"Apache-2.0"
] | 2
|
2019-05-23T17:19:42.000Z
|
2019-06-19T21:49:21.000Z
|
zipline/pipeline/term.py
|
welly87/zipline
|
dbdfa8ed86417f954e95bd7468e144589f2cd482
|
[
"Apache-2.0"
] | 1
|
2021-03-25T23:28:04.000Z
|
2021-03-25T23:28:04.000Z
|
zipline/pipeline/term.py
|
welly87/zipline
|
dbdfa8ed86417f954e95bd7468e144589f2cd482
|
[
"Apache-2.0"
] | 2
|
2020-03-17T23:23:05.000Z
|
2020-05-01T20:26:44.000Z
|
"""
Base class for Filters, Factors and Classifiers
"""
from abc import ABCMeta, abstractproperty
from bisect import insort
from collections import Mapping
from weakref import WeakValueDictionary
from numpy import (
array,
dtype as dtype_class,
ndarray,
searchsorted,
)
from six import with_metaclass
from zipline.assets import Asset
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonExistentAssetInTimeFrame,
NonSliceableTerm,
NonWindowSafeInput,
NotDType,
NonPipelineInputs,
TermInputsNotSpecified,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.lib.adjusted_array import can_represent_dtype
from zipline.lib.labelarray import LabelArray
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
datetime64ns_dtype,
default_missing_value_for_dtype,
)
from zipline.utils.sharedoc import (
templated_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from .domain import Domain, GENERIC, infer_domain
from .downsample_helpers import expect_downsample_frequency
from .sentinels import NotSpecified
class Term(with_metaclass(ABCMeta, object)):
"""
Base class for objects that can appear in the compute graph of a
:class:`zipline.pipeline.Pipeline`.
Notes
-----
Most Pipeline API users only interact with :class:`Term` via subclasses:
- :class:`~zipline.pipeline.data.BoundColumn`
- :class:`~zipline.pipeline.Factor`
- :class:`~zipline.pipeline.Filter`
- :class:`~zipline.pipeline.Classifier`
Instances of :class:`Term` are **memoized**. If you call a Term's
constructor with the same arguments twice, the same object will be returned
from both calls:
**Example:**
>>> from zipline.pipeline.data import EquityPricing
>>> from zipline.pipeline.factors import SimpleMovingAverage
>>> x = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)
>>> y = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)
>>> x is y
True
.. warning::
Memoization of terms means that it's generally unsafe to modify
attributes of a term after construction.
"""
# These are NotSpecified because a subclass is required to provide them.
dtype = NotSpecified
missing_value = NotSpecified
# Subclasses aren't required to provide `params`. The default behavior is
# no params.
params = ()
# All terms are generic by default.
domain = GENERIC
# Determines if a term is safe to be used as a windowed input.
window_safe = False
# The dimensions of the term's output (1D or 2D).
ndim = 2
_term_cache = WeakValueDictionary()
def __new__(cls,
domain=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
window_safe=NotSpecified,
ndim=NotSpecified,
# params is explicitly not allowed to be passed to an instance.
*args,
**kwargs):
"""
Memoized constructor for Terms.
Caching previously-constructed Terms is useful because it allows us to
only compute equivalent sub-expressions once when traversing a Pipeline
dependency graph.
Caching previously-constructed Terms is **sane** because terms and
their inputs are both conceptually immutable.
"""
# Subclasses can override these class-level attributes to provide
# different default values for instances.
if domain is NotSpecified:
domain = cls.domain
if dtype is NotSpecified:
dtype = cls.dtype
if missing_value is NotSpecified:
missing_value = cls.missing_value
if ndim is NotSpecified:
ndim = cls.ndim
if window_safe is NotSpecified:
window_safe = cls.window_safe
dtype, missing_value = validate_dtype(
cls.__name__,
dtype,
missing_value,
)
params = cls._pop_params(kwargs)
identity = cls._static_identity(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
try:
return cls._term_cache[identity]
except KeyError:
new_instance = cls._term_cache[identity] = \
super(Term, cls).__new__(cls)._init(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
return new_instance
@classmethod
def _pop_params(cls, kwargs):
"""
Pop entries from the `kwargs` passed to cls.__new__ based on the values
in `cls.params`.
Parameters
----------
kwargs : dict
The kwargs passed to cls.__new__.
Returns
-------
        params : tuple[(str, object)]
            A tuple of (key, value) pairs containing the entries in cls.params.
Raises
------
TypeError
Raised if any parameter values are not passed or not hashable.
"""
params = cls.params
if not isinstance(params, Mapping):
params = {k: NotSpecified for k in params}
param_values = []
for key, default_value in params.items():
try:
value = kwargs.pop(key, default_value)
if value is NotSpecified:
raise KeyError(key)
# Check here that the value is hashable so that we fail here
# instead of trying to hash the param values tuple later.
hash(value)
except KeyError:
raise TypeError(
"{typename} expected a keyword parameter {name!r}.".format(
typename=cls.__name__,
name=key
)
)
except TypeError:
# Value wasn't hashable.
raise TypeError(
"{typename} expected a hashable value for parameter "
"{name!r}, but got {value!r} instead.".format(
typename=cls.__name__,
name=key,
value=value,
)
)
param_values.append((key, value))
return tuple(param_values)
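    # Sketch of how ``params`` is consumed (the subclass and values here are
    # hypothetical): a subclass declaring ``params = ('decay_rate',)`` must be
    # constructed as ``MyTerm(decay_rate=0.5)``; _pop_params() moves the
    # hashable value out of **kwargs and into the term's static identity.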
def __init__(self, *args, **kwargs):
"""
Noop constructor to play nicely with our caching __new__. Subclasses
should implement _init instead of this method.
When a class' __new__ returns an instance of that class, Python will
automatically call __init__ on the object, even if a new object wasn't
actually constructed. Because we memoize instances, we often return an
object that was already initialized from __new__, in which case we
don't want to call __init__ again.
Subclasses that need to initialize new instances should override _init,
which is guaranteed to be called only once.
"""
pass
@expect_types(key=Asset)
def __getitem__(self, key):
if isinstance(self, LoadableTerm):
raise NonSliceableTerm(term=self)
return Slice(self, key)
@classmethod
def _static_identity(cls,
domain,
dtype,
missing_value,
window_safe,
ndim,
params):
"""
Return the identity of the Term that would be constructed from the
given arguments.
Identities that compare equal will cause us to return a cached instance
rather than constructing a new one. We do this primarily because it
makes dependency resolution easier.
This is a classmethod so that it can be called from Term.__new__ to
determine whether to produce a new instance.
"""
return (cls, domain, dtype, missing_value, window_safe, ndim, params)
def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
"""
Parameters
----------
domain : zipline.pipeline.domain.Domain
The domain of this term.
dtype : np.dtype
Dtype of this term's output.
missing_value : object
Missing value for this term.
ndim : 1 or 2
The dimensionality of this term.
params : tuple[(str, hashable)]
Tuple of key/value pairs of additional parameters.
"""
self.domain = domain
self.dtype = dtype
self.missing_value = missing_value
self.window_safe = window_safe
self.ndim = ndim
for name, value in params:
if hasattr(self, name):
raise TypeError(
"Parameter {name!r} conflicts with already-present"
" attribute with value {value!r}.".format(
name=name,
value=getattr(self, name),
)
)
# TODO: Consider setting these values as attributes and replacing
# the boilerplate in NumericalExpression, Rank, and
# PercentileFilter.
self.params = dict(params)
# Make sure that subclasses call super() in their _validate() methods
# by setting this flag. The base class implementation of _validate
# should set this flag to True.
self._subclass_called_super_validate = False
self._validate()
assert self._subclass_called_super_validate, (
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
del self._subclass_called_super_validate
return self
def _validate(self):
"""
Assert that this term is well-formed. This should be called exactly
once, at the end of Term._init().
"""
# mark that we got here to enforce that subclasses overriding _validate
# call super().
self._subclass_called_super_validate = True
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Calculate the number of extra rows needed to compute ``self``.
Must return at least ``min_extra_rows``, and the default implementation
is to just return ``min_extra_rows``. This is overridden by
downsampled terms to ensure that the first date computed is a
recomputation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. Must be at least
``min_extra_rows``.
"""
return min_extra_rows
@abstractproperty
def inputs(self):
"""
A tuple of other Terms needed as inputs for ``self``.
"""
raise NotImplementedError('inputs')
@abstractproperty
def windowed(self):
"""
Boolean indicating whether this term is a trailing-window computation.
"""
raise NotImplementedError('windowed')
@abstractproperty
def mask(self):
"""
        A :class:`~zipline.pipeline.Filter` representing asset/date pairs to
        include while computing this Term. True means include; False means
        exclude.
"""
raise NotImplementedError('mask')
@abstractproperty
def dependencies(self):
"""
A dictionary mapping terms that must be computed before `self` to the
number of extra rows needed for those terms.
"""
raise NotImplementedError('dependencies')
def graph_repr(self):
"""A short repr to use when rendering GraphViz graphs.
"""
# Default graph_repr is just the name of the type.
return type(self).__name__
def recursive_repr(self):
"""A short repr to use when recursively rendering terms with inputs.
"""
# Default recursive_repr is just the name of the type.
return type(self).__name__
class AssetExists(Term):
"""
Pseudo-filter describing whether or not an asset existed on a given day.
This is the default mask for all terms that haven't been passed a mask
explicitly.
This is morally a Filter, in the sense that it produces a boolean value for
every asset on every date. We don't subclass Filter, however, because
`AssetExists` is computed directly by the PipelineEngine.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
See Also
--------
zipline.assets.AssetFinder.lifetimes
"""
dtype = bool_dtype
dataset = None
inputs = ()
dependencies = {}
mask = None
windowed = False
def __repr__(self):
return "AssetExists()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"AssetExists cannot be computed directly."
" Check your PipelineEngine configuration."
)
class InputDates(Term):
"""
1-Dimensional term providing date labels for other term inputs.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
"""
ndim = 1
dataset = None
dtype = datetime64ns_dtype
inputs = ()
dependencies = {}
mask = None
windowed = False
window_safe = True
def __repr__(self):
return "InputDates()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"InputDates cannot be computed directly."
" Check your PipelineEngine configuration."
)
class LoadableTerm(Term):
"""
A Term that should be loaded from an external resource by a PipelineLoader.
This is the base class for :class:`zipline.pipeline.data.BoundColumn`.
"""
windowed = False
inputs = ()
@lazyval
def dependencies(self):
return {self.mask: 0}
class ComputableTerm(Term):
"""
A Term that should be computed from a tuple of inputs.
This is the base class for :class:`zipline.pipeline.Factor`,
:class:`zipline.pipeline.Filter`, and :class:`zipline.pipeline.Classifier`.
"""
inputs = NotSpecified
outputs = NotSpecified
window_length = NotSpecified
mask = NotSpecified
domain = NotSpecified
def __new__(cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
domain=domain,
*args, **kwargs):
if inputs is NotSpecified:
inputs = cls.inputs
# Having inputs = NotSpecified is an error, but we handle it later
# in self._validate rather than here.
if inputs is not NotSpecified:
# Allow users to specify lists as class-level defaults, but
# normalize to a tuple so that inputs is hashable.
inputs = tuple(inputs)
# Make sure all our inputs are valid pipeline objects before trying
# to infer a domain.
non_terms = [t for t in inputs if not isinstance(t, Term)]
if non_terms:
raise NonPipelineInputs(cls.__name__, non_terms)
if domain is NotSpecified:
domain = infer_domain(inputs)
if outputs is NotSpecified:
outputs = cls.outputs
if outputs is not NotSpecified:
outputs = tuple(outputs)
if mask is NotSpecified:
mask = cls.mask
if mask is NotSpecified:
mask = AssetExists()
if window_length is NotSpecified:
window_length = cls.window_length
return super(ComputableTerm, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
mask=mask,
window_length=window_length,
domain=domain,
*args, **kwargs
)
def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):
self.inputs = inputs
self.outputs = outputs
self.window_length = window_length
self.mask = mask
return super(ComputableTerm, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls,
inputs,
outputs,
window_length,
mask,
*args,
**kwargs):
return (
super(ComputableTerm, cls)._static_identity(*args, **kwargs),
inputs,
outputs,
window_length,
mask,
)
def _validate(self):
super(ComputableTerm, self)._validate()
# Check inputs.
if self.inputs is NotSpecified:
raise TermInputsNotSpecified(termname=type(self).__name__)
if not isinstance(self.domain, Domain):
raise TypeError(
"Expected {}.domain to be an instance of Domain, "
"but got {}.".format(type(self).__name__, type(self.domain))
)
# Check outputs.
if self.outputs is NotSpecified:
pass
elif not self.outputs:
raise TermOutputsEmpty(termname=type(self).__name__)
else:
# Raise an exception if there are any naming conflicts between the
# term's output names and certain attributes.
disallowed_names = [
attr for attr in dir(ComputableTerm)
if not attr.startswith('_')
]
# The name 'compute' is an added special case that is disallowed.
# Use insort to add it to the list in alphabetical order.
insort(disallowed_names, 'compute')
for output in self.outputs:
if output.startswith('_') or output in disallowed_names:
raise InvalidOutputName(
output_name=output,
termname=type(self).__name__,
disallowed_names=disallowed_names,
)
if self.window_length is NotSpecified:
raise WindowLengthNotSpecified(termname=type(self).__name__)
if self.mask is NotSpecified:
# This isn't user error, this is a bug in our code.
raise AssertionError("{term} has no mask".format(term=self))
if self.window_length > 1:
for child in self.inputs:
if not child.window_safe:
raise NonWindowSafeInput(parent=self, child=child)
def _compute(self, inputs, dates, assets, mask):
"""
Subclasses should implement this to perform actual computation.
This is named ``_compute`` rather than just ``compute`` because
``compute`` is reserved for user-supplied functions in
CustomFilter/CustomFactor/CustomClassifier.
"""
raise NotImplementedError()
@lazyval
def windowed(self):
"""
Whether or not this term represents a trailing window computation.
If term.windowed is truthy, its compute_from_windows method will be
called with instances of AdjustedArray as inputs.
If term.windowed is falsey, its compute_from_baseline will be called
with instances of np.ndarray as inputs.
"""
return (
self.window_length is not NotSpecified
and self.window_length > 0
)
@lazyval
def dependencies(self):
"""
The number of extra rows needed for each of our inputs to compute this
term.
"""
extra_input_rows = max(0, self.window_length - 1)
out = {}
for term in self.inputs:
out[term] = extra_input_rows
out[self.mask] = 0
return out
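    # e.g. (hypothetical): a term with window_length=5 reports 4 extra rows
    # for each of its inputs, plus 0 extra rows for its mask.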
@expect_types(data=ndarray)
def postprocess(self, data):
"""
        Called with a result of ``self``, unravelled (i.e. 1-dimensional)
after any user-defined screens have been applied.
This is mostly useful for transforming the dtype of an output, e.g., to
convert a LabelArray into a pandas Categorical.
The default implementation is to just return data unchanged.
"""
return data
def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array like value that the engine can consume.
"""
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values
def _downsampled_type(self, *args, **kwargs):
"""
The expression type to return from self.downsample().
"""
raise NotImplementedError(
"downsampling is not yet implemented "
"for instances of %s." % type(self).__name__
)
@expect_downsample_frequency
@templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)
def downsample(self, frequency):
"""
Make a term that computes from ``self`` at lower-than-daily frequency.
Parameters
----------
{frequency}
"""
return self._downsampled_type(term=self, frequency=frequency)
def _aliased_type(self, *args, **kwargs):
"""
The expression type to return from self.alias().
"""
raise NotImplementedError(
"alias is not yet implemented "
"for instances of %s." % type(self).__name__
)
@templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)
def alias(self, name):
"""
Make a term from ``self`` that names the expression.
Parameters
----------
{name}
Returns
-------
aliased : Aliased
``self`` with a name.
Notes
-----
This is useful for giving a name to a numerical or boolean expression.
"""
return self._aliased_type(term=self, name=name)
def __repr__(self):
return (
"{type}([{inputs}], {window_length})"
).format(
type=type(self).__name__,
inputs=', '.join(i.recursive_repr() for i in self.inputs),
window_length=self.window_length,
)
def recursive_repr(self):
return type(self).__name__ + '(...)'
class Slice(ComputableTerm):
"""
Term for extracting a single column of another term's output.
Parameters
----------
term : zipline.pipeline.Term
The term from which to extract a column of data.
asset : zipline.assets.Asset
The asset corresponding to the column of `term` to be extracted.
Notes
-----
Users should rarely construct instances of `Slice` directly. Instead, they
should construct instances via indexing, e.g. `MyFactor()[Asset(24)]`.
"""
def __new__(cls, term, asset):
return super(Slice, cls).__new__(
cls,
asset=asset,
inputs=[term],
window_length=0,
mask=term.mask,
dtype=term.dtype,
missing_value=term.missing_value,
window_safe=term.window_safe,
ndim=1,
)
def __repr__(self):
return "{parent_term}[{asset}]".format(
type=type(self).__name__,
parent_term=self.inputs[0].recursive_repr(),
asset=self._asset,
)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(Slice, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (super(Slice, cls)._static_identity(*args, **kwargs), asset)
def _compute(self, windows, dates, assets, mask):
asset = self._asset
asset_column = searchsorted(assets.values, asset.sid)
if assets[asset_column] != asset.sid:
raise NonExistentAssetInTimeFrame(
asset=asset, start_date=dates[0], end_date=dates[-1],
)
# Return a 2D array with one column rather than a 1D array of the
# column.
return windows[0][:, [asset_column]]
@property
def asset(self):
"""Get the asset whose data is selected by this slice.
"""
return self._asset
@property
def _downsampled_type(self):
raise NotImplementedError(
'downsampling of slices is not yet supported'
)
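# A hedged numpy-only sketch (names are illustrative, not zipline API) of the
# column extraction performed by ``Slice._compute`` above: locate the
# requested sid with searchsorted, verify it is actually present, and keep
# the result two-dimensional by indexing with a one-element list.
def _slice_compute_sketch():
    import numpy as np
    window = np.arange(12.0).reshape(3, 4)  # (dates, assets)
    sids = np.array([10, 24, 31, 47])       # sorted asset sids
    target_sid = 31
    column = np.searchsorted(sids, target_sid)
    if sids[column] != target_sid:
        raise KeyError('sid %d not in this window' % target_sid)
    sliced = window[:, [column]]            # shape (3, 1), still 2D
    assert sliced.shape == (3, 1)
    return sliced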
def validate_dtype(termname, dtype, missing_value):
"""
Validate a `dtype` and `missing_value` passed to Term.__new__.
Ensures that we know how to represent ``dtype``, and that missing_value
is specified for types without default missing values.
Returns
-------
validated_dtype, validated_missing_value : np.dtype, any
The dtype and missing_value to use for the new term.
Raises
------
DTypeNotSpecified
When no dtype was passed to the instance, and the class doesn't
provide a default.
NotDType
When either the class or the instance provides a value not
coercible to a numpy dtype.
NoDefaultMissingValue
When dtype requires an explicit missing_value, but
``missing_value`` is NotSpecified.
"""
if dtype is NotSpecified:
raise DTypeNotSpecified(termname=termname)
try:
dtype = dtype_class(dtype)
except TypeError:
raise NotDType(dtype=dtype, termname=termname)
if not can_represent_dtype(dtype):
raise UnsupportedDType(dtype=dtype, termname=termname)
if missing_value is NotSpecified:
missing_value = default_missing_value_for_dtype(dtype)
try:
if (dtype == categorical_dtype):
# This check is necessary because we use object dtype for
# categoricals, and numpy will allow us to promote numerical
# values to object even though we don't support them.
_assert_valid_categorical_missing_value(missing_value)
# For any other type, we can check if the missing_value is safe by
# making an array of that value and trying to safely convert it to
# the desired type.
# 'same_kind' allows casting between things like float32 and
# float64, but not str and int.
array([missing_value]).astype(dtype=dtype, casting='same_kind')
except TypeError as e:
raise TypeError(
"Missing value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=termname,
value=missing_value,
dtype=dtype,
error=e,
)
)
return dtype, missing_value
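# A small, hedged demonstration (plain numpy) of the ``casting='same_kind'``
# rule relied on above: float32 -> float64 stays within the float kind and
# succeeds, while str -> float crosses kinds and raises the TypeError that
# ``validate_dtype`` rewraps with a friendlier message.
def _same_kind_cast_sketch():
    import numpy as np
    np.array([1.5], dtype=np.float32).astype(np.float64, casting='same_kind')
    try:
        np.array(['oops']).astype(np.float64, casting='same_kind')
    except TypeError:
        return True
    raise AssertionError('expected a cross-kind cast to raise TypeError')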
def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"Categorical terms must have missing values of type "
"{types}.".format(
types=' or '.join([t.__name__ for t in label_types]),
)
)
| 32.289823
| 79
| 0.598664
|
from abc import ABCMeta, abstractproperty
from bisect import insort
from collections import Mapping
from weakref import WeakValueDictionary
from numpy import (
array,
dtype as dtype_class,
ndarray,
searchsorted,
)
from six import with_metaclass
from zipline.assets import Asset
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonExistentAssetInTimeFrame,
NonSliceableTerm,
NonWindowSafeInput,
NotDType,
NonPipelineInputs,
TermInputsNotSpecified,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.lib.adjusted_array import can_represent_dtype
from zipline.lib.labelarray import LabelArray
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
datetime64ns_dtype,
default_missing_value_for_dtype,
)
from zipline.utils.sharedoc import (
templated_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from .domain import Domain, GENERIC, infer_domain
from .downsample_helpers import expect_downsample_frequency
from .sentinels import NotSpecified
class Term(with_metaclass(ABCMeta, object)):
dtype = NotSpecified
missing_value = NotSpecified
# no params.
params = ()
# All terms are generic by default.
domain = GENERIC
# Determines if a term is safe to be used as a windowed input.
window_safe = False
# The dimensions of the term's output (1D or 2D).
ndim = 2
_term_cache = WeakValueDictionary()
def __new__(cls,
domain=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
window_safe=NotSpecified,
ndim=NotSpecified,
*args,
**kwargs):
if domain is NotSpecified:
domain = cls.domain
if dtype is NotSpecified:
dtype = cls.dtype
if missing_value is NotSpecified:
missing_value = cls.missing_value
if ndim is NotSpecified:
ndim = cls.ndim
if window_safe is NotSpecified:
window_safe = cls.window_safe
dtype, missing_value = validate_dtype(
cls.__name__,
dtype,
missing_value,
)
params = cls._pop_params(kwargs)
identity = cls._static_identity(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
try:
return cls._term_cache[identity]
except KeyError:
new_instance = cls._term_cache[identity] = \
super(Term, cls).__new__(cls)._init(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
return new_instance
@classmethod
def _pop_params(cls, kwargs):
params = cls.params
if not isinstance(params, Mapping):
params = {k: NotSpecified for k in params}
param_values = []
for key, default_value in params.items():
try:
value = kwargs.pop(key, default_value)
if value is NotSpecified:
raise KeyError(key)
hash(value)
except KeyError:
raise TypeError(
"{typename} expected a keyword parameter {name!r}.".format(
typename=cls.__name__,
name=key
)
)
except TypeError:
raise TypeError(
"{typename} expected a hashable value for parameter "
"{name!r}, but got {value!r} instead.".format(
typename=cls.__name__,
name=key,
value=value,
)
)
param_values.append((key, value))
return tuple(param_values)
def __init__(self, *args, **kwargs):
pass
@expect_types(key=Asset)
def __getitem__(self, key):
if isinstance(self, LoadableTerm):
raise NonSliceableTerm(term=self)
return Slice(self, key)
@classmethod
def _static_identity(cls,
domain,
dtype,
missing_value,
window_safe,
ndim,
params):
return (cls, domain, dtype, missing_value, window_safe, ndim, params)
def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
self.domain = domain
self.dtype = dtype
self.missing_value = missing_value
self.window_safe = window_safe
self.ndim = ndim
for name, value in params:
if hasattr(self, name):
raise TypeError(
"Parameter {name!r} conflicts with already-present"
" attribute with value {value!r}.".format(
name=name,
value=getattr(self, name),
)
)
# TODO: Consider setting these values as attributes and replacing
# the boilerplate in NumericalExpression, Rank, and
# PercentileFilter.
self.params = dict(params)
# Make sure that subclasses call super() in their _validate() methods
# by setting this flag. The base class implementation of _validate
# should set this flag to True.
self._subclass_called_super_validate = False
self._validate()
assert self._subclass_called_super_validate, (
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
del self._subclass_called_super_validate
return self
def _validate(self):
# mark that we got here to enforce that subclasses overriding _validate
# call super().
self._subclass_called_super_validate = True
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
return min_extra_rows
@abstractproperty
def inputs(self):
raise NotImplementedError('inputs')
@abstractproperty
def windowed(self):
raise NotImplementedError('windowed')
@abstractproperty
def mask(self):
raise NotImplementedError('mask')
@abstractproperty
def dependencies(self):
raise NotImplementedError('dependencies')
def graph_repr(self):
# Default graph_repr is just the name of the type.
return type(self).__name__
def recursive_repr(self):
# Default recursive_repr is just the name of the type.
return type(self).__name__
class AssetExists(Term):
dtype = bool_dtype
dataset = None
inputs = ()
dependencies = {}
mask = None
windowed = False
def __repr__(self):
return "AssetExists()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"AssetExists cannot be computed directly."
" Check your PipelineEngine configuration."
)
class InputDates(Term):
ndim = 1
dataset = None
dtype = datetime64ns_dtype
inputs = ()
dependencies = {}
mask = None
windowed = False
window_safe = True
def __repr__(self):
return "InputDates()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"InputDates cannot be computed directly."
" Check your PipelineEngine configuration."
)
class LoadableTerm(Term):
windowed = False
inputs = ()
@lazyval
def dependencies(self):
return {self.mask: 0}
class ComputableTerm(Term):
inputs = NotSpecified
outputs = NotSpecified
window_length = NotSpecified
mask = NotSpecified
domain = NotSpecified
def __new__(cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
domain=domain,
*args, **kwargs):
if inputs is NotSpecified:
inputs = cls.inputs
# Having inputs = NotSpecified is an error, but we handle it later
# in self._validate rather than here.
if inputs is not NotSpecified:
# Allow users to specify lists as class-level defaults, but
# normalize to a tuple so that inputs is hashable.
inputs = tuple(inputs)
# Make sure all our inputs are valid pipeline objects before trying
# to infer a domain.
non_terms = [t for t in inputs if not isinstance(t, Term)]
if non_terms:
raise NonPipelineInputs(cls.__name__, non_terms)
if domain is NotSpecified:
domain = infer_domain(inputs)
if outputs is NotSpecified:
outputs = cls.outputs
if outputs is not NotSpecified:
outputs = tuple(outputs)
if mask is NotSpecified:
mask = cls.mask
if mask is NotSpecified:
mask = AssetExists()
if window_length is NotSpecified:
window_length = cls.window_length
return super(ComputableTerm, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
mask=mask,
window_length=window_length,
domain=domain,
*args, **kwargs
)
def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):
self.inputs = inputs
self.outputs = outputs
self.window_length = window_length
self.mask = mask
return super(ComputableTerm, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls,
inputs,
outputs,
window_length,
mask,
*args,
**kwargs):
return (
super(ComputableTerm, cls)._static_identity(*args, **kwargs),
inputs,
outputs,
window_length,
mask,
)
def _validate(self):
super(ComputableTerm, self)._validate()
# Check inputs.
if self.inputs is NotSpecified:
raise TermInputsNotSpecified(termname=type(self).__name__)
if not isinstance(self.domain, Domain):
raise TypeError(
"Expected {}.domain to be an instance of Domain, "
"but got {}.".format(type(self).__name__, type(self.domain))
)
# Check outputs.
if self.outputs is NotSpecified:
pass
elif not self.outputs:
raise TermOutputsEmpty(termname=type(self).__name__)
else:
# Raise an exception if there are any naming conflicts between the
# term's output names and certain attributes.
disallowed_names = [
attr for attr in dir(ComputableTerm)
if not attr.startswith('_')
]
insort(disallowed_names, 'compute')
for output in self.outputs:
if output.startswith('_') or output in disallowed_names:
raise InvalidOutputName(
output_name=output,
termname=type(self).__name__,
disallowed_names=disallowed_names,
)
if self.window_length is NotSpecified:
raise WindowLengthNotSpecified(termname=type(self).__name__)
if self.mask is NotSpecified:
raise AssertionError("{term} has no mask".format(term=self))
if self.window_length > 1:
for child in self.inputs:
if not child.window_safe:
raise NonWindowSafeInput(parent=self, child=child)
def _compute(self, inputs, dates, assets, mask):
raise NotImplementedError()
@lazyval
def windowed(self):
return (
self.window_length is not NotSpecified
and self.window_length > 0
)
@lazyval
def dependencies(self):
extra_input_rows = max(0, self.window_length - 1)
out = {}
for term in self.inputs:
out[term] = extra_input_rows
out[self.mask] = 0
return out
@expect_types(data=ndarray)
def postprocess(self, data):
return data
def to_workspace_value(self, result, assets):
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values
def _downsampled_type(self, *args, **kwargs):
raise NotImplementedError(
"downsampling is not yet implemented "
"for instances of %s." % type(self).__name__
)
@expect_downsample_frequency
@templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)
def downsample(self, frequency):
return self._downsampled_type(term=self, frequency=frequency)
def _aliased_type(self, *args, **kwargs):
raise NotImplementedError(
"alias is not yet implemented "
"for instances of %s." % type(self).__name__
)
@templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)
def alias(self, name):
return self._aliased_type(term=self, name=name)
def __repr__(self):
return (
"{type}([{inputs}], {window_length})"
).format(
type=type(self).__name__,
inputs=', '.join(i.recursive_repr() for i in self.inputs),
window_length=self.window_length,
)
def recursive_repr(self):
return type(self).__name__ + '(...)'
class Slice(ComputableTerm):
def __new__(cls, term, asset):
return super(Slice, cls).__new__(
cls,
asset=asset,
inputs=[term],
window_length=0,
mask=term.mask,
dtype=term.dtype,
missing_value=term.missing_value,
window_safe=term.window_safe,
ndim=1,
)
def __repr__(self):
return "{parent_term}[{asset}]".format(
type=type(self).__name__,
parent_term=self.inputs[0].recursive_repr(),
asset=self._asset,
)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(Slice, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (super(Slice, cls)._static_identity(*args, **kwargs), asset)
def _compute(self, windows, dates, assets, mask):
asset = self._asset
asset_column = searchsorted(assets.values, asset.sid)
if assets[asset_column] != asset.sid:
raise NonExistentAssetInTimeFrame(
asset=asset, start_date=dates[0], end_date=dates[-1],
)
# Return a 2D array with one column rather than a 1D array of the
# column.
return windows[0][:, [asset_column]]
@property
def asset(self):
return self._asset
@property
def _downsampled_type(self):
raise NotImplementedError(
'downsampling of slices is not yet supported'
)
def validate_dtype(termname, dtype, missing_value):
if dtype is NotSpecified:
raise DTypeNotSpecified(termname=termname)
try:
dtype = dtype_class(dtype)
except TypeError:
raise NotDType(dtype=dtype, termname=termname)
if not can_represent_dtype(dtype):
raise UnsupportedDType(dtype=dtype, termname=termname)
if missing_value is NotSpecified:
missing_value = default_missing_value_for_dtype(dtype)
try:
if (dtype == categorical_dtype):
# This check is necessary because we use object dtype for
# categoricals, and numpy will allow us to promote numerical
# values to object even though we don't support them.
_assert_valid_categorical_missing_value(missing_value)
array([missing_value]).astype(dtype=dtype, casting='same_kind')
except TypeError as e:
raise TypeError(
"Missing value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=termname,
value=missing_value,
dtype=dtype,
error=e,
)
)
return dtype, missing_value
def _assert_valid_categorical_missing_value(value):
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"Categorical terms must have missing values of type "
"{types}.".format(
types=' or '.join([t.__name__ for t in label_types]),
)
)
| true
| true
|
f71727c1dc39153ee0b3faeb26aa5bded60a358e
| 1,706
|
py
|
Python
|
sources/models/components/resistor_model.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | 1
|
2021-06-23T20:19:45.000Z
|
2021-06-23T20:19:45.000Z
|
sources/models/components/resistor_model.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | null | null | null |
sources/models/components/resistor_model.py
|
pablintino/Altium-DBlib-source
|
65e85572f84048a7e7c5a116b429e09ac9a33e82
|
[
"MIT"
] | null | null | null |
#
# MIT License
#
# Copyright (c) 2020 Pablo Rodriguez Nava, @pablintino
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from sqlalchemy import Column, String, ForeignKey
from models.components.component_model import ComponentModel
class ResistorModel(ComponentModel):
__tablename__ = 'resistor'
__id_prefix__ = 'RFIX'
# Primary key
id = Column(ForeignKey("component.id"), primary_key=True)
# Specific properties of a resistor
power_max = Column(String(30))
tolerance = Column(String(30))
# Tells the ORM the type of a specific component via the discriminator column
__mapper_args__ = {
'polymorphic_identity': __tablename__,
}
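# A self-contained usage sketch (assumes SQLAlchemy 1.4+ and an in-memory
# SQLite engine; the ``Demo*`` classes are hypothetical stand-ins for the
# real ComponentModel hierarchy) showing how the polymorphic configuration
# above lets a base-class query return properly typed subclass instances.
def _polymorphic_identity_sketch():
    from sqlalchemy import Integer, create_engine
    from sqlalchemy.orm import Session, declarative_base
    Base = declarative_base()
    class DemoComponent(Base):
        __tablename__ = 'demo_component'
        id = Column(Integer, primary_key=True)
        type = Column(String(30))  # discriminator column
        __mapper_args__ = {'polymorphic_identity': 'component',
                           'polymorphic_on': type}
    class DemoResistor(DemoComponent):
        __tablename__ = 'demo_resistor'
        id = Column(ForeignKey('demo_component.id'), primary_key=True)
        tolerance = Column(String(30))
        __mapper_args__ = {'polymorphic_identity': 'demo_resistor'}
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(DemoResistor(tolerance='1%'))
        session.commit()
        # Querying the base class yields the typed subclass back.
        assert isinstance(session.query(DemoComponent).one(), DemoResistor)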
| 37.911111
| 81
| 0.751465
|
from sqlalchemy import Column, String, ForeignKey
from models.components.component_model import ComponentModel
class ResistorModel(ComponentModel):
__tablename__ = 'resistor'
__id_prefix__ = 'RFIX'
id = Column(ForeignKey("component.id"), primary_key=True)
power_max = Column(String(30))
tolerance = Column(String(30))
__mapper_args__ = {
'polymorphic_identity': __tablename__,
}
| true
| true
|
f717282762e91799ab097381b849786ec18cef78
| 213
|
py
|
Python
|
tensorbay/opendataset/SegTrack2/__init__.py
|
Hoteryoung/tensorbay-python-sdk
|
53c34dd529c20ec69b34ddd348b5c8e74f4094d0
|
[
"MIT"
] | null | null | null |
tensorbay/opendataset/SegTrack2/__init__.py
|
Hoteryoung/tensorbay-python-sdk
|
53c34dd529c20ec69b34ddd348b5c8e74f4094d0
|
[
"MIT"
] | null | null | null |
tensorbay/opendataset/SegTrack2/__init__.py
|
Hoteryoung/tensorbay-python-sdk
|
53c34dd529c20ec69b34ddd348b5c8e74f4094d0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
"""Dataloader of the SegTrack2 dataset."""
from .loader import SegTrack2
__all__ = ["SegTrack2"]
| 17.75
| 54
| 0.732394
|
from .loader import SegTrack2
__all__ = ["SegTrack2"]
| true
| true
|
f7172919cb32b1bce840769389a548df78d2e786
| 3,420
|
py
|
Python
|
tempest/api/compute/admin/test_fixed_ips_negative.py
|
midokura/tempest
|
b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/compute/admin/test_fixed_ips_negative.py
|
midokura/tempest
|
b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/compute/admin/test_fixed_ips_negative.py
|
midokura/tempest
|
b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class FixedIPsNegativeTestJson(base.BaseV2ComputeAdminTest):
@classmethod
def resource_setup(cls):
super(FixedIPsNegativeTestJson, cls).resource_setup()
if CONF.service_available.neutron:
msg = ("%s skipped as neutron is available" % cls.__name__)
raise cls.skipException(msg)
cls.client = cls.os_adm.fixed_ips_client
cls.non_admin_client = cls.fixed_ips_client
server = cls.create_test_server(wait_until='ACTIVE')
server = cls.servers_client.get_server(server['id'])
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
cls.ip = ip['addr']
break
if cls.ip:
break
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_list_fixed_ip_details_with_non_admin_user(self):
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.get_fixed_ip_details, self.ip)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_reserve_with_non_admin_user(self):
body = {"reserve": "None"}
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.reserve_fixed_ip,
self.ip, body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_unreserve_with_non_admin_user(self):
body = {"unreserve": "None"}
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.reserve_fixed_ip,
self.ip, body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_reserve_with_invalid_ip(self):
# NOTE(maurosr): since this exercises the same code snippet, we do it
# only for reserve action
body = {"reserve": "None"}
# NOTE(eliqiao): in Juno, the exception is NotFound, but in master, we
# change the error code to BadRequest, both exceptions should be
# accepted by tempest
self.assertRaises((lib_exc.NotFound, lib_exc.BadRequest),
self.client.reserve_fixed_ip,
"my.invalid.ip", body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_fixed_ip_with_invalid_action(self):
body = {"invalid_action": "None"}
self.assertRaises(lib_exc.BadRequest,
self.client.reserve_fixed_ip,
self.ip, body)
| 39.767442
| 78
| 0.635965
|
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class FixedIPsNegativeTestJson(base.BaseV2ComputeAdminTest):
@classmethod
def resource_setup(cls):
super(FixedIPsNegativeTestJson, cls).resource_setup()
if CONF.service_available.neutron:
msg = ("%s skipped as neutron is available" % cls.__name__)
raise cls.skipException(msg)
cls.client = cls.os_adm.fixed_ips_client
cls.non_admin_client = cls.fixed_ips_client
server = cls.create_test_server(wait_until='ACTIVE')
server = cls.servers_client.get_server(server['id'])
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
cls.ip = ip['addr']
break
if cls.ip:
break
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_list_fixed_ip_details_with_non_admin_user(self):
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.get_fixed_ip_details, self.ip)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_reserve_with_non_admin_user(self):
body = {"reserve": "None"}
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.reserve_fixed_ip,
self.ip, body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_unreserve_with_non_admin_user(self):
body = {"unreserve": "None"}
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.reserve_fixed_ip,
self.ip, body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_reserve_with_invalid_ip(self):
body = {"reserve": "None"}
self.assertRaises((lib_exc.NotFound, lib_exc.BadRequest),
self.client.reserve_fixed_ip,
"my.invalid.ip", body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_fixed_ip_with_invalid_action(self):
body = {"invalid_action": "None"}
self.assertRaises(lib_exc.BadRequest,
self.client.reserve_fixed_ip,
self.ip, body)
| true
| true
|
f717292cec5c724f5760c7a638af9b8247d18a07
| 21,662
|
py
|
Python
|
phi/tf/tf_backend.py
|
PhylomatX/PhiFlow
|
2b7a73c1f595e288d26945cd53cc482952bb1db9
|
[
"MIT"
] | null | null | null |
phi/tf/tf_backend.py
|
PhylomatX/PhiFlow
|
2b7a73c1f595e288d26945cd53cc482952bb1db9
|
[
"MIT"
] | null | null | null |
phi/tf/tf_backend.py
|
PhylomatX/PhiFlow
|
2b7a73c1f595e288d26945cd53cc482952bb1db9
|
[
"MIT"
] | 1
|
2021-05-04T16:31:41.000Z
|
2021-05-04T16:31:41.000Z
|
import numbers
import uuid
import warnings
from packaging import version
import six
import numpy as np
import tensorflow as tf
from phi.backend.backend_helper import split_multi_mode_pad, PadSettings, general_grid_sample_nd, equalize_shapes, circular_pad, replicate_pad
from phi.backend.scipy_backend import SciPyBackend
from phi.tf.tf_cuda_resample import *
from . import tf
from phi.backend.backend import Backend
from phi.backend.tensorop import expand, collapsed_gather_nd
class TFBackend(Backend):
def __init__(self):
Backend.__init__(self, "TensorFlow")
@property
def precision_dtype(self):
return {16: np.float16, 32: np.float32, 64: np.float64, None: np.float32}[self.precision]
def is_tensor(self, x, only_native=False):
if not only_native and SciPyBackend().is_tensor(x, only_native=False):
return True
return isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor, tf.Operation))
def as_tensor(self, x, convert_external=True):
if self.is_tensor(x, only_native=convert_external):
tensor = x
elif isinstance(x, np.ndarray):
tensor = tf.convert_to_tensor(SciPyBackend(precision=self.precision).as_tensor(x))
else:
tensor = tf.convert_to_tensor(x)
# --- Enforce Precision ---
if not isinstance(tensor, numbers.Number):
if isinstance(tensor, np.ndarray):
tensor = SciPyBackend(precision=self.precision).as_tensor(tensor)
elif tensor.dtype.is_floating and self.has_fixed_precision:
tensor = self.to_float(tensor)
return tensor
def copy(self, tensor, only_mutable=False):
if not only_mutable or tf.executing_eagerly():
return tf.identity(tensor)
else:
return tensor
def equal(self, x, y):
return tf.equal(x, y)
def divide_no_nan(self, x, y):
if version.parse(tf.__version__) >= version.parse('1.11.0'):
return tf.div_no_nan(x, y)
else:
result = x / y
return tf.where(tf.is_finite(result), result, tf.zeros_like(result))
def random_uniform(self, shape, low=0, high=1):
return tf.random.uniform(shape, minval=low, maxval=high, dtype=self.precision_dtype)
def random_normal(self, shape):
return tf.random.normal(shape, dtype=self.precision_dtype)
def rank(self, value):
return len(value.shape)
def range(self, start, limit=None, delta=1, dtype=None):
return tf.range(start, limit, delta, dtype)
def tile(self, value, multiples):
if isinstance(multiples, (tuple, list)) and self.ndims(value) < len(multiples):
value = self.expand_dims(value, axis=0, number=len(multiples) - self.ndims(value))
return tf.tile(value, multiples)
def stack(self, values, axis=0):
return tf.stack(values, axis=axis)
def concat(self, values, axis):
return tf.concat(values, axis)
def pad(self, value, pad_width, mode='constant', constant_values=0):
passes = split_multi_mode_pad(self.ndims(value), PadSettings(pad_width, mode, constant_values), split_by_constant_value=True)
for pad_pass in passes:
value = self._single_mode_single_constant_pad(value, *pad_pass)
return value
def _single_mode_single_constant_pad(self, value, pad_width, single_mode, constant_value=0):
assert single_mode in ('constant', 'symmetric', 'circular', 'reflect', 'replicate'), single_mode
if single_mode == 'circular':
return circular_pad(value, pad_width, self)
if single_mode == 'replicate':
if np.any(np.array(pad_width) > 1):
return replicate_pad(value, pad_width, self)
else:
single_mode = 'symmetric'
return tf.pad(value, pad_width, single_mode.upper(), constant_values=constant_value) # constant, symmetric, reflect
def reshape(self, value, shape):
return tf.reshape(value, shape)
def sum(self, value, axis=None, keepdims=False):
if axis is not None:
if not isinstance(axis, int):
axis = list(axis)
return tf.reduce_sum(value, axis=axis, keepdims=keepdims)
def prod(self, value, axis=None):
if axis is not None:
if not isinstance(axis, int):
axis = list(axis)
if value.dtype == bool:
return tf.reduce_all(value, axis=axis)
return tf.reduce_prod(value, axis=axis)
def where(self, condition, x=None, y=None):
c = self.cast(condition, self.dtype(x))
return c * x + (1 - c) * y
# return tf.where(condition, x, y) # TF1 has an inconsistent broadcasting rule for where
def mean(self, value, axis=None, keepdims=False):
if axis is not None:
if not isinstance(axis, int):
axis = list(axis)
return tf.reduce_mean(value, axis, keepdims=keepdims)
def py_func(self, func, inputs, Tout, shape_out, stateful=True, name=None, grad=None):
if grad is None:
result = tf.py_func(func, inputs, Tout, stateful=stateful, name=name)
else:
# Need to generate a unique name to avoid duplicates:
rnd_name = 'PyFuncGrad' + str(uuid.uuid4())
tf.RegisterGradient(rnd_name)(grad) # see _MySquareGrad for grad example
g = tf.get_default_graph()
with g.gradient_override_map({"PyFunc": rnd_name}):
result = tf.py_func(func, inputs, Tout, stateful=stateful, name=name)
if shape_out is not None:
result.set_shape(shape_out)
return result
def resample(self, inputs, sample_coords, interpolation='linear', boundary='constant', constant_values=0):
assert interpolation == 'linear'
if use_cuda(inputs):
return resample_cuda(inputs, sample_coords, boundary)
else:
return general_grid_sample_nd(inputs, sample_coords, boundary, constant_values, self)  # while this is a bit slower than niftynet, it gives consistent results at the boundaries
def zeros_like(self, tensor):
return tf.zeros_like(tensor)
def ones_like(self, tensor):
return tf.ones_like(tensor)
def dot(self, a, b, axes):
return tf.tensordot(a, b, axes)
def matmul(self, A, b):
if isinstance(A, tf.SparseTensor):
result = tf.sparse_tensor_dense_matmul(A, tf.transpose(b))
result = tf.transpose(result)
result.set_shape(tf.TensorShape([b.shape[0], A.shape[0]]))
return result
else:
return tf.matmul(A, b)
def einsum(self, equation, *tensors):
return tf.einsum(equation, *tensors)
def while_loop(self, cond, body, loop_vars, shape_invariants=None, parallel_iterations=10, back_prop=True,
swap_memory=False, name=None, maximum_iterations=None):
return tf.while_loop(cond, body, loop_vars,
shape_invariants=shape_invariants,
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
name=name,
maximum_iterations=maximum_iterations)
def abs(self, x):
return tf.abs(x)
def sign(self, x):
return tf.sign(x)
def round(self, x):
return tf.round(x)
def ceil(self, x):
return tf.ceil(x)
def floor(self, x):
return tf.floor(x)
def max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def min(self, x, axis=None, keepdims=False):
return tf.reduce_min(x, axis=axis, keepdims=keepdims)
def with_custom_gradient(self, function, inputs, gradient, input_index=0, output_index=None, name_base="custom_gradient_func"):
# Setup custom gradient
gradient_name = name_base + "_" + str(uuid.uuid4())
tf.RegisterGradient(gradient_name)(gradient)
g = tf.get_default_graph()
with g.gradient_override_map({"Identity": gradient_name}):
fake_function = tf.identity(inputs[input_index])
outputs = function(*inputs)
output = outputs if output_index is None else outputs[output_index]
output_with_gradient = fake_function + tf.stop_gradient(output - fake_function)
if output_index is None:
return output_with_gradient
else:
outputs = list(outputs)
outputs[output_index] = output_with_gradient
return outputs
def maximum(self, a, b):
return tf.maximum(a, b)
def minimum(self, a, b):
return tf.minimum(a, b)
def clip(self, x, minimum, maximum):
return tf.clip_by_value(x, minimum, maximum)
def sqrt(self, x):
return tf.sqrt(x)
def exp(self, x):
return tf.exp(x)
def conv(self, tensor, kernel, padding="SAME"):
rank = tensor_spatial_rank(tensor)
padding = padding.upper()
if rank == 1:
result = tf.nn.conv1d(tensor, kernel, 1, padding)
elif rank == 2:
result = tf.nn.conv2d(tensor, kernel, [1, 1, 1, 1], padding)
elif rank == 3:
result = tf.nn.conv3d(tensor, kernel, [1, 1, 1, 1, 1], padding)
else:
raise ValueError("Tensor must be of rank 1, 2 or 3 but is %d" % rank)
return result
def expand_dims(self, a, axis=0, number=1):
if number == 0:
return a
for _i in range(number):
a = tf.expand_dims(a, axis)
return a
def shape(self, tensor):
return tf.shape(tensor)
def to_float(self, x, float64=False):
if float64:
warnings.warn('float64 argument is deprecated, set Backend.precision = 64 to use 64 bit operations.', DeprecationWarning)
return tf.cast(x, tf.float64)
else:
return tf.cast(x, self.precision_dtype)
def staticshape(self, tensor):
if self.is_tensor(tensor, only_native=True):
return tuple(tensor.shape.as_list())
else:
return np.shape(tensor)
def to_int(self, x, int64=False):
return tf.cast(x, tf.int64) if int64 else tf.cast(x, tf.int32)
def to_complex(self, x):
if self.dtype(x) in (np.complex64, np.complex128):
return x
if self.dtype(x) == np.float64:
return tf.to_complex128(x)
else:
return tf.to_complex64(x)
def gather(self, values, indices):
if isinstance(indices, slice):
return values[indices]
return tf.gather(values, indices)
def gather_nd(self, values, indices, batch_dims=0):
if batch_dims == 0:
return tf.gather_nd(values, indices)
elif version.parse(tf.__version__) >= version.parse('1.14.0'):
return tf.gather_nd(values, indices, batch_dims=batch_dims)
else:
if batch_dims > 1:
raise NotImplementedError('batch_dims > 1 only supported on TensorFlow >= 1.14')
batch_size = self.shape(values)[0]
batch_ids = tf.reshape(tf.range(batch_size), [batch_size] + [1] * (self.ndims(indices) - 1))
batch_ids = tf.tile(batch_ids, tf.concat([[1], self.shape(indices)[1:-1], [1]], axis=0))  # build multiples with tf.concat: `[1] + tensor` would add element-wise, since self.shape returns a Tensor
indices = tf.concat([batch_ids, indices], -1)
return tf.gather_nd(values, indices)
def unstack(self, tensor, axis=0, keepdims=False):
unstacked = tf.unstack(tensor, axis=axis)
if keepdims:
unstacked = [self.expand_dims(c, axis=axis) for c in unstacked]
return unstacked
def std(self, x, axis=None, keepdims=False):
_mean, var = tf.nn.moments(x, axis, keepdims=keepdims)
return tf.sqrt(var)
def boolean_mask(self, x, mask):
return tf.boolean_mask(x, mask)
def isfinite(self, x):
return tf.is_finite(x)
def any(self, boolean_tensor, axis=None, keepdims=False):
return tf.reduce_any(boolean_tensor, axis=axis, keepdims=keepdims)
def all(self, boolean_tensor, axis=None, keepdims=False):
return tf.reduce_all(boolean_tensor, axis=axis, keepdims=keepdims)
def scatter(self, points, indices, values, shape, duplicates_handling='undefined'):
# Change indexing so batch number is included as first element of the index, for example: [0,31,24] indexes the first batch (batch 0) and 2D coordinates (31,24).
buffer = tf.zeros(shape, dtype=values.dtype)
repetitions = []
for dim in range(len(indices.shape) - 1):
if values.shape[dim] == 1:
repetitions.append(indices.shape[dim])
else:
assert indices.shape[dim] == values.shape[dim]
repetitions.append(1)
repetitions.append(1)
values = self.tile(values, repetitions)
if duplicates_handling == 'add':
# Only for Tensorflow with custom gradient
@tf.custom_gradient
def scatter_density(points, indices, values):
result = tf.tensor_scatter_add(buffer, indices, values)
def grad(dr):
return self.resample(gradient(dr, difference='central'), points), None, None
return result, grad
return scatter_density(points, indices, values)
elif duplicates_handling == 'mean':
# Won't entirely work with out of bounds particles (still counted in mean)
count = tf.tensor_scatter_add(buffer, indices, tf.ones_like(values))
total = tf.tensor_scatter_add(buffer, indices, values)
return total / tf.maximum(1.0, count)
else: # last, any, undefined
# indices = self.to_int(indices, int64=True)
# st = tf.SparseTensor(indices, values, shape) # ToDo this only supports 2D shapes
# st = tf.sparse.reorder(st) # only needed if not ordered
# return tf.sparse.to_dense(st)
count = tf.tensor_scatter_add(buffer, indices, tf.ones_like(values))
total = tf.tensor_scatter_add(buffer, indices, values)
return total / tf.maximum(1.0, count)
def fft(self, x):
rank = len(x.shape) - 2
assert rank >= 1
x = self.to_complex(x)
if rank == 1:
return tf.stack([tf.fft(c) for c in tf.unstack(x, axis=-1)], axis=-1)
elif rank == 2:
return tf.stack([tf.fft2d(c) for c in tf.unstack(x, axis=-1)], axis=-1)
elif rank == 3:
return tf.stack([tf.fft3d(c) for c in tf.unstack(x, axis=-1)], axis=-1)
else:
raise NotImplementedError('n-dimensional FFT not implemented.')
def ifft(self, k):
rank = len(k.shape) - 2
assert rank >= 1
if rank == 1:
return tf.stack([tf.ifft(c) for c in tf.unstack(k, axis=-1)], axis=-1)
elif rank == 2:
return tf.stack([tf.ifft2d(c) for c in tf.unstack(k, axis=-1)], axis=-1)
elif rank == 3:
return tf.stack([tf.ifft3d(c) for c in tf.unstack(k, axis=-1)], axis=-1)
else:
raise NotImplementedError('n-dimensional inverse FFT not implemented.')
def imag(self, complex):
return tf.imag(complex)
def real(self, complex):
return tf.real(complex)
def cast(self, x, dtype):
return tf.cast(x, dtype)
def sin(self, x):
return tf.sin(x)
def cos(self, x):
return tf.cos(x)
def dtype(self, array):
if self.is_tensor(array, only_native=True):
return array.dtype.as_numpy_dtype
else:
return SciPyBackend().dtype(array)
def sparse_tensor(self, indices, values, shape):
return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
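# Two hedged numpy sketches (illustrative only) of patterns used by
# TFBackend above: the cast-and-blend in ``where``, which sidesteps TF1's
# inconsistent broadcasting rule for tf.where, and the count/total trick
# ``scatter`` uses for 'mean' duplicate handling.
def _where_blend_sketch():
    cond = np.array([[True], [False]])  # shape (2, 1) broadcasts to (2, 3)
    x = np.ones((2, 3))
    y = np.zeros((2, 3))
    c = cond.astype(x.dtype)            # 1.0 / 0.0 mask
    blended = c * x + (1 - c) * y
    assert blended.shape == (2, 3)
    assert (blended[0] == 1).all() and (blended[1] == 0).all()
    return blended
def _scatter_mean_sketch():
    # np.add.at is an unbuffered stand-in for tf.tensor_scatter_add here.
    values = np.array([1.0, 3.0, 5.0])
    cells = np.array([0, 2, 2])         # two values land in cell 2
    total = np.zeros(4)
    count = np.zeros(4)
    np.add.at(total, cells, values)
    np.add.at(count, cells, 1.0)
    mean = total / np.maximum(1.0, count)
    assert list(mean) == [1.0, 0.0, 4.0, 0.0]
    return mean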
# from niftynet.layer.resampler.py
# https://cmiclab.cs.ucl.ac.uk/CMIC/NiftyNet/blob/69c98e5a95cc6788ad9fb8c5e27dc24d1acec634/niftynet/layer/resampler.py
COORDINATES_TYPE = tf.int32
EPS = 1e-6
def tensor_spatial_rank(tensor):
return len(tensor.shape) - 2
def unit_direction(dim, spatial_rank): # ordered like z,y,x
direction = [1 if i == dim else 0 for i in range(spatial_rank)]
for _i in range(spatial_rank):
direction = tf.expand_dims(direction, axis=0)
return direction
def _resample_no_pack(grid, coords, boundary_func):
resolution = np.array([int(d) for d in grid.shape[1:-1]])
sp_rank = tensor_spatial_rank(grid)
floor = boundary_func(tf.floor(coords), resolution)
up_weights = coords - floor
lo_weights = TFBackend().unstack(1 - up_weights, axis=-1, keepdims=True)
up_weights = TFBackend().unstack(up_weights, axis=-1, keepdims=True)
base_coords = tf.cast(floor, tf.int32)
def interpolate_nd(coords, axis):
direction = np.array([1 if ax == axis else 0 for ax in range(sp_rank)])
with tf.variable_scope('coord_plus_one'):
up_coords = coords + direction # This is extremely slow for some reason - ToDo tile direction array to have same dimensions before calling interpolate_nd?
if axis == sp_rank - 1:
# up_coords = boundary_func(up_coords, resolution)
lo_values = tf.gather_nd(grid, coords, batch_dims=1)
up_values = tf.gather_nd(grid, up_coords, batch_dims=1)
else:
lo_values = interpolate_nd(coords, axis + 1)
up_values = interpolate_nd(up_coords, axis + 1)
with tf.variable_scope('weighted_sum_axis_%d' % axis):
return lo_values * lo_weights[axis] + up_values * up_weights[axis]
with tf.variable_scope('interpolate_nd'):
result = interpolate_nd(base_coords, 0)
return result
def _resample_linear_niftynet(inputs, sample_coords, boundary, boundary_func, float_type):
inputs = tf.convert_to_tensor(inputs)
sample_coords = tf.convert_to_tensor(sample_coords)
in_spatial_size = [int(d) for d in inputs.shape[1:-1]]
in_spatial_rank = tensor_spatial_rank(inputs)
batch_size = tf.shape(inputs)[0]
out_spatial_rank = tensor_spatial_rank(sample_coords)
out_spatial_size = sample_coords.get_shape().as_list()[1:-1]
if sample_coords.shape[0] != inputs.shape[0]:
sample_coords = tf.tile(sample_coords, [batch_size] + [1] * (len(sample_coords.shape) - 1))
xy = tf.unstack(sample_coords, axis=-1)
base_coords = [tf.floor(coords) for coords in xy]
floor_coords = [tf.cast(boundary_func(x, in_spatial_size[idx]), COORDINATES_TYPE) for (idx, x) in enumerate(base_coords)]
ceil_coords = [tf.cast(boundary_func(x + 1.0, in_spatial_size[idx]), COORDINATES_TYPE) for (idx, x) in enumerate(base_coords)]
if boundary.upper() == 'ZERO':
weight_0 = [tf.expand_dims(x - tf.cast(i, float_type), -1) for (x, i) in zip(xy, floor_coords)]
weight_1 = [tf.expand_dims(tf.cast(i, float_type) - x, -1) for (x, i) in zip(xy, ceil_coords)]
else:
weight_0 = [tf.expand_dims(x - i, -1) for (x, i) in zip(xy, base_coords)]
weight_1 = [1.0 - w for w in weight_0]
batch_ids = tf.reshape(tf.range(batch_size), [batch_size] + [1] * out_spatial_rank)
batch_ids = tf.tile(batch_ids, [1] + out_spatial_size)
sc = (floor_coords, ceil_coords)
binary_neighbour_ids = [[int(c) for c in format(i, '0%ib' % in_spatial_rank)] for i in range(2 ** in_spatial_rank)]
def get_knot(bc):
coord = [sc[c][i] for i, c in enumerate(bc)]
if version.parse(tf.__version__) >= version.parse('1.14.0'):
coord = tf.stack(coord, -1)
return tf.gather_nd(inputs, coord, batch_dims=1) # NaN can cause negative integers here
else:
coord = tf.stack([batch_ids] + coord, -1)
return tf.gather_nd(inputs, coord) # NaN can cause negative integers here
samples = [get_knot(bc) for bc in binary_neighbour_ids]
def _pyramid_combination(samples, w_0, w_1):
if len(w_0) == 1:
return samples[0] * w_1[0] + samples[1] * w_0[0]
f_0 = _pyramid_combination(samples[::2], w_0[:-1], w_1[:-1])
f_1 = _pyramid_combination(samples[1::2], w_0[:-1], w_1[:-1])
return f_0 * w_1[-1] + f_1 * w_0[-1]
return _pyramid_combination(samples, weight_0, weight_1)
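# A hedged, plain-Python sketch (not part of the API) of the
# ``_pyramid_combination`` recursion above for the 2D case: with corner
# samples ordered [00, 01, 10, 11] and per-axis weights w_0 (fraction toward
# the ceil corner) and w_1 = 1 - w_0, it reduces to bilinear interpolation.
def _pyramid_combination_sketch():
    def pyramid(samples, w_0, w_1):
        if len(w_0) == 1:
            return samples[0] * w_1[0] + samples[1] * w_0[0]
        f_0 = pyramid(samples[::2], w_0[:-1], w_1[:-1])
        f_1 = pyramid(samples[1::2], w_0[:-1], w_1[:-1])
        return f_0 * w_1[-1] + f_1 * w_0[-1]
    corners = [0.0, 1.0, 2.0, 3.0]  # values at (0,0), (0,1), (1,0), (1,1)
    w_0 = [0.5, 0.5]                # sample point halfway along both axes
    w_1 = [1.0 - w for w in w_0]
    value = pyramid(corners, w_0, w_1)
    assert value == 1.5             # mean of the four corner values
    return value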
def _boundary_snap(sample_coords, spatial_shape):
max_indices = [l - 1 for l in spatial_shape]
for _i in range(len(spatial_shape)):
max_indices = tf.expand_dims(max_indices, 0)
sample_coords = tf.minimum(sample_coords, max_indices)
sample_coords = tf.maximum(sample_coords, 0)
return sample_coords
def _boundary_replicate(sample_coords, input_size):
return tf.maximum(tf.minimum(sample_coords, input_size - 1), 0)
def _boundary_circular(sample_coords, input_size):
return tf.mod(tf.mod(sample_coords, input_size) + input_size, input_size)
def _boundary_symmetric(sample_coords, input_size):
sample_coords = _boundary_circular(sample_coords, 2 * input_size)
return ((2 * input_size - 1) - tf.abs((2 * input_size - 1) - 2 * sample_coords)) // 2
def _boundary_reflect(sample_coords, input_size):
sample_coords = _boundary_circular(sample_coords, 2 * input_size - 2)
return (input_size - 1) - tf.abs((input_size - 1) - sample_coords)
SUPPORTED_BOUNDARY = {
'zero': _boundary_replicate,
'replicate': _boundary_replicate,
'circular': _boundary_circular,
'symmetric': _boundary_symmetric,
'reflect': _boundary_reflect,
}
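# A hedged integer-only check of the index-folding rules above, mirroring
# the tf.mod/tf.abs arithmetic with plain numpy: 'circular' wraps around,
# 'symmetric' mirrors and repeats the edge sample, and 'reflect' mirrors
# about the edge without repeating it.
def _boundary_folding_sketch():
    def circ(i, n):
        return ((i % n) + n) % n
    def symm(i, n):
        i = circ(i, 2 * n)
        return ((2 * n - 1) - np.abs((2 * n - 1) - 2 * i)) // 2
    def refl(i, n):
        i = circ(i, 2 * n - 2)
        return (n - 1) - np.abs((n - 1) - i)
    idx = np.arange(-2, 7)  # runs past both ends of a size-5 axis
    assert list(circ(idx, 5)) == [3, 4, 0, 1, 2, 3, 4, 0, 1]
    assert list(symm(idx, 5)) == [1, 0, 0, 1, 2, 3, 4, 4, 3]
    assert list(refl(idx, 5)) == [2, 1, 0, 1, 2, 3, 4, 3, 2]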
| 39.030631
| 186
| 0.634383
|
import numbers
import uuid
import warnings
from packaging import version
import six
import numpy as np
import tensorflow as tf
from phi.backend.backend_helper import split_multi_mode_pad, PadSettings, general_grid_sample_nd, equalize_shapes, circular_pad, replicate_pad
from phi.backend.scipy_backend import SciPyBackend
from phi.tf.tf_cuda_resample import *
from . import tf
from phi.backend.backend import Backend
from phi.backend.tensorop import expand, collapsed_gather_nd
class TFBackend(Backend):
def __init__(self):
Backend.__init__(self, "TensorFlow")
@property
def precision_dtype(self):
return {16: np.float16, 32: np.float32, 64: np.float64, None: np.float32}[self.precision]
def is_tensor(self, x, only_native=False):
if not only_native and SciPyBackend().is_tensor(x, only_native=False):
return True
return isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor, tf.Operation))
def as_tensor(self, x, convert_external=True):
if self.is_tensor(x, only_native=convert_external):
tensor = x
elif isinstance(x, np.ndarray):
tensor = tf.convert_to_tensor(SciPyBackend(precision=self.precision).as_tensor(x))
else:
tensor = tf.convert_to_tensor(x)
if not isinstance(tensor, numbers.Number):
if isinstance(tensor, np.ndarray):
tensor = SciPyBackend(precision=self.precision).as_tensor(tensor)
elif tensor.dtype.is_floating and self.has_fixed_precision:
tensor = self.to_float(tensor)
return tensor
def copy(self, tensor, only_mutable=False):
if not only_mutable or tf.executing_eagerly():
return tf.identity(tensor)
else:
return tensor
def equal(self, x, y):
return tf.equal(x, y)
def divide_no_nan(self, x, y):
if version.parse(tf.__version__) >= version.parse('1.11.0'):
return tf.div_no_nan(x, y)
else:
result = x / y
return tf.where(tf.is_finite(result), result, tf.zeros_like(result))
def random_uniform(self, shape, low=0, high=1):
return tf.random.uniform(shape, minval=low, maxval=high, dtype=self.precision_dtype)
def random_normal(self, shape):
return tf.random.normal(shape, dtype=self.precision_dtype)
def rank(self, value):
return len(value.shape)
def range(self, start, limit=None, delta=1, dtype=None):
return tf.range(start, limit, delta, dtype)
def tile(self, value, multiples):
if isinstance(multiples, (tuple, list)) and self.ndims(value) < len(multiples):
value = self.expand_dims(value, axis=0, number=len(multiples) - self.ndims(value))
return tf.tile(value, multiples)
def stack(self, values, axis=0):
return tf.stack(values, axis=axis)
def concat(self, values, axis):
return tf.concat(values, axis)
def pad(self, value, pad_width, mode='constant', constant_values=0):
passes = split_multi_mode_pad(self.ndims(value), PadSettings(pad_width, mode, constant_values), split_by_constant_value=True)
for pad_pass in passes:
value = self._single_mode_single_constant_pad(value, *pad_pass)
return value
def _single_mode_single_constant_pad(self, value, pad_width, single_mode, constant_value=0):
assert single_mode in ('constant', 'symmetric', 'circular', 'reflect', 'replicate'), single_mode
if single_mode == 'circular':
return circular_pad(value, pad_width, self)
if single_mode == 'replicate':
if np.any(np.array(pad_width) > 1):
return replicate_pad(value, pad_width, self)
else:
single_mode = 'symmetric'
return tf.pad(value, pad_width, single_mode.upper(), constant_values=constant_value)
def reshape(self, value, shape):
return tf.reshape(value, shape)
def sum(self, value, axis=None, keepdims=False):
if axis is not None:
if not isinstance(axis, int):
axis = list(axis)
return tf.reduce_sum(value, axis=axis, keepdims=keepdims)
def prod(self, value, axis=None):
if axis is not None:
if not isinstance(axis, int):
axis = list(axis)
if value.dtype == bool:
return tf.reduce_all(value, axis=axis)
return tf.reduce_prod(value, axis=axis)
def where(self, condition, x=None, y=None):
c = self.cast(condition, self.dtype(x))
return c * x + (1 - c) * y
def mean(self, value, axis=None, keepdims=False):
if axis is not None:
if not isinstance(axis, int):
axis = list(axis)
return tf.reduce_mean(value, axis, keepdims=keepdims)
def py_func(self, func, inputs, Tout, shape_out, stateful=True, name=None, grad=None):
if grad is None:
result = tf.py_func(func, inputs, Tout, stateful=stateful, name=name)
else:
rnd_name = 'PyFuncGrad' + str(uuid.uuid4())
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({"PyFunc": rnd_name}):
result = tf.py_func(func, inputs, Tout, stateful=stateful, name=name)
if shape_out is not None:
result.set_shape(shape_out)
return result
def resample(self, inputs, sample_coords, interpolation='linear', boundary='constant', constant_values=0):
assert interpolation == 'linear'
if use_cuda(inputs):
return resample_cuda(inputs, sample_coords, boundary)
else:
return general_grid_sample_nd(inputs, sample_coords, boundary, constant_values, self)
def zeros_like(self, tensor):
return tf.zeros_like(tensor)
def ones_like(self, tensor):
return tf.ones_like(tensor)
def dot(self, a, b, axes):
return tf.tensordot(a, b, axes)
def matmul(self, A, b):
if isinstance(A, tf.SparseTensor):
result = tf.sparse_tensor_dense_matmul(A, tf.transpose(b))
result = tf.transpose(result)
result.set_shape(tf.TensorShape([b.shape[0], A.shape[0]]))
return result
else:
return tf.matmul(A, b)
def einsum(self, equation, *tensors):
return tf.einsum(equation, *tensors)
def while_loop(self, cond, body, loop_vars, shape_invariants=None, parallel_iterations=10, back_prop=True,
swap_memory=False, name=None, maximum_iterations=None):
return tf.while_loop(cond, body, loop_vars,
shape_invariants=shape_invariants,
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
name=name,
maximum_iterations=maximum_iterations)
def abs(self, x):
return tf.abs(x)
def sign(self, x):
return tf.sign(x)
def round(self, x):
return tf.round(x)
def ceil(self, x):
return tf.ceil(x)
def floor(self, x):
return tf.floor(x)
def max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def min(self, x, axis=None, keepdims=False):
return tf.reduce_min(x, axis=axis, keepdims=keepdims)
def with_custom_gradient(self, function, inputs, gradient, input_index=0, output_index=None, name_base="custom_gradient_func"):
gradient_name = name_base + "_" + str(uuid.uuid4())
tf.RegisterGradient(gradient_name)(gradient)
g = tf.get_default_graph()
with g.gradient_override_map({"Identity": gradient_name}):
fake_function = tf.identity(inputs[input_index])
outputs = function(*inputs)
output = outputs if output_index is None else outputs[output_index]
output_with_gradient = fake_function + tf.stop_gradient(output - fake_function)
if output_index is None:
return output_with_gradient
else:
outputs = list(outputs)
outputs[output_index] = output_with_gradient
return outputs
def maximum(self, a, b):
return tf.maximum(a, b)
def minimum(self, a, b):
return tf.minimum(a, b)
def clip(self, x, minimum, maximum):
return tf.clip_by_value(x, minimum, maximum)
def sqrt(self, x):
return tf.sqrt(x)
def exp(self, x):
return tf.exp(x)
def conv(self, tensor, kernel, padding="SAME"):
rank = tensor_spatial_rank(tensor)
padding = padding.upper()
if rank == 1:
result = tf.nn.conv1d(tensor, kernel, 1, padding)
elif rank == 2:
result = tf.nn.conv2d(tensor, kernel, [1, 1, 1, 1], padding)
elif rank == 3:
result = tf.nn.conv3d(tensor, kernel, [1, 1, 1, 1, 1], padding)
else:
raise ValueError("Tensor must be of rank 1, 2 or 3 but is %d" % rank)
return result
def expand_dims(self, a, axis=0, number=1):
if number == 0:
return a
for _i in range(number):
a = tf.expand_dims(a, axis)
return a
def shape(self, tensor):
return tf.shape(tensor)
def to_float(self, x, float64=False):
if float64:
warnings.warn('float64 argument is deprecated, set Backend.precision = 64 to use 64 bit operations.', DeprecationWarning)
return tf.cast(x, tf.float64)
else:
return tf.cast(x, self.precision_dtype)
def staticshape(self, tensor):
if self.is_tensor(tensor, only_native=True):
return tuple(tensor.shape.as_list())
else:
return np.shape(tensor)
def to_int(self, x, int64=False):
return tf.cast(x, tf.int64) if int64 else tf.cast(x, tf.int32)
def to_complex(self, x):
if self.dtype(x) in (np.complex64, np.complex128):
return x
if self.dtype(x) == np.float64:
return tf.to_complex128(x)
else:
return tf.to_complex64(x)
def gather(self, values, indices):
if isinstance(indices, slice):
return values[indices]
return tf.gather(values, indices)
def gather_nd(self, values, indices, batch_dims=0):
if batch_dims == 0:
return tf.gather_nd(values, indices)
elif version.parse(tf.__version__) >= version.parse('1.14.0'):
return tf.gather_nd(values, indices, batch_dims=batch_dims)
else:
if batch_dims > 1:
raise NotImplementedError('batch_dims > 1 only supported on TensorFlow >= 1.14')
batch_size = self.shape(values)[0]
batch_ids = tf.reshape(tf.range(batch_size), [batch_size] + [1] * (self.ndims(indices) - 1))
batch_ids = tf.tile(batch_ids, tf.concat([[1], self.shape(indices)[1:-1], [1]], axis=0))
indices = tf.concat([batch_ids, indices], -1)
return tf.gather_nd(values, indices)
def unstack(self, tensor, axis=0, keepdims=False):
unstacked = tf.unstack(tensor, axis=axis)
if keepdims:
unstacked = [self.expand_dims(c, axis=axis) for c in unstacked]
return unstacked
def std(self, x, axis=None, keepdims=False):
_mean, var = tf.nn.moments(x, axis, keepdims=keepdims)
return tf.sqrt(var)
def boolean_mask(self, x, mask):
return tf.boolean_mask(x, mask)
def isfinite(self, x):
return tf.is_finite(x)
def any(self, boolean_tensor, axis=None, keepdims=False):
return tf.reduce_any(boolean_tensor, axis=axis, keepdims=keepdims)
def all(self, boolean_tensor, axis=None, keepdims=False):
return tf.reduce_all(boolean_tensor, axis=axis, keepdims=keepdims)
def scatter(self, points, indices, values, shape, duplicates_handling='undefined'):
buffer = tf.zeros(shape, dtype=values.dtype)
repetitions = []
for dim in range(len(indices.shape) - 1):
if values.shape[dim] == 1:
repetitions.append(indices.shape[dim])
else:
assert indices.shape[dim] == values.shape[dim]
repetitions.append(1)
repetitions.append(1)
values = self.tile(values, repetitions)
if duplicates_handling == 'add':
@tf.custom_gradient
def scatter_density(points, indices, values):
result = tf.tensor_scatter_add(buffer, indices, values)
def grad(dr):
return self.resample(gradient(dr, difference='central'), points), None, None
return result, grad
return scatter_density(points, indices, values)
elif duplicates_handling == 'mean':
count = tf.tensor_scatter_add(buffer, indices, tf.ones_like(values))
total = tf.tensor_scatter_add(buffer, indices, values)
return total / tf.maximum(1.0, count)
else: # last, any, undefined
# indices = self.to_int(indices, int64=True)
# st = tf.SparseTensor(indices, values, shape) # ToDo this only supports 2D shapes
# st = tf.sparse.reorder(st) # only needed if not ordered
# return tf.sparse.to_dense(st)
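# Editorial note (not in the original): the code below reuses the 'mean'
# computation, averaging values that scatter to the same cell rather than
# leaving duplicate handling truly undefined.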
count = tf.tensor_scatter_add(buffer, indices, tf.ones_like(values))
total = tf.tensor_scatter_add(buffer, indices, values)
return total / tf.maximum(1.0, count)
def fft(self, x):
rank = len(x.shape) - 2
assert rank >= 1
x = self.to_complex(x)
if rank == 1:
return tf.stack([tf.fft(c) for c in tf.unstack(x, axis=-1)], axis=-1)
elif rank == 2:
return tf.stack([tf.fft2d(c) for c in tf.unstack(x, axis=-1)], axis=-1)
elif rank == 3:
return tf.stack([tf.fft3d(c) for c in tf.unstack(x, axis=-1)], axis=-1)
else:
raise NotImplementedError('n-dimensional FFT not implemented.')
def ifft(self, k):
rank = len(k.shape) - 2
assert rank >= 1
if rank == 1:
return tf.stack([tf.ifft(c) for c in tf.unstack(k, axis=-1)], axis=-1)
elif rank == 2:
return tf.stack([tf.ifft2d(c) for c in tf.unstack(k, axis=-1)], axis=-1)
elif rank == 3:
return tf.stack([tf.ifft3d(c) for c in tf.unstack(k, axis=-1)], axis=-1)
else:
raise NotImplementedError('n-dimensional inverse FFT not implemented.')
def imag(self, complex):
return tf.imag(complex)
def real(self, complex):
return tf.real(complex)
def cast(self, x, dtype):
return tf.cast(x, dtype)
def sin(self, x):
return tf.sin(x)
def cos(self, x):
return tf.cos(x)
def dtype(self, array):
if self.is_tensor(array, only_native=True):
return array.dtype.as_numpy_dtype
else:
return SciPyBackend().dtype(array)
def sparse_tensor(self, indices, values, shape):
return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
# from niftynet.layer.resampler.py
# https://cmiclab.cs.ucl.ac.uk/CMIC/NiftyNet/blob/69c98e5a95cc6788ad9fb8c5e27dc24d1acec634/niftynet/layer/resampler.py
COORDINATES_TYPE = tf.int32
EPS = 1e-6
def tensor_spatial_rank(tensor):
return len(tensor.shape) - 2
def unit_direction(dim, spatial_rank): # ordered like z,y,x
direction = [1 if i == dim else 0 for i in range(spatial_rank)]
for _i in range(spatial_rank):
direction = tf.expand_dims(direction, axis=0)
return direction
def _resample_no_pack(grid, coords, boundary_func):
resolution = np.array([int(d) for d in grid.shape[1:-1]])
sp_rank = tensor_spatial_rank(grid)
floor = boundary_func(tf.floor(coords), resolution)
up_weights = coords - floor
lo_weights = TFBackend().unstack(1 - up_weights, axis=-1, keepdims=True)
up_weights = TFBackend().unstack(up_weights, axis=-1, keepdims=True)
base_coords = tf.cast(floor, tf.int32)
def interpolate_nd(coords, axis):
direction = np.array([1 if ax == axis else 0 for ax in range(sp_rank)])
print(direction.shape)
with tf.variable_scope('coord_plus_one'):
up_coords = coords + direction # This is extremely slow for some reason - ToDo tile direction array to have same dimensions before calling interpolate_nd?
if axis == sp_rank - 1:
# up_coords = boundary_func(up_coords, resolution)
lo_values = tf.gather_nd(grid, coords, batch_dims=1)
up_values = tf.gather_nd(grid, up_coords, batch_dims=1)
else:
lo_values = interpolate_nd(coords, axis + 1)
up_values = interpolate_nd(up_coords, axis + 1)
with tf.variable_scope('weighted_sum_axis_%d' % axis):
return lo_values * lo_weights[axis] + up_values * up_weights[axis]
with tf.variable_scope('interpolate_nd'):
result = interpolate_nd(base_coords, 0)
return result
def _resample_linear_niftynet(inputs, sample_coords, boundary, boundary_func, float_type):
inputs = tf.convert_to_tensor(inputs)
sample_coords = tf.convert_to_tensor(sample_coords)
in_spatial_size = [int(d) for d in inputs.shape[1:-1]]
in_spatial_rank = tensor_spatial_rank(inputs)
batch_size = tf.shape(inputs)[0]
out_spatial_rank = tensor_spatial_rank(sample_coords)
out_spatial_size = sample_coords.get_shape().as_list()[1:-1]
if sample_coords.shape[0] != inputs.shape[0]:
sample_coords = tf.tile(sample_coords, [batch_size] + [1] * (len(sample_coords.shape) - 1))
xy = tf.unstack(sample_coords, axis=-1)
base_coords = [tf.floor(coords) for coords in xy]
floor_coords = [tf.cast(boundary_func(x, in_spatial_size[idx]), COORDINATES_TYPE) for (idx, x) in enumerate(base_coords)]
ceil_coords = [tf.cast(boundary_func(x + 1.0, in_spatial_size[idx]), COORDINATES_TYPE) for (idx, x) in enumerate(base_coords)]
if boundary.upper() == 'ZERO':
weight_0 = [tf.expand_dims(x - tf.cast(i, float_type), -1) for (x, i) in zip(xy, floor_coords)]
weight_1 = [tf.expand_dims(tf.cast(i, float_type) - x, -1) for (x, i) in zip(xy, ceil_coords)]
else:
weight_0 = [tf.expand_dims(x - i, -1) for (x, i) in zip(xy, base_coords)]
weight_1 = [1.0 - w for w in weight_0]
batch_ids = tf.reshape(tf.range(batch_size), [batch_size] + [1] * out_spatial_rank)
batch_ids = tf.tile(batch_ids, [1] + out_spatial_size)
sc = (floor_coords, ceil_coords)
binary_neighbour_ids = [[int(c) for c in format(i, '0%ib' % in_spatial_rank)] for i in range(2 ** in_spatial_rank)]
def get_knot(bc):
coord = [sc[c][i] for i, c in enumerate(bc)]
if version.parse(tf.__version__) >= version.parse('1.14.0'):
coord = tf.stack(coord, -1)
return tf.gather_nd(inputs, coord, batch_dims=1) # NaN can cause negative integers here
else:
coord = tf.stack([batch_ids] + coord, -1)
return tf.gather_nd(inputs, coord) # NaN can cause negative integers here
samples = [get_knot(bc) for bc in binary_neighbour_ids]
def _pyramid_combination(samples, w_0, w_1):
if len(w_0) == 1:
return samples[0] * w_1[0] + samples[1] * w_0[0]
f_0 = _pyramid_combination(samples[::2], w_0[:-1], w_1[:-1])
f_1 = _pyramid_combination(samples[1::2], w_0[:-1], w_1[:-1])
return f_0 * w_1[-1] + f_1 * w_0[-1]
return _pyramid_combination(samples, weight_0, weight_1)
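# --- Illustrative sketch (not part of the original file) ---
# _pyramid_combination above folds the 2**rank corner samples one axis at a
# time. For rank 2 it is algebraically the familiar bilinear blend; the helper
# below is hypothetical and only illustrates the arithmetic (wx, wy are the
# fractional parts of the sample coordinate along each axis):
def _bilinear_blend_example(s00, s01, s10, s11, wx, wy):
    f0 = s00 * (1.0 - wy) + s01 * wy  # blend along y at the lower x corner
    f1 = s10 * (1.0 - wy) + s11 * wy  # blend along y at the upper x corner
    return f0 * (1.0 - wx) + f1 * wx  # then blend along x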
def _boundary_snap(sample_coords, spatial_shape):
max_indices = [l - 1 for l in spatial_shape]
for _i in range(len(spatial_shape)):
max_indices = tf.expand_dims(max_indices, 0)
sample_coords = tf.minimum(sample_coords, max_indices)
sample_coords = tf.maximum(sample_coords, 0)
return sample_coords
def _boundary_replicate(sample_coords, input_size):
return tf.maximum(tf.minimum(sample_coords, input_size - 1), 0)
def _boundary_circular(sample_coords, input_size):
return tf.mod(tf.mod(sample_coords, input_size) + input_size, input_size)
def _boundary_symmetric(sample_coords, input_size):
sample_coords = _boundary_circular(sample_coords, 2 * input_size)
return ((2 * input_size - 1) - tf.abs((2 * input_size - 1) - 2 * sample_coords)) // 2
def _boundary_reflect(sample_coords, input_size):
sample_coords = _boundary_circular(sample_coords, 2 * input_size - 2)
return (input_size - 1) - tf.abs((input_size - 1) - sample_coords)
SUPPORTED_BOUNDARY = {
'zero': _boundary_replicate,
'replicate': _boundary_replicate,
'circular': _boundary_circular,
'symmetric': _boundary_symmetric,
'reflect': _boundary_reflect,
}
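# --- Illustrative sketch (not part of the original file) ---
# The boundary functions above map out-of-range sample coordinates back into
# [0, input_size). A NumPy analogue of the circular rule (hypothetical helper,
# integer coordinates) behaves like this:
import numpy as np  # numpy is already used in this module; repeated for clarity

def _boundary_circular_np_example(coords, size):
    # The double modulo matches tf.mod's handling of negative coordinates.
    return np.mod(np.mod(coords, size) + size, size)

# _boundary_circular_np_example(np.array([-1, 0, 5]), 4) -> array([3, 0, 1])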
| true
| true
|
f7172942f9d5947ca0af46c7c7e888e04cf6623f
| 6,675
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/flavobacteriumhydatis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/flavobacteriumhydatis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/flavobacteriumhydatis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Flavobacterium hydatis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 16:22:03.956985
The undirected graph Flavobacterium hydatis has 4606 nodes and 568880 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05364 and has 24 connected components, where the component with most
nodes has 4515 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 233, the mean node degree is 247.02, and
the node degree mode is 4. The top 5 most central nodes are 991.IW20_22165
(degree 1553), 991.IW20_14795 (degree 1495), 991.IW20_22855 (degree 1329),
991.IW20_07570 (degree 1326) and 991.IW20_03660 (degree 1320).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import FlavobacteriumHydatis
# Then load the graph
graph = FlavobacteriumHydatis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def FlavobacteriumHydatis(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Flavobacterium hydatis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of Flavobacterium hydatis graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 16:22:03.956985
The undirected graph Flavobacterium hydatis has 4606 nodes and 568880 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05364 and has 24 connected components, where the component with most
nodes has 4515 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 233, the mean node degree is 247.02, and
the node degree mode is 4. The top 5 most central nodes are 991.IW20_22165
(degree 1553), 991.IW20_14795 (degree 1495), 991.IW20_22855 (degree 1329),
991.IW20_07570 (degree 1326) and 991.IW20_03660 (degree 1320).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import FlavobacteriumHydatis
# Then load the graph
graph = FlavobacteriumHydatis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="FlavobacteriumHydatis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.31746
| 223
| 0.703221
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def FlavobacteriumHydatis(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
return AutomaticallyRetrievedGraph(
graph_name="FlavobacteriumHydatis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
f7172b0b8c3ea7766ba1803624338fcfaab912a0
| 1,473
|
py
|
Python
|
Python/Algorithms/232.py
|
DimitrisJim/leetcode_solutions
|
765ea578748f8c9b21243dec9dc8a16163e85c0c
|
[
"Unlicense"
] | 2
|
2021-01-15T17:22:54.000Z
|
2021-05-16T19:58:02.000Z
|
Python/Algorithms/232.py
|
DimitrisJim/leetcode_solutions
|
765ea578748f8c9b21243dec9dc8a16163e85c0c
|
[
"Unlicense"
] | null | null | null |
Python/Algorithms/232.py
|
DimitrisJim/leetcode_solutions
|
765ea578748f8c9b21243dec9dc8a16163e85c0c
|
[
"Unlicense"
] | null | null | null |
class MyQueue:
def __init__(self):
""" Uses two stacks to implement a Queue. Storage holds elements
pushed right before the first pop.
"""
self.storage, self.tmp = [], []
def push(self, x: int) -> None:
""" Unconditionally add to storage. Equivalent to stack.push."""
self.storage.append(x)
def pop(self) -> int:
""" If tmp is empty, fill it up with all elements from storage, the
next N pops (where N is the total number of elements in storage before
filling tmp), will be O(1).
We only do this operation when tmp is empty. Uses append, pop which are
the Stacks equivalent of push, pop.
"""
tmp = self.tmp
if not tmp:
# could move in separate helper func
storage = self.storage
while storage:
tmp.append(storage.pop())
return tmp.pop()
def peek(self) -> int:
"""
Get the front element.
Uses list indexing with [-1], which is equivalent to stack.peek.
"""
tmp = self.tmp
if not tmp:
# could move in separate helper func
storage = self.storage
while storage:
tmp.append(storage.pop())
return tmp[-1]
def empty(self) -> bool:
"""
Uses len(lst) == 0, which is equivalent to stack.is_empty.
"""
return len(self.tmp) == len(self.storage) == 0
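# --- Illustrative usage sketch (not part of the original solution) ---
if __name__ == '__main__':
    q = MyQueue()
    q.push(1)
    q.push(2)
    assert q.peek() == 1  # oldest element sits at the front
    assert q.pop() == 1   # storage is drained into tmp once; pops are then O(1)
    q.push(3)
    assert q.pop() == 2   # still served from tmp
    assert q.pop() == 3   # tmp refills from storage on demand
    assert q.empty()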
| 30.6875
| 79
| 0.551935
|
class MyQueue:
def __init__(self):
self.storage, self.tmp = [], []
def push(self, x: int) -> None:
self.storage.append(x)
def pop(self) -> int:
tmp = self.tmp
if not tmp:
storage = self.storage
while storage:
tmp.append(storage.pop())
return tmp.pop()
def peek(self) -> int:
tmp = self.tmp
if not tmp:
storage = self.storage
while storage:
tmp.append(storage.pop())
return tmp[-1]
def empty(self) -> bool:
return len(self.tmp) == len(self.storage) == 0
| true
| true
|
f7172cfeb48041bf769a78d5a1e56314062ba794
| 5,141
|
py
|
Python
|
library/f5bigip_net_route_domain.py
|
GabrielFortin/ansible-module-f5bigip
|
8d1323e912388e20eafd63a73ec015dd6d8a012c
|
[
"Apache-2.0"
] | 6
|
2017-01-11T01:28:00.000Z
|
2019-02-19T16:11:09.000Z
|
library/f5bigip_net_route_domain.py
|
GabrielFortin/ansible-module-f5bigip
|
8d1323e912388e20eafd63a73ec015dd6d8a012c
|
[
"Apache-2.0"
] | 48
|
2017-05-29T17:50:59.000Z
|
2020-02-09T15:24:27.000Z
|
library/f5bigip_net_route_domain.py
|
GabrielFortin/ansible-module-f5bigip
|
8d1323e912388e20eafd63a73ec015dd6d8a012c
|
[
"Apache-2.0"
] | 5
|
2017-05-05T18:30:51.000Z
|
2017-12-19T23:13:05.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_net_route_domain
short_description: BIG-IP net route-domain module
description:
- Configures route-domains for traffic management.
version_added: "2.4"
author:
- "Eric Jacob (@erjac77)"
options:
app_service:
description:
- Specifies the application service that the object belongs to.
bwc_policy:
description:
- Configures the bandwidth control policy for the route-domain.
connection_limit:
description:
- Configures the connection limit for the route domain.
default: 0
description:
description:
- Specifies descriptive text that identifies the component.
flow_eviction_policy:
description:
- Specifies a flow eviction policy for the route domain to use, to select which flows to evict when the
number of connections approaches the connection limit on the route domain.
id:
description:
- Specifies a unique numeric identifier for the route-domain.
name:
description:
- Specifies unique name for the component.
required: true
parent:
description:
- Specifies the route domain the system searches when it cannot find a route in the configured domain.
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
strict:
description:
- Specifies whether the system allows a connection to span route domains.
default: enabled
choices: ['disabled', 'enabled']
vlans:
description:
- Specifies VLANs, by name, for the system to use in the route domain.
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create NET Route-Domain
f5bigip_net_route_domain:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_route_domain
partition: Common
id: 1234
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
app_service=dict(type='str'),
bwc_policy=dict(type='str'),
connection_limit=dict(type='int'),
description=dict(type='str'),
flow_eviction_policy=dict(type='str'),
fw_enforced_policy=dict(type='str'),
# fw_rules=dict(type='list'),
fw_staged_policy=dict(type='str'),
id=dict(type='int'),
parent=dict(type='str'),
routing_protocol=dict(type='list'),
strict=dict(type='str', choices=F5_ACTIVATION_CHOICES),
vlans=dict(type='list')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpNetRouteDomain(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.net.route_domains.route_domain.create,
'read': self._api.tm.net.route_domains.route_domain.load,
'update': self._api.tm.net.route_domains.route_domain.update,
'delete': self._api.tm.net.route_domains.route_domain.delete,
'exists': self._api.tm.net.route_domains.route_domain.exists
}
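# Editorial note (not in the original module): mapping generic CRUD verbs onto
# the f5-sdk endpoint methods lets the shared F5BigIpNamedObject base class
# drive create/read/update/delete for any resource type; each module only
# supplies this lookup table.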
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpNetRouteDomain(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
| 32.333333
| 115
| 0.665435
|
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_net_route_domain
short_description: BIG-IP net route-domain module
description:
- Configures route-domains for traffic management.
version_added: "2.4"
author:
- "Eric Jacob (@erjac77)"
options:
app_service:
description:
- Specifies the application service that the object belongs to.
bwc_policy:
description:
- Configures the bandwidth control policy for the route-domain.
connection_limit:
description:
- Configures the connection limit for the route domain.
default: 0
description:
description:
- Specifies descriptive text that identifies the component.
flow_eviction_policy:
description:
- Specifies a flow eviction policy for the route domain to use, to select which flows to evict when the
number of connections approaches the connection limit on the route domain.
id:
description:
- Specifies a unique numeric identifier for the route-domain.
name:
description:
- Specifies unique name for the component.
required: true
parent:
description:
- Specifies the route domain the system searches when it cannot find a route in the configured domain.
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
strict:
description:
- Specifies whether the system allows a connection to span route domains.
default: enabled
choices: ['disabled', 'enabled']
vlans:
description:
- Specifies VLANs, by name, for the system to use in the route domain.
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create NET Route-Domain
f5bigip_net_route_domain:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_route_domain
partition: Common
id: 1234
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
app_service=dict(type='str'),
bwc_policy=dict(type='str'),
connection_limit=dict(type='int'),
description=dict(type='str'),
flow_eviction_policy=dict(type='str'),
fw_enforced_policy=dict(type='str'),
fw_staged_policy=dict(type='str'),
id=dict(type='int'),
parent=dict(type='str'),
routing_protocol=dict(type='list'),
strict=dict(type='str', choices=F5_ACTIVATION_CHOICES),
vlans=dict(type='list')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpNetRouteDomain(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.net.route_domains.route_domain.create,
'read': self._api.tm.net.route_domains.route_domain.load,
'update': self._api.tm.net.route_domains.route_domain.update,
'delete': self._api.tm.net.route_domains.route_domain.delete,
'exists': self._api.tm.net.route_domains.route_domain.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpNetRouteDomain(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
| true
| true
|
f7172d0eeab28f547639ac4e3e76bcd936be54db
| 1,149
|
py
|
Python
|
tensorlayer/layers/__init__.py
|
yazdotai/tensorlayer
|
dea9d4023b578b4452c3861618e46466d4553658
|
[
"Apache-2.0"
] | 1
|
2019-02-24T22:23:59.000Z
|
2019-02-24T22:23:59.000Z
|
tensorlayer/layers/__init__.py
|
yazdotai/tensorlayer
|
dea9d4023b578b4452c3861618e46466d4553658
|
[
"Apache-2.0"
] | null | null | null |
tensorlayer/layers/__init__.py
|
yazdotai/tensorlayer
|
dea9d4023b578b4452c3861618e46466d4553658
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
TensorLayer provides rich layer implementations tailored for
various benchmarks and domain-specific problems. In addition, we also
support transparent access to native TensorFlow parameters.
For example, we provide not only layers for local response normalization, but also
layers that allow users to apply ``tf.nn.lrn`` on ``network.outputs``.
More functions can be found in `TensorFlow API <https://www.tensorflow.org/versions/master/api_docs/index.html>`__.
"""
from .binary import *
from .convolution import *
from .core import *
from .dense import *
from .dropout import *
from .extend import *
from .flow_control import *
from .image_resize import *
from .importer import *
from .inputs import *
from .merge import *
from .noise import *
from .normalization import *
from .object_detection import *
from .padding import *
from .pooling import *
from .reconstruction import *
from .recurrent import *
from .shape import *
from .spatial_transformer import *
from .special_activation import *
from .stack import *
from .super_resolution import *
from .time_distribution import *
from .utils import *
| 31.054054
| 115
| 0.767624
|
from .binary import *
from .convolution import *
from .core import *
from .dense import *
from .dropout import *
from .extend import *
from .flow_control import *
from .image_resize import *
from .importer import *
from .inputs import *
from .merge import *
from .noise import *
from .normalization import *
from .object_detection import *
from .padding import *
from .pooling import *
from .reconstruction import *
from .recurrent import *
from .shape import *
from .spatial_transformer import *
from .special_activation import *
from .stack import *
from .super_resolution import *
from .time_distribution import *
from .utils import *
| true
| true
|
f7172d3dba51a3291b1ccf91d79c1b90f924a771
| 473
|
py
|
Python
|
app/ch13-validation/starter/pypi_org/views/cms_views.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 496
|
2019-07-03T05:13:24.000Z
|
2022-03-27T01:15:10.000Z
|
app/ch13-validation/starter/pypi_org/views/cms_views.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 28
|
2021-03-10T08:24:07.000Z
|
2022-03-02T07:26:39.000Z
|
app/ch13-validation/starter/pypi_org/views/cms_views.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 562
|
2019-07-03T14:35:21.000Z
|
2022-03-31T06:23:58.000Z
|
import flask
from pypi_org.infrastructure.view_modifiers import response
import pypi_org.services.cms_service as cms_service
blueprint = flask.Blueprint('cms', __name__, template_folder='templates')
@blueprint.route('/<path:full_url>')
@response(template_file='cms/page.html')
def cms_page(full_url: str):
print("Getting CMS page for {}".format(full_url))
page = cms_service.get_page(full_url)
if not page:
return flask.abort(404)
return page
| 24.894737
| 73
| 0.748414
|
import flask
from pypi_org.infrastructure.view_modifiers import response
import pypi_org.services.cms_service as cms_service
blueprint = flask.Blueprint('cms', __name__, template_folder='templates')
@blueprint.route('/<path:full_url>')
@response(template_file='cms/page.html')
def cms_page(full_url: str):
print("Getting CMS page for {}".format(full_url))
page = cms_service.get_page(full_url)
if not page:
return flask.abort(404)
return page
| true
| true
|
f7172eb93f55fa05275985968c5cd0ba4d4c38c0
| 966
|
py
|
Python
|
robosuite/models/arenas/hole_arena.py
|
junjungoal/robosuite
|
14a9a8672bb14145dd4586a0c0080e1d0d3ff74e
|
[
"MIT"
] | null | null | null |
robosuite/models/arenas/hole_arena.py
|
junjungoal/robosuite
|
14a9a8672bb14145dd4586a0c0080e1d0d3ff74e
|
[
"MIT"
] | null | null | null |
robosuite/models/arenas/hole_arena.py
|
junjungoal/robosuite
|
14a9a8672bb14145dd4586a0c0080e1d0d3ff74e
|
[
"MIT"
] | null | null | null |
from robosuite.models.arenas import TableArena
class HoleArena(TableArena):
"""
Workspace that contains a tabletop with a fixed hole fixture.
Args:
table_full_size (3-tuple): (L,W,H) full dimensions of the table
table_friction (3-tuple): (sliding, torsional, rolling) friction parameters of the table
table_offset (3-tuple): (x,y,z) offset from center of arena when placing table.
Note that the z value sets the upper limit of the table
"""
def __init__(
self,
table_full_size=(0.45, 0.69, 0.05),
table_friction=(1, 0.005, 0.0001),
table_offset=(0, 0, 0),
):
super().__init__(
table_full_size=table_full_size,
table_friction=table_friction,
table_offset=table_offset,
xml="arenas/hole_arena.xml",
)
# Get a reference to the hole body
self.holebody = self.worldbody.find("./body[@name='hole_body']")
| 32.2
| 96
| 0.624224
|
from robosuite.models.arenas import TableArena
class HoleArena(TableArena):
def __init__(
self,
table_full_size=(0.45, 0.69, 0.05),
table_friction=(1, 0.005, 0.0001),
table_offset=(0, 0, 0),
):
super().__init__(
table_full_size=table_full_size,
table_friction=table_friction,
table_offset=table_offset,
xml="arenas/hole_arena.xml",
)
self.holebody = self.worldbody.find("./body[@name='hole_body']")
| true
| true
|
f7172fbd187e9bb300b57d617d3830a727c1ba69
| 3,748
|
py
|
Python
|
usaspending_api/idvs/v2/views/funding_rollup.py
|
gaybro8777/usaspending-api
|
fe9d730acd632401bbbefa168e3d86d59560314b
|
[
"CC0-1.0"
] | 1
|
2020-06-15T19:59:52.000Z
|
2020-06-15T19:59:52.000Z
|
usaspending_api/idvs/v2/views/funding_rollup.py
|
gaybro8777/usaspending-api
|
fe9d730acd632401bbbefa168e3d86d59560314b
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/idvs/v2/views/funding_rollup.py
|
gaybro8777/usaspending-api
|
fe9d730acd632401bbbefa168e3d86d59560314b
|
[
"CC0-1.0"
] | null | null | null |
from collections import OrderedDict
from psycopg2.sql import Identifier, Literal, SQL
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.helpers.sql_helpers import execute_sql_to_ordered_dictionary
from usaspending_api.common.validator.award import get_internal_or_generated_award_id_model
from usaspending_api.common.validator.tinyshield import validate_post_request
# As per direction from the product owner, agency data is to be retrieved from
the File D (awards) data, not File C (financial_accounts_by_awards). Also,
# even though this query structure looks terrible, it managed to boost
# performance a bit.
ROLLUP_SQL = SQL(
"""
with gather_award_ids as (
select award_id
from parent_award
where {award_id_column} = {award_id}
union all
select cpa.award_id
from parent_award ppa
inner join parent_award cpa on cpa.parent_award_id = ppa.award_id
where ppa.{award_id_column} = {award_id}
), gather_awards as (
select ca.id award_id,
ca.awarding_agency_id,
ca.funding_agency_id
from gather_award_ids gaids
inner join awards pa on pa.id = gaids.award_id
inner join awards ca on
ca.parent_award_piid = pa.piid and
ca.fpds_parent_agency_id = pa.fpds_agency_id and
ca.type not like 'IDV%'
), gather_financial_accounts_by_awards as (
select ga.awarding_agency_id,
ga.funding_agency_id,
nullif(faba.transaction_obligated_amount, 'NaN') transaction_obligated_amount,
faba.treasury_account_id
from gather_awards ga
inner join financial_accounts_by_awards faba on faba.award_id = ga.award_id
)
select
coalesce(sum(gfaba.transaction_obligated_amount), 0.0) total_transaction_obligated_amount,
count(distinct aa.toptier_agency_id) awarding_agency_count,
count(distinct af.toptier_agency_id) funding_agency_count,
count(distinct taa.agency_id || '-' || taa.main_account_code) federal_account_count
from
gather_financial_accounts_by_awards gfaba
left outer join treasury_appropriation_account taa on
taa.treasury_account_identifier = gfaba.treasury_account_id
left outer join agency aa on aa.id = gfaba.awarding_agency_id
left outer join agency af on af.id = gfaba.funding_agency_id
"""
)
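# Editorial note (not in the original module): psycopg2.sql composes queries
# without string interpolation, so the award id can never be injected as raw
# SQL. Identifier renders a double-quoted column name and Literal a safely
# quoted value, e.g.
#   SQL("where {col} = {val}").format(col=Identifier("award_id"),
#                                     val=Literal(42))
# renders as:  where "award_id" = 42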
@validate_post_request([get_internal_or_generated_award_id_model()])
class IDVFundingRollupViewSet(APIView):
"""
Returns File C funding totals associated with an IDV's children.
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/idvs/funding_rollup.md"
@staticmethod
def _business_logic(request_data: dict) -> OrderedDict:
# By this point, our award_id has been validated and cleaned up by
# TinyShield. We will either have an internal award id that is an
# integer or a generated award id that is a string.
award_id = request_data["award_id"]
award_id_column = "award_id" if type(award_id) is int else "generated_unique_award_id"
sql = ROLLUP_SQL.format(award_id_column=Identifier(award_id_column), award_id=Literal(award_id))
return execute_sql_to_ordered_dictionary(sql)[0]
@cache_response()
def post(self, request: Request) -> Response:
return Response(self._business_logic(request.data))
| 44.094118
| 107
| 0.702241
|
from collections import OrderedDict
from psycopg2.sql import Identifier, Literal, SQL
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.helpers.sql_helpers import execute_sql_to_ordered_dictionary
from usaspending_api.common.validator.award import get_internal_or_generated_award_id_model
from usaspending_api.common.validator.tinyshield import validate_post_request
ROLLUP_SQL = SQL(
"""
with gather_award_ids as (
select award_id
from parent_award
where {award_id_column} = {award_id}
union all
select cpa.award_id
from parent_award ppa
inner join parent_award cpa on cpa.parent_award_id = ppa.award_id
where ppa.{award_id_column} = {award_id}
), gather_awards as (
select ca.id award_id,
ca.awarding_agency_id,
ca.funding_agency_id
from gather_award_ids gaids
inner join awards pa on pa.id = gaids.award_id
inner join awards ca on
ca.parent_award_piid = pa.piid and
ca.fpds_parent_agency_id = pa.fpds_agency_id and
ca.type not like 'IDV%'
), gather_financial_accounts_by_awards as (
select ga.awarding_agency_id,
ga.funding_agency_id,
nullif(faba.transaction_obligated_amount, 'NaN') transaction_obligated_amount,
faba.treasury_account_id
from gather_awards ga
inner join financial_accounts_by_awards faba on faba.award_id = ga.award_id
)
select
coalesce(sum(gfaba.transaction_obligated_amount), 0.0) total_transaction_obligated_amount,
count(distinct aa.toptier_agency_id) awarding_agency_count,
count(distinct af.toptier_agency_id) funding_agency_count,
count(distinct taa.agency_id || '-' || taa.main_account_code) federal_account_count
from
gather_financial_accounts_by_awards gfaba
left outer join treasury_appropriation_account taa on
taa.treasury_account_identifier = gfaba.treasury_account_id
left outer join agency aa on aa.id = gfaba.awarding_agency_id
left outer join agency af on af.id = gfaba.funding_agency_id
"""
)
@validate_post_request([get_internal_or_generated_award_id_model()])
class IDVFundingRollupViewSet(APIView):
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/idvs/funding_rollup.md"
@staticmethod
def _business_logic(request_data: dict) -> OrderedDict:
award_id = request_data["award_id"]
award_id_column = "award_id" if type(award_id) is int else "generated_unique_award_id"
sql = ROLLUP_SQL.format(award_id_column=Identifier(award_id_column), award_id=Literal(award_id))
return execute_sql_to_ordered_dictionary(sql)[0]
@cache_response()
def post(self, request: Request) -> Response:
return Response(self._business_logic(request.data))
| true
| true
|
f7173014b1cb6f863746c4e0773f7adabde24751
| 1,474
|
py
|
Python
|
test/test_runner_change.py
|
akxlr/bf-stream-py
|
f2d5fb1afbfcae61713f2414da3e40d326960e58
|
[
"Apache-2.0"
] | null | null | null |
test/test_runner_change.py
|
akxlr/bf-stream-py
|
f2d5fb1afbfcae61713f2414da3e40d326960e58
|
[
"Apache-2.0"
] | null | null | null |
test/test_runner_change.py
|
akxlr/bf-stream-py
|
f2d5fb1afbfcae61713f2414da3e40d326960e58
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Betfair: Exchange Streaming API
API to receive streamed updates. This is an ssl socket connection of CRLF delimited json messages (see RequestMessage & ResponseMessage)
OpenAPI spec version: 1.0.1423
Contact: bdp@betfair.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.runner_change import RunnerChange
class TestRunnerChange(unittest.TestCase):
""" RunnerChange unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testRunnerChange(self):
"""
Test RunnerChange
"""
model = swagger_client.models.runner_change.RunnerChange()
if __name__ == '__main__':
unittest.main()
| 27.811321
| 140
| 0.718453
|
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.runner_change import RunnerChange
class TestRunnerChange(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testRunnerChange(self):
model = swagger_client.models.runner_change.RunnerChange()
if __name__ == '__main__':
unittest.main()
| true
| true
|
f717303bc1bbfdb991baae18496729085b8816f6
| 1,283
|
py
|
Python
|
jupyterlab/PrettifyPage.py
|
Larz60p/MakerProjectApril2019
|
2fd4d68aa66c1f4ad3b01f6a9589a078319280d7
|
[
"MIT"
] | 1
|
2019-04-25T22:53:52.000Z
|
2019-04-25T22:53:52.000Z
|
jupyterlab/PrettifyPage.py
|
Larz60p/MakerProjectApril2019
|
2fd4d68aa66c1f4ad3b01f6a9589a078319280d7
|
[
"MIT"
] | null | null | null |
jupyterlab/PrettifyPage.py
|
Larz60p/MakerProjectApril2019
|
2fd4d68aa66c1f4ad3b01f6a9589a078319280d7
|
[
"MIT"
] | null | null | null |
# PrettifyPage.py
from bs4 import BeautifulSoup
import requests
import BusinessPaths
import pathlib
class PrettifyPage:
def __init__(self):
self.bpath = BusinessPaths.BusinessPaths()
def prettify(self, soup, indent):
pretty_soup = str()
previous_indent = 0
for line in soup.prettify().split("\n"):
current_indent = str(line).find("<")
if current_indent == -1 or current_indent > previous_indent + 2:
current_indent = previous_indent + 1
previous_indent = current_indent
pretty_soup += self.write_new_line(line, current_indent, indent)
return pretty_soup
def write_new_line(self, line, current_indent, desired_indent):
new_line = ""
spaces_to_add = (current_indent * desired_indent) - current_indent
if spaces_to_add > 0:
for i in range(spaces_to_add):
new_line += " "
new_line += str(line) + "\n"
return new_line
if __name__ == '__main__':
pp = PrettifyPage()
pfilename = pp.bpath.htmlpath / 'BusinessEntityRecordsAA.html'
with pfilename.open('rb') as fp:
page = fp.read()
soup = BeautifulSoup(page, 'lxml')
pretty = pp.prettify(soup, indent=2)
print(pretty)
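# Editorial note (not in the original file): BeautifulSoup's prettify()
# indents one space per nesting depth, so a line at depth d starts at column
# d. write_new_line adds d * (indent - 1) extra spaces, re-indenting the line
# to d * indent columns.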
| 31.292683
| 76
| 0.632112
|
from bs4 import BeautifulSoup
import requests
import BusinessPaths
import pathlib
class PrettifyPage:
def __init__(self):
self.bpath = BusinessPaths.BusinessPaths()
def prettify(self, soup, indent):
pretty_soup = str()
previous_indent = 0
for line in soup.prettify().split("\n"):
current_indent = str(line).find("<")
if current_indent == -1 or current_indent > previous_indent + 2:
current_indent = previous_indent + 1
previous_indent = current_indent
pretty_soup += self.write_new_line(line, current_indent, indent)
return pretty_soup
def write_new_line(self, line, current_indent, desired_indent):
new_line = ""
spaces_to_add = (current_indent * desired_indent) - current_indent
if spaces_to_add > 0:
for i in range(spaces_to_add):
new_line += " "
new_line += str(line) + "\n"
return new_line
if __name__ == '__main__':
pp = PrettifyPage()
pfilename = pp.bpath.htmlpath / 'BusinessEntityRecordsAA.html'
with pfilename.open('rb') as fp:
page = fp.read()
soup = BeautifulSoup(page, 'lxml')
pretty = pp.prettify(soup, indent=2)
print(pretty)
| true
| true
|
f71730a095f88ee9dd10f077cd9170c324f0f4c1
| 755
|
py
|
Python
|
src/metadata/ruby/parse_author_package.py
|
Yanivmd/maloss
|
af85ac202668da88d0b4a885386a1e56703e37c8
|
[
"MIT"
] | 1
|
2022-01-29T16:13:06.000Z
|
2022-01-29T16:13:06.000Z
|
src/metadata/ruby/parse_author_package.py
|
Yanivmd/maloss
|
af85ac202668da88d0b4a885386a1e56703e37c8
|
[
"MIT"
] | null | null | null |
src/metadata/ruby/parse_author_package.py
|
Yanivmd/maloss
|
af85ac202668da88d0b4a885386a1e56703e37c8
|
[
"MIT"
] | 1
|
2022-01-29T16:13:07.000Z
|
2022-01-29T16:13:07.000Z
|
import json
name_site = {}
with open('rubygems_metadata.txt') as json_file:
data = json.load(json_file)
for p1 in data['ruby_package']:
dep = p1['dependencies']
if dep:
for val in dep:
print(val)
else:
print "list is empty..."
if p1['author'] in name_site:
continue
names = []
names.append(p1['author'])
for p2 in data['ruby_package']:
#print p2['name']
if p1['author'] == p2['author'] and p1['name'] != p2['name']:
#print p1['author']
names.append(p2['name'])
name_site[p1['author']]=names
with open('author_package_ruby.txt', 'a') as f:
for key, value in name_site.items():
f.write('%s:%s\n' % (key, value))
| 27.962963
| 73
| 0.54702
|
import json
name_site = {}
with open('rubygems_metadata.txt') as json_file:
data = json.load(json_file)
for p1 in data['ruby_package']:
dep = p1['dependencies']
if dep:
for val in dep:
print(val)
else:
print "list is empty..."
if p1['author'] in name_site:
continue
names = []
names.append(p1['author'])
for p2 in data['ruby_package']:
if p1['author'] == p2['author'] and p1['name'] != p2['name']:
names.append(p2['name'])
name_site[p1['author']]=names
with open('author_package_ruby.txt', 'a') as f:
for key, value in name_site.items():
f.write('%s:%s\n' % (key, value))
| false
| true
|
f71730b686a7fb863fdc989a73270099f5159e60
| 2,381
|
py
|
Python
|
Python 3 Codes/bert_embed.py
|
Nitinram23/text-to-image
|
f819bed3dffbccd8e20b03741e3f67178729812b
|
[
"MIT"
] | null | null | null |
Python 3 Codes/bert_embed.py
|
Nitinram23/text-to-image
|
f819bed3dffbccd8e20b03741e3f67178729812b
|
[
"MIT"
] | null | null | null |
Python 3 Codes/bert_embed.py
|
Nitinram23/text-to-image
|
f819bed3dffbccd8e20b03741e3f67178729812b
|
[
"MIT"
] | null | null | null |
from bert_embedding import BertEmbedding
import numpy as np
import pickle
import argparse
import json
import os
from os.path import join, isfile
import re
import h5py
def save_caption_vectors_flowers(data_dir):
import time
img_dir = join(data_dir, 'flowers/jpg')
image_files = [f for f in os.listdir(img_dir) if 'jpg' in f]
# print(image_files[300:400])
# print(len(image_files))
image_captions = { img_file : [] for img_file in image_files }
caption_dir = join(data_dir, 'flowers/text_c10')
class_dirs = []
for i in range(1, 103):
class_dir_name = 'class_%.5d'%(i)
class_dirs.append( join(caption_dir, class_dir_name))
for class_dir in class_dirs:
caption_files = [f for f in os.listdir(class_dir) if 'txt' in f]
for cap_file in caption_files:
with open(join(class_dir,cap_file)) as f:
captions = f.read().split('\n')
img_file = cap_file[0:11] + ".jpg"
# 5 captions per image
image_captions[img_file] += [cap for cap in captions if len(cap) > 0][0:5]
encoded_captions = {}
bert_embedding = BertEmbedding()
for i, img in enumerate(image_captions):
st = time.time()
embed_list = []
embedding = bert_embedding(image_captions[img], 'avg')
for sent in range(len(image_captions[img])):
# Reset the accumulator per sentence so each caption gets its own mean
# word embedding (previously the running sum leaked across sentences).
embed_sum = np.zeros(768)
word_embed_list = embedding[sent][1]
for word_embed in word_embed_list:
embed_sum += word_embed
embed_list.append(embed_sum / len(word_embed_list))
embed_list_np = np.asarray(embed_list)
encoded_captions[img] = embed_list_np
print(i, len(image_captions), img)
print("Seconds", time.time() - st)
h = h5py.File(join(data_dir, 'flower_bert.hdf5'))
for key in encoded_captions:
h.create_dataset(key, data=encoded_captions[key])
h.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--split', type=str, default='train',
help='train/val')
parser.add_argument('--data_dir', type=str, default='Data',
help='Data directory')
parser.add_argument('--batch_size', type=int, default=64,
help='Batch Size')
parser.add_argument('--data_set', type=str, default='flowers',
help='Data Set : Flowers, MS-COCO')
args = parser.parse_args()
if args.data_set == 'flowers':
save_caption_vectors_flowers(args.data_dir)
else:
print('incorrect data')
if __name__ == '__main__':
main()
| 30.139241
| 77
| 0.692986
|
from bert_embedding import BertEmbedding
import numpy as np
import pickle
import argparse
import json
import os
from os.path import join, isfile
import re
import h5py
def save_caption_vectors_flowers(data_dir):
import time
img_dir = join(data_dir, 'flowers/jpg')
image_files = [f for f in os.listdir(img_dir) if 'jpg' in f]
image_captions = { img_file : [] for img_file in image_files }
caption_dir = join(data_dir, 'flowers/text_c10')
class_dirs = []
for i in range(1, 103):
class_dir_name = 'class_%.5d'%(i)
class_dirs.append( join(caption_dir, class_dir_name))
for class_dir in class_dirs:
caption_files = [f for f in os.listdir(class_dir) if 'txt' in f]
for cap_file in caption_files:
with open(join(class_dir,cap_file)) as f:
captions = f.read().split('\n')
img_file = cap_file[0:11] + ".jpg"
image_captions[img_file] += [cap for cap in captions if len(cap) > 0][0:5]
encoded_captions = {}
bert_embedding = BertEmbedding()
for i, img in enumerate(image_captions):
st = time.time()
embed_list = []
embedding = bert_embedding(image_captions[img], 'avg')
for sent in range(len(image_captions[img])):
embed_sum = np.zeros(768)
word_embed_list = embedding[sent][1]
for word_embed in word_embed_list:
embed_sum += word_embed
embed_list.append(embed_sum / len(word_embed_list))
embed_list_np = np.asarray(embed_list)
encoded_captions[img] = embed_list_np
print(i, len(image_captions), img)
print("Seconds", time.time() - st)
h = h5py.File(join(data_dir, 'flower_bert.hdf5'))
for key in encoded_captions:
h.create_dataset(key, data=encoded_captions[key])
h.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--split', type=str, default='train',
help='train/val')
parser.add_argument('--data_dir', type=str, default='Data',
help='Data directory')
parser.add_argument('--batch_size', type=int, default=64,
help='Batch Size')
parser.add_argument('--data_set', type=str, default='flowers',
help='Data Set : Flowers, MS-COCO')
args = parser.parse_args()
if args.data_set == 'flowers':
save_caption_vectors_flowers(args.data_dir)
else:
print('incorrect data')
if __name__ == '__main__':
main()
| true
| true
|
f71730dab83cd1a03f3141bd046a4fc4403b2874
| 1,886
|
py
|
Python
|
libs/play_db.py
|
fangMint/django_web
|
cf50df6c1a2358996620ac83ffa99b31472d3c07
|
[
"AFL-3.0"
] | null | null | null |
libs/play_db.py
|
fangMint/django_web
|
cf50df6c1a2358996620ac83ffa99b31472d3c07
|
[
"AFL-3.0"
] | null | null | null |
libs/play_db.py
|
fangMint/django_web
|
cf50df6c1a2358996620ac83ffa99b31472d3c07
|
[
"AFL-3.0"
] | 1
|
2021-11-22T10:15:50.000Z
|
2021-11-22T10:15:50.000Z
|
# ==================================
# Author : fang
# Time : 2020/4/8 pm 8:55
# Email : zhen.fang@qdreamer.com
# File : play_db.py
# Software : PyCharm
# ==================================
import datetime
DB = {}
class PlayDB:
def __init__(self, inherited=False):
if inherited:
self.__store = DB  # temporary data store (shared module-level dict)
else:
self.__store = {}
@staticmethod
def __timestamp():
time_stamp = datetime.datetime.now().timestamp()
time_stamp = int(round(time_stamp * 3000))
return time_stamp
def save(self, **kwargs):
tc = 0
for k, v in kwargs.items():
tc += 1
if not self.key_is_exists(k):
db_data = {"value": v, "data_stamp": self.__timestamp()}
else:
db_data = {"value": v, "data_stamp": self.__store[k]["data_stamp"]}
self.__store[k] = db_data
return tc
def delete(self, key):
if self.key_is_exists(key):
tv = self.__store.get(key)
del self.__store[key]
return {key: tv}
return False
def __get_or_consume(self, key, _all=False, _d=False):
if self.key_is_exists(key):
if not _all:
this_value = self.__store.get(key)["value"]
else:
this_value = self.__store.get(key)
if _d:
self.delete(key)
return this_value
raise ValueError(f"{key} does not exist in store")
def update(self, **kwargs):
return self.save(**kwargs)
def get(self, key):
return self.__get_or_consume(key, _all=False, _d=False)
def consume(self, key):
return self.__get_or_consume(key, _all=False, _d=True)
def key_is_exists(self, key):
return key in self.__store.keys()
play_global = PlayDB(inherited=True)
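# --- Illustrative usage sketch (not part of the original file) ---
if __name__ == '__main__':
    db = PlayDB()
    db.save(token="abc", retries=3)      # returns the number of keys written
    assert db.get("token") == "abc"      # read without removing
    assert db.consume("retries") == 3    # read and delete in one step
    assert not db.key_is_exists("retries")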
| 27.333333
| 83
| 0.534995
|
import datetime
DB = {}
class PlayDB:
def __init__(self, inherited=False):
if inherited:
self.__store = DB
else:
self.__store = {}
@staticmethod
def __timestamp():
time_stamp = datetime.datetime.now().timestamp()
time_stamp = int(round(time_stamp * 3000))
return time_stamp
def save(self, **kwargs):
tc = 0
for k, v in kwargs.items():
tc += 1
if not self.key_is_exists(k):
db_data = {"value": v, "data_stamp": self.__timestamp()}
else:
db_data = {"value": v, "data_stamp": self.__store[k]["data_stamp"]}
self.__store[k] = db_data
return tc
def delete(self, key):
if self.key_is_exists(key):
tv = self.__store.get(key)
del self.__store[key]
return {key: tv}
return False
def __get_or_consume(self, key, _all=False, _d=False):
if self.key_is_exists(key):
if not _all:
this_value = self.__store.get(key)["value"]
else:
this_value = self.__store.get(key)
if _d:
self.delete(key)
return this_value
raise ValueError(f"{key} does not exist in store")
def update(self, **kwargs):
return self.save(**kwargs)
def get(self, key):
return self.__get_or_consume(key, _all=False, _d=False)
def consume(self, key):
return self.__get_or_consume(key, _all=False, _d=True)
def key_is_exists(self, key):
return key in self.__store.keys()
play_global = PlayDB(inherited=True)
| true
| true
|
f71730fb4a31346934ff7ec9a4083bfc3b7a99ee
| 16,084
|
py
|
Python
|
pandas/tests/reshape/test_cut.py
|
developing-coder/pandas
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
[
"BSD-3-Clause"
] | 1
|
2019-05-04T03:42:25.000Z
|
2019-05-04T03:42:25.000Z
|
pandas/tests/reshape/test_cut.py
|
developing-coder/pandas
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/reshape/test_cut.py
|
developing-coder/pandas
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
[
"BSD-3-Clause"
] | 1
|
2020-01-02T14:28:17.000Z
|
2020-01-02T14:28:17.000Z
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, Interval, IntervalIndex,
Series, TimedeltaIndex, Timestamp, cut, date_range, isna, qcut,
timedelta_range, to_datetime)
from pandas.api.types import CategoricalDtype as CDT
import pandas.core.reshape.tile as tmod
import pandas.util.testing as tm
def test_simple():
data = np.ones(5, dtype="int64")
result = cut(data, 4, labels=False)
expected = np.array([1, 1, 1, 1, 1])
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_bins():
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_right():
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
expected = Categorical(intervals, ordered=True)
expected = expected.take([0, 0, 0, 2, 3, 0, 0])
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7]))
def test_no_right():
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3), closed="left")
intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095]))
def test_array_like():
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_bins_from_interval_index():
c = cut(range(5), 3)
expected = c
result = cut(range(5), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
expected = Categorical.from_codes(np.append(c.codes, -1),
categories=c.categories,
ordered=True)
result = cut(range(6), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
def test_bins_from_interval_index_doc_example():
# Make sure we preserve the bins.
ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])
c = cut(ages, bins=[0, 18, 35, 70])
expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])
tm.assert_index_equal(c.categories, expected)
result = cut([25, 20, 50], bins=c.categories)
tm.assert_index_equal(result.categories, expected)
tm.assert_numpy_array_equal(result.codes,
np.array([1, 1, 2], dtype="int8"))
def test_bins_not_overlapping_from_interval_index():
# see gh-23980
msg = "Overlapping IntervalIndex is not accepted"
ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)])
with pytest.raises(ValueError, match=msg):
cut([5, 6], bins=ii)
def test_bins_not_monotonic():
msg = "bins must increase monotonically"
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
with pytest.raises(ValueError, match=msg):
cut(data, [0.1, 1.5, 1, 10])
@pytest.mark.parametrize("x, bins, expected", [
(date_range("2017-12-31", periods=3),
[Timestamp.min, Timestamp('2018-01-01'), Timestamp.max],
IntervalIndex.from_tuples([
(Timestamp.min, Timestamp('2018-01-01')),
(Timestamp('2018-01-01'), Timestamp.max)])),
([-1, 0, 1],
np.array([np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max],
dtype="int64"),
IntervalIndex.from_tuples([
(np.iinfo(np.int64).min, 0),
(0, np.iinfo(np.int64).max)])),
([np.timedelta64(-1), np.timedelta64(0), np.timedelta64(1)],
np.array([
np.timedelta64(-np.iinfo(np.int64).max),
np.timedelta64(0),
np.timedelta64(np.iinfo(np.int64).max)]),
IntervalIndex.from_tuples([
(np.timedelta64(-np.iinfo(np.int64).max), np.timedelta64(0)),
(np.timedelta64(0), np.timedelta64(np.iinfo(np.int64).max))])),
])
def test_bins_monotonic_not_overflowing(x, bins, expected):
# GH 26045
result = cut(x, bins)
tm.assert_index_equal(result.categories, expected)
def test_wrong_num_labels():
msg = "Bin labels must be one fewer than the number of bin edges"
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
with pytest.raises(ValueError, match=msg):
cut(data, [0, 1, 10], labels=["foo", "bar", "baz"])
@pytest.mark.parametrize("x,bins,msg", [
([], 2, "Cannot cut empty array"),
([1, 2, 3], 0.5, "`bins` should be a positive integer")
])
def test_cut_corner(x, bins, msg):
with pytest.raises(ValueError, match=msg):
cut(x, bins)
@pytest.mark.parametrize("arg", [2, np.eye(2), DataFrame(np.eye(2))])
@pytest.mark.parametrize("cut_func", [cut, qcut])
def test_cut_not_1d_arg(arg, cut_func):
msg = "Input array must be 1 dimensional"
with pytest.raises(ValueError, match=msg):
cut_func(arg, 2)
@pytest.mark.parametrize('data', [
[0, 1, 2, 3, 4, np.inf],
[-np.inf, 0, 1, 2, 3, 4],
[-np.inf, 0, 1, 2, 3, 4, np.inf]])
def test_int_bins_with_inf(data):
# GH 24314
msg = 'cannot specify integer `bins` when input data contains infinity'
with pytest.raises(ValueError, match=msg):
cut(data, bins=3)
def test_cut_out_of_range_more():
# see gh-1511
name = "x"
ser = Series([0, -1, 0, 1, -3], name=name)
ind = cut(ser, [0, 1], labels=False)
exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name)
tm.assert_series_equal(ind, exp)
@pytest.mark.parametrize("right,breaks,closed", [
(True, [-1e-3, 0.25, 0.5, 0.75, 1], "right"),
(False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], "left")
])
def test_labels(right, breaks, closed):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
result, bins = cut(arr, 4, retbins=True, right=right)
ex_levels = IntervalIndex.from_breaks(breaks, closed=closed)
tm.assert_index_equal(result.categories, ex_levels)
def test_cut_pass_series_name_to_factor():
name = "foo"
ser = Series(np.random.randn(100), name=name)
factor = cut(ser, 4)
assert factor.name == name
def test_label_precision():
arr = np.arange(0, 0.73, 0.01)
result = cut(arr, 4, precision=2)
ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72])
tm.assert_index_equal(result.categories, ex_levels)
@pytest.mark.parametrize("labels", [None, False])
def test_na_handling(labels):
arr = np.arange(0, 0.75, 0.01)
arr[::3] = np.nan
result = cut(arr, 4, labels=labels)
result = np.asarray(result)
expected = np.where(isna(arr), np.nan, result)
tm.assert_almost_equal(result, expected)
def test_inf_handling():
data = np.arange(6)
data_ser = Series(data, dtype="int64")
bins = [-np.inf, 2, 4, np.inf]
result = cut(data, bins)
result_ser = cut(data_ser, bins)
ex_uniques = IntervalIndex.from_breaks(bins)
tm.assert_index_equal(result.categories, ex_uniques)
assert result[5] == Interval(4, np.inf)
assert result[0] == Interval(-np.inf, 2)
assert result_ser[5] == Interval(4, np.inf)
assert result_ser[0] == Interval(-np.inf, 2)
def test_cut_out_of_bounds():
arr = np.random.randn(100)
result = cut(arr, [-1, 0, 1])
mask = isna(result)
ex_mask = (arr < -1) | (arr > 1)
tm.assert_numpy_array_equal(mask, ex_mask)
@pytest.mark.parametrize("get_labels,get_expected", [
(lambda labels: labels,
lambda labels: Categorical(["Medium"] + 4 * ["Small"] +
["Medium", "Large"],
categories=labels, ordered=True)),
(lambda labels: Categorical.from_codes([0, 1, 2], labels),
lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels))
])
def test_cut_pass_labels(get_labels, get_expected):
bins = [0, 25, 50, 100]
arr = [50, 5, 10, 15, 20, 30, 70]
labels = ["Small", "Medium", "Large"]
result = cut(arr, bins, labels=get_labels(labels))
tm.assert_categorical_equal(result, get_expected(labels))
def test_cut_pass_labels_compat():
# see gh-16459
arr = [50, 5, 10, 15, 20, 30, 70]
labels = ["Good", "Medium", "Bad"]
result = cut(arr, 3, labels=labels)
exp = cut(arr, 3, labels=Categorical(labels, categories=labels,
ordered=True))
tm.assert_categorical_equal(result, exp)
@pytest.mark.parametrize("x", [np.arange(11.), np.arange(11.) / 1e10])
def test_round_frac_just_works(x):
# It works.
cut(x, 2)
@pytest.mark.parametrize("val,precision,expected", [
(-117.9998, 3, -118),
(117.9998, 3, 118),
(117.9998, 2, 118),
(0.000123456, 2, 0.00012)
])
def test_round_frac(val, precision, expected):
# see gh-1979
result = tmod._round_frac(val, precision=precision)
assert result == expected
def test_cut_return_intervals():
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
result = cut(ser, 3)
exp_bins = np.linspace(0, 8, num=4).round(3)
exp_bins[0] -= 0.008
expected = Series(IntervalIndex.from_breaks(exp_bins, closed="right").take(
[0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_series_ret_bins():
# see gh-8589
ser = Series(np.arange(4))
result, bins = cut(ser, 2, retbins=True)
expected = Series(IntervalIndex.from_breaks(
[-0.003, 1.5, 3], closed="right").repeat(2)).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kwargs,msg", [
(dict(duplicates="drop"), None),
(dict(), "Bin edges must be unique"),
(dict(duplicates="raise"), "Bin edges must be unique"),
(dict(duplicates="foo"), "invalid value for 'duplicates' parameter")
])
def test_cut_duplicates_bin(kwargs, msg):
# see gh-20947
bins = [0, 2, 4, 6, 10, 10]
values = Series(np.array([1, 3, 5, 7, 9]), index=["a", "b", "c", "d", "e"])
if msg is not None:
with pytest.raises(ValueError, match=msg):
cut(values, bins, **kwargs)
else:
result = cut(values, bins, **kwargs)
expected = cut(values, pd.unique(bins))
tm.assert_series_equal(result, expected)
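# Illustrative sketch of the behaviour tested above (comments only): repeated
# bin edges raise by default, while duplicates="drop" collapses them.
#
#   cut([1, 3, 5, 7, 9], bins=[0, 2, 4, 6, 10, 10])                     # raises ValueError
#   cut([1, 3, 5, 7, 9], bins=[0, 2, 4, 6, 10, 10], duplicates="drop")  # like bins=[0, 2, 4, 6, 10]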
@pytest.mark.parametrize("data", [9.0, -9.0, 0.0])
@pytest.mark.parametrize("length", [1, 2])
def test_single_bin(data, length):
# see gh-14652, gh-15428
ser = Series([data] * length)
result = cut(ser, 1, labels=False)
expected = Series([0] * length)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"array_1_writeable,array_2_writeable",
[(True, True), (True, False), (False, False)])
def test_cut_read_only(array_1_writeable, array_2_writeable):
# issue 18773
array_1 = np.arange(0, 100, 10)
array_1.flags.writeable = array_1_writeable
array_2 = np.arange(0, 100, 10)
array_2.flags.writeable = array_2_writeable
hundred_elements = np.arange(100)
tm.assert_categorical_equal(cut(hundred_elements, array_1),
cut(hundred_elements, array_2))
@pytest.mark.parametrize("conv", [
lambda v: Timestamp(v),
lambda v: to_datetime(v),
lambda v: np.datetime64(v),
lambda v: Timestamp(v).to_pydatetime(),
])
def test_datetime_bin(conv):
data = [np.datetime64("2012-12-13"), np.datetime64("2012-12-15")]
bin_data = ["2012-12-12", "2012-12-14", "2012-12-16"]
expected = Series(IntervalIndex([
Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),
Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))])).astype(
CDT(ordered=True))
bins = [conv(v) for v in bin_data]
result = Series(cut(data, bins=bins))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("data", [
to_datetime(Series(["2013-01-01", "2013-01-02", "2013-01-03"])),
[np.datetime64("2013-01-01"), np.datetime64("2013-01-02"),
np.datetime64("2013-01-03")],
np.array([np.datetime64("2013-01-01"), np.datetime64("2013-01-02"),
np.datetime64("2013-01-03")]),
DatetimeIndex(["2013-01-01", "2013-01-02", "2013-01-03"])
])
def test_datetime_cut(data):
# see gh-14714
#
# Testing time data when it comes in various collection types.
result, _ = cut(data, 3, retbins=True)
expected = Series(IntervalIndex([
Interval(Timestamp("2012-12-31 23:57:07.200000"),
Timestamp("2013-01-01 16:00:00")),
Interval(Timestamp("2013-01-01 16:00:00"),
Timestamp("2013-01-02 08:00:00")),
Interval(Timestamp("2013-01-02 08:00:00"),
Timestamp("2013-01-03 00:00:00"))])).astype(CDT(ordered=True))
tm.assert_series_equal(Series(result), expected)
@pytest.mark.parametrize("bins", [
3, [Timestamp("2013-01-01 04:57:07.200000"),
Timestamp("2013-01-01 21:00:00"),
Timestamp("2013-01-02 13:00:00"),
Timestamp("2013-01-03 05:00:00")]])
@pytest.mark.parametrize("box", [list, np.array, Index, Series])
def test_datetime_tz_cut(bins, box):
# see gh-19872
tz = "US/Eastern"
s = Series(date_range("20130101", periods=3, tz=tz))
if not isinstance(bins, int):
bins = box(bins)
result = cut(s, bins)
expected = Series(IntervalIndex([
Interval(Timestamp("2012-12-31 23:57:07.200000", tz=tz),
Timestamp("2013-01-01 16:00:00", tz=tz)),
Interval(Timestamp("2013-01-01 16:00:00", tz=tz),
Timestamp("2013-01-02 08:00:00", tz=tz)),
Interval(Timestamp("2013-01-02 08:00:00", tz=tz),
Timestamp("2013-01-03 00:00:00", tz=tz))])).astype(
CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_datetime_nan_error():
msg = "bins must be of datetime64 dtype"
with pytest.raises(ValueError, match=msg):
cut(date_range("20130101", periods=3), bins=[0, 2, 4])
def test_datetime_nan_mask():
result = cut(date_range("20130102", periods=5),
bins=date_range("20130101", periods=2))
mask = result.categories.isna()
tm.assert_numpy_array_equal(mask, np.array([False]))
mask = result.isna()
tm.assert_numpy_array_equal(mask, np.array([False, True, True,
True, True]))
@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"])
def test_datetime_cut_roundtrip(tz):
# see gh-19891
ser = Series(date_range("20180101", periods=3, tz=tz))
result, result_bins = cut(ser, 2, retbins=True)
expected = cut(ser, result_bins)
tm.assert_series_equal(result, expected)
expected_bins = DatetimeIndex(["2017-12-31 23:57:07.200000",
"2018-01-02 00:00:00",
"2018-01-03 00:00:00"])
expected_bins = expected_bins.tz_localize(tz)
tm.assert_index_equal(result_bins, expected_bins)
def test_timedelta_cut_roundtrip():
# see gh-19891
ser = Series(timedelta_range("1day", periods=3))
result, result_bins = cut(ser, 2, retbins=True)
expected = cut(ser, result_bins)
tm.assert_series_equal(result, expected)
expected_bins = TimedeltaIndex(["0 days 23:57:07.200000",
"2 days 00:00:00",
"3 days 00:00:00"])
tm.assert_index_equal(result_bins, expected_bins)
| 32.959016
| 79
| 0.624347
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, Interval, IntervalIndex,
Series, TimedeltaIndex, Timestamp, cut, date_range, isna, qcut,
timedelta_range, to_datetime)
from pandas.api.types import CategoricalDtype as CDT
import pandas.core.reshape.tile as tmod
import pandas.util.testing as tm
def test_simple():
data = np.ones(5, dtype="int64")
result = cut(data, 4, labels=False)
expected = np.array([1, 1, 1, 1, 1])
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_bins():
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_right():
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
expected = Categorical(intervals, ordered=True)
expected = expected.take([0, 0, 0, 2, 3, 0, 0])
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7]))
def test_no_right():
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3), closed="left")
intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095]))
def test_array_like():
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_bins_from_interval_index():
c = cut(range(5), 3)
expected = c
result = cut(range(5), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
expected = Categorical.from_codes(np.append(c.codes, -1),
categories=c.categories,
ordered=True)
result = cut(range(6), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
def test_bins_from_interval_index_doc_example():
ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])
c = cut(ages, bins=[0, 18, 35, 70])
expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])
tm.assert_index_equal(c.categories, expected)
result = cut([25, 20, 50], bins=c.categories)
tm.assert_index_equal(result.categories, expected)
tm.assert_numpy_array_equal(result.codes,
np.array([1, 1, 2], dtype="int8"))
def test_bins_not_overlapping_from_interval_index():
msg = "Overlapping IntervalIndex is not accepted"
ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)])
with pytest.raises(ValueError, match=msg):
cut([5, 6], bins=ii)
def test_bins_not_monotonic():
msg = "bins must increase monotonically"
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
with pytest.raises(ValueError, match=msg):
cut(data, [0.1, 1.5, 1, 10])
@pytest.mark.parametrize("x, bins, expected", [
(date_range("2017-12-31", periods=3),
[Timestamp.min, Timestamp('2018-01-01'), Timestamp.max],
IntervalIndex.from_tuples([
(Timestamp.min, Timestamp('2018-01-01')),
(Timestamp('2018-01-01'), Timestamp.max)])),
([-1, 0, 1],
np.array([np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max],
dtype="int64"),
IntervalIndex.from_tuples([
(np.iinfo(np.int64).min, 0),
(0, np.iinfo(np.int64).max)])),
([np.timedelta64(-1), np.timedelta64(0), np.timedelta64(1)],
np.array([
np.timedelta64(-np.iinfo(np.int64).max),
np.timedelta64(0),
np.timedelta64(np.iinfo(np.int64).max)]),
IntervalIndex.from_tuples([
(np.timedelta64(-np.iinfo(np.int64).max), np.timedelta64(0)),
(np.timedelta64(0), np.timedelta64(np.iinfo(np.int64).max))])),
])
def test_bins_monotonic_not_overflowing(x, bins, expected):
result = cut(x, bins)
tm.assert_index_equal(result.categories, expected)
def test_wrong_num_labels():
msg = "Bin labels must be one fewer than the number of bin edges"
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
with pytest.raises(ValueError, match=msg):
cut(data, [0, 1, 10], labels=["foo", "bar", "baz"])
@pytest.mark.parametrize("x,bins,msg", [
([], 2, "Cannot cut empty array"),
([1, 2, 3], 0.5, "`bins` should be a positive integer")
])
def test_cut_corner(x, bins, msg):
with pytest.raises(ValueError, match=msg):
cut(x, bins)
@pytest.mark.parametrize("arg", [2, np.eye(2), DataFrame(np.eye(2))])
@pytest.mark.parametrize("cut_func", [cut, qcut])
def test_cut_not_1d_arg(arg, cut_func):
msg = "Input array must be 1 dimensional"
with pytest.raises(ValueError, match=msg):
cut_func(arg, 2)
@pytest.mark.parametrize('data', [
[0, 1, 2, 3, 4, np.inf],
[-np.inf, 0, 1, 2, 3, 4],
[-np.inf, 0, 1, 2, 3, 4, np.inf]])
def test_int_bins_with_inf(data):
msg = 'cannot specify integer `bins` when input data contains infinity'
with pytest.raises(ValueError, match=msg):
cut(data, bins=3)
def test_cut_out_of_range_more():
name = "x"
ser = Series([0, -1, 0, 1, -3], name=name)
ind = cut(ser, [0, 1], labels=False)
exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name)
tm.assert_series_equal(ind, exp)
@pytest.mark.parametrize("right,breaks,closed", [
(True, [-1e-3, 0.25, 0.5, 0.75, 1], "right"),
(False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], "left")
])
def test_labels(right, breaks, closed):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
result, bins = cut(arr, 4, retbins=True, right=right)
ex_levels = IntervalIndex.from_breaks(breaks, closed=closed)
tm.assert_index_equal(result.categories, ex_levels)
def test_cut_pass_series_name_to_factor():
name = "foo"
ser = Series(np.random.randn(100), name=name)
factor = cut(ser, 4)
assert factor.name == name
def test_label_precision():
arr = np.arange(0, 0.73, 0.01)
result = cut(arr, 4, precision=2)
ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72])
tm.assert_index_equal(result.categories, ex_levels)
@pytest.mark.parametrize("labels", [None, False])
def test_na_handling(labels):
arr = np.arange(0, 0.75, 0.01)
arr[::3] = np.nan
result = cut(arr, 4, labels=labels)
result = np.asarray(result)
expected = np.where(isna(arr), np.nan, result)
tm.assert_almost_equal(result, expected)
def test_inf_handling():
data = np.arange(6)
data_ser = Series(data, dtype="int64")
bins = [-np.inf, 2, 4, np.inf]
result = cut(data, bins)
result_ser = cut(data_ser, bins)
ex_uniques = IntervalIndex.from_breaks(bins)
tm.assert_index_equal(result.categories, ex_uniques)
assert result[5] == Interval(4, np.inf)
assert result[0] == Interval(-np.inf, 2)
assert result_ser[5] == Interval(4, np.inf)
assert result_ser[0] == Interval(-np.inf, 2)
def test_cut_out_of_bounds():
arr = np.random.randn(100)
result = cut(arr, [-1, 0, 1])
mask = isna(result)
ex_mask = (arr < -1) | (arr > 1)
tm.assert_numpy_array_equal(mask, ex_mask)
@pytest.mark.parametrize("get_labels,get_expected", [
(lambda labels: labels,
lambda labels: Categorical(["Medium"] + 4 * ["Small"] +
["Medium", "Large"],
categories=labels, ordered=True)),
(lambda labels: Categorical.from_codes([0, 1, 2], labels),
lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels))
])
def test_cut_pass_labels(get_labels, get_expected):
bins = [0, 25, 50, 100]
arr = [50, 5, 10, 15, 20, 30, 70]
labels = ["Small", "Medium", "Large"]
result = cut(arr, bins, labels=get_labels(labels))
tm.assert_categorical_equal(result, get_expected(labels))
def test_cut_pass_labels_compat():
arr = [50, 5, 10, 15, 20, 30, 70]
labels = ["Good", "Medium", "Bad"]
result = cut(arr, 3, labels=labels)
exp = cut(arr, 3, labels=Categorical(labels, categories=labels,
ordered=True))
tm.assert_categorical_equal(result, exp)
@pytest.mark.parametrize("x", [np.arange(11.), np.arange(11.) / 1e10])
def test_round_frac_just_works(x):
cut(x, 2)
@pytest.mark.parametrize("val,precision,expected", [
(-117.9998, 3, -118),
(117.9998, 3, 118),
(117.9998, 2, 118),
(0.000123456, 2, 0.00012)
])
def test_round_frac(val, precision, expected):
result = tmod._round_frac(val, precision=precision)
assert result == expected
def test_cut_return_intervals():
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
result = cut(ser, 3)
exp_bins = np.linspace(0, 8, num=4).round(3)
exp_bins[0] -= 0.008
expected = Series(IntervalIndex.from_breaks(exp_bins, closed="right").take(
[0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_series_ret_bins():
ser = Series(np.arange(4))
result, bins = cut(ser, 2, retbins=True)
expected = Series(IntervalIndex.from_breaks(
[-0.003, 1.5, 3], closed="right").repeat(2)).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kwargs,msg", [
(dict(duplicates="drop"), None),
(dict(), "Bin edges must be unique"),
(dict(duplicates="raise"), "Bin edges must be unique"),
(dict(duplicates="foo"), "invalid value for 'duplicates' parameter")
])
def test_cut_duplicates_bin(kwargs, msg):
bins = [0, 2, 4, 6, 10, 10]
values = Series(np.array([1, 3, 5, 7, 9]), index=["a", "b", "c", "d", "e"])
if msg is not None:
with pytest.raises(ValueError, match=msg):
cut(values, bins, **kwargs)
else:
result = cut(values, bins, **kwargs)
expected = cut(values, pd.unique(bins))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("data", [9.0, -9.0, 0.0])
@pytest.mark.parametrize("length", [1, 2])
def test_single_bin(data, length):
ser = Series([data] * length)
result = cut(ser, 1, labels=False)
expected = Series([0] * length)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"array_1_writeable,array_2_writeable",
[(True, True), (True, False), (False, False)])
def test_cut_read_only(array_1_writeable, array_2_writeable):
array_1 = np.arange(0, 100, 10)
array_1.flags.writeable = array_1_writeable
array_2 = np.arange(0, 100, 10)
array_2.flags.writeable = array_2_writeable
hundred_elements = np.arange(100)
tm.assert_categorical_equal(cut(hundred_elements, array_1),
cut(hundred_elements, array_2))
@pytest.mark.parametrize("conv", [
lambda v: Timestamp(v),
lambda v: to_datetime(v),
lambda v: np.datetime64(v),
lambda v: Timestamp(v).to_pydatetime(),
])
def test_datetime_bin(conv):
data = [np.datetime64("2012-12-13"), np.datetime64("2012-12-15")]
bin_data = ["2012-12-12", "2012-12-14", "2012-12-16"]
expected = Series(IntervalIndex([
Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),
Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))])).astype(
CDT(ordered=True))
bins = [conv(v) for v in bin_data]
result = Series(cut(data, bins=bins))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("data", [
to_datetime(Series(["2013-01-01", "2013-01-02", "2013-01-03"])),
[np.datetime64("2013-01-01"), np.datetime64("2013-01-02"),
np.datetime64("2013-01-03")],
np.array([np.datetime64("2013-01-01"), np.datetime64("2013-01-02"),
np.datetime64("2013-01-03")]),
DatetimeIndex(["2013-01-01", "2013-01-02", "2013-01-03"])
])
def test_datetime_cut(data):
result, _ = cut(data, 3, retbins=True)
expected = Series(IntervalIndex([
Interval(Timestamp("2012-12-31 23:57:07.200000"),
Timestamp("2013-01-01 16:00:00")),
Interval(Timestamp("2013-01-01 16:00:00"),
Timestamp("2013-01-02 08:00:00")),
Interval(Timestamp("2013-01-02 08:00:00"),
Timestamp("2013-01-03 00:00:00"))])).astype(CDT(ordered=True))
tm.assert_series_equal(Series(result), expected)
@pytest.mark.parametrize("bins", [
3, [Timestamp("2013-01-01 04:57:07.200000"),
Timestamp("2013-01-01 21:00:00"),
Timestamp("2013-01-02 13:00:00"),
Timestamp("2013-01-03 05:00:00")]])
@pytest.mark.parametrize("box", [list, np.array, Index, Series])
def test_datetime_tz_cut(bins, box):
tz = "US/Eastern"
s = Series(date_range("20130101", periods=3, tz=tz))
if not isinstance(bins, int):
bins = box(bins)
result = cut(s, bins)
expected = Series(IntervalIndex([
Interval(Timestamp("2012-12-31 23:57:07.200000", tz=tz),
Timestamp("2013-01-01 16:00:00", tz=tz)),
Interval(Timestamp("2013-01-01 16:00:00", tz=tz),
Timestamp("2013-01-02 08:00:00", tz=tz)),
Interval(Timestamp("2013-01-02 08:00:00", tz=tz),
Timestamp("2013-01-03 00:00:00", tz=tz))])).astype(
CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_datetime_nan_error():
msg = "bins must be of datetime64 dtype"
with pytest.raises(ValueError, match=msg):
cut(date_range("20130101", periods=3), bins=[0, 2, 4])
def test_datetime_nan_mask():
result = cut(date_range("20130102", periods=5),
bins=date_range("20130101", periods=2))
mask = result.categories.isna()
tm.assert_numpy_array_equal(mask, np.array([False]))
mask = result.isna()
tm.assert_numpy_array_equal(mask, np.array([False, True, True,
True, True]))
@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"])
def test_datetime_cut_roundtrip(tz):
ser = Series(date_range("20180101", periods=3, tz=tz))
result, result_bins = cut(ser, 2, retbins=True)
expected = cut(ser, result_bins)
tm.assert_series_equal(result, expected)
expected_bins = DatetimeIndex(["2017-12-31 23:57:07.200000",
"2018-01-02 00:00:00",
"2018-01-03 00:00:00"])
expected_bins = expected_bins.tz_localize(tz)
tm.assert_index_equal(result_bins, expected_bins)
def test_timedelta_cut_roundtrip():
ser = Series(timedelta_range("1day", periods=3))
result, result_bins = cut(ser, 2, retbins=True)
expected = cut(ser, result_bins)
tm.assert_series_equal(result, expected)
expected_bins = TimedeltaIndex(["0 days 23:57:07.200000",
"2 days 00:00:00",
"3 days 00:00:00"])
tm.assert_index_equal(result_bins, expected_bins)
| true
| true
|
f71733ffb9bbff488b8f191f8cc9d6dc64459192
| 2,702
|
py
|
Python
|
Solutions/Trend Micro Vision One/Data Connectors/AzureFunctionTrendMicroXDR/shared_code/configurations.py
|
sxchuba/Azure-Sentinel
|
0f5f543981901dbd9d9654f8f2d0010f8af1e9a6
|
[
"MIT"
] | 11
|
2019-02-04T13:37:14.000Z
|
2019-02-22T20:47:06.000Z
|
Solutions/Trend Micro Vision One/Data Connectors/AzureFunctionTrendMicroXDR/shared_code/configurations.py
|
sxchuba/Azure-Sentinel
|
0f5f543981901dbd9d9654f8f2d0010f8af1e9a6
|
[
"MIT"
] | 6
|
2019-02-03T13:58:50.000Z
|
2019-02-25T02:01:16.000Z
|
Solutions/Trend Micro Vision One/Data Connectors/AzureFunctionTrendMicroXDR/shared_code/configurations.py
|
sxchuba/Azure-Sentinel
|
0f5f543981901dbd9d9654f8f2d0010f8af1e9a6
|
[
"MIT"
] | 4
|
2019-02-03T09:20:27.000Z
|
2019-02-12T15:43:35.000Z
|
import os
VERSION = '1.0.1'
SIEM_NAME = 'SentinelAddon'
XDR_HOSTS = {
'us': 'https://api.xdr.trendmicro.com',
'eu': 'https://api.eu.xdr.trendmicro.com',
'in': 'https://api.in.xdr.trendmicro.com',
'jp': 'https://api.xdr.trendmicro.co.jp',
'sg': 'https://api.sg.xdr.trendmicro.com',
'au': 'https://api.au.xdr.trendmicro.com',
'uae': 'https://api.uae.xdr.trendmicro.com/',
}
def get_workspace_id():
return os.environ['workspaceId']
def get_workspace_key():
return os.environ['workspaceKey']
def get_api_tokens():
is_key_vault_enabled = (
os.getenv('keyVaultUrl')
and os.getenv('keyVaultIdentityClientId')
and os.getenv('clpIds')
)
if is_key_vault_enabled:
# get tokens from key vault
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
clp_ids = list(filter(None, os.getenv('clpIds').split(',')))
credential = DefaultAzureCredential(
managed_identity_client_id=os.getenv('keyVaultIdentityClientId')
)
client = SecretClient(vault_url=os.getenv('keyVaultUrl'), credential=credential)
return [client.get_secret(get_secret_name(clp_id)).value for clp_id in clp_ids]
else:
return list(filter(None, os.environ.get('apiTokens', '').split(',')))
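# Hedged usage sketch (token values below are placeholders): get_api_tokens()
# reads secrets from Key Vault only when keyVaultUrl, keyVaultIdentityClientId
# and clpIds are all set; otherwise it falls back to the comma-separated
# apiTokens environment variable.
#
#   os.environ['apiTokens'] = 'token-a,token-b'  # fallback mode
#   get_api_tokens()                             # -> ['token-a', 'token-b']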
def get_xdr_host_url():
xdr_host_url = os.environ.get('xdrHostUrl')
return xdr_host_url or XDR_HOSTS[os.environ['regionCode']]
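# Usage sketch (region code chosen for illustration): an explicit xdrHostUrl
# takes precedence; otherwise regionCode is looked up in XDR_HOSTS.
#
#   os.environ['regionCode'] = 'eu'
#   get_xdr_host_url()  # -> 'https://api.eu.xdr.trendmicro.com'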
def get_storage_connection_string():
return os.environ['AzureWebJobsStorage']
def get_max_workbench_query_minutes():
return int(os.environ.get('maxWorkbenchQueryMinutes', 60))
def get_default_workbench_query_minutes():
return int(os.environ.get('defaultWorkbenchQueryMinutes', 5))
def get_max_oat_query_minutes():
return int(os.environ.get('maxOatQueryMinutes', 30))
def get_default_oat_query_minutes():
return int(os.environ.get('defaultOatQueryMinutes', 5))
def get_oat_query_time_buffer_minutes():
return int(os.environ.get('defaultOatQueryTimeBufferMinutes', 15))
def get_datetime_format():
return '%Y-%m-%dT%H:%M:%S.000Z'
def get_wb_log_type():
return 'TrendMicro_XDR_WORKBENCH'
def get_health_check_log_type():
return 'TrendMicro_XDR_Health_Check'
def get_oat_health_check_log_type():
return 'TrendMicro_XDR_OAT_Health_Check'
def get_rca_log_type():
return 'TrendMicro_XDR_RCA_Result'
def get_rca_task_log_type():
return 'TrendMicro_XDR_RCA_Task'
def get_oat_log_type():
return 'TrendMicro_XDR_OAT'
def get_user_agent():
return f'TMXDR{SIEM_NAME}/{VERSION}'
def get_secret_name(clp_id):
return f'tmv1-entity-{clp_id}'
| 24.563636
| 88
| 0.707994
|
import os
VERSION = '1.0.1'
SIEM_NAME = 'SentinelAddon'
XDR_HOSTS = {
'us': 'https://api.xdr.trendmicro.com',
'eu': 'https://api.eu.xdr.trendmicro.com',
'in': 'https://api.in.xdr.trendmicro.com',
'jp': 'https://api.xdr.trendmicro.co.jp',
'sg': 'https://api.sg.xdr.trendmicro.com',
'au': 'https://api.au.xdr.trendmicro.com',
'uae': 'https://api.uae.xdr.trendmicro.com/',
}
def get_workspace_id():
return os.environ['workspaceId']
def get_workspace_key():
return os.environ['workspaceKey']
def get_api_tokens():
is_key_vault_enabled = (
os.getenv('keyVaultUrl')
and os.getenv('keyVaultIdentityClientId')
and os.getenv('clpIds')
)
if is_key_vault_enabled:
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
clp_ids = list(filter(None, os.getenv('clpIds').split(',')))
credential = DefaultAzureCredential(
managed_identity_client_id=os.getenv('keyVaultIdentityClientId')
)
client = SecretClient(vault_url=os.getenv('keyVaultUrl'), credential=credential)
return [client.get_secret(get_secret_name(clp_id)).value for clp_id in clp_ids]
else:
return list(filter(None, os.environ.get('apiTokens', '').split(',')))
def get_xdr_host_url():
xdr_host_url = os.environ.get('xdrHostUrl')
return xdr_host_url or XDR_HOSTS[os.environ['regionCode']]
def get_storage_connection_string():
return os.environ['AzureWebJobsStorage']
def get_max_workbench_query_minutes():
return int(os.environ.get('maxWorkbenchQueryMinutes', 60))
def get_default_workbench_query_minutes():
return int(os.environ.get('defaultWorkbenchQueryMinutes', 5))
def get_max_oat_query_minutes():
return int(os.environ.get('maxOatQueryMinutes', 30))
def get_default_oat_query_minutes():
return int(os.environ.get('defaultOatQueryMinutes', 5))
def get_oat_query_time_buffer_minutes():
return int(os.environ.get('defaultOatQueryTimeBufferMinutes', 15))
def get_datetime_format():
return '%Y-%m-%dT%H:%M:%S.000Z'
def get_wb_log_type():
return 'TrendMicro_XDR_WORKBENCH'
def get_health_check_log_type():
return 'TrendMicro_XDR_Health_Check'
def get_oat_health_check_log_type():
return 'TrendMicro_XDR_OAT_Health_Check'
def get_rca_log_type():
return 'TrendMicro_XDR_RCA_Result'
def get_rca_task_log_type():
return 'TrendMicro_XDR_RCA_Task'
def get_oat_log_type():
return 'TrendMicro_XDR_OAT'
def get_user_agent():
return f'TMXDR{SIEM_NAME}/{VERSION}'
def get_secret_name(clp_id):
return f'tmv1-entity-{clp_id}'
| true
| true
|
f7173583513fd2e9bcaf8f9083a5b3c6349ba684
| 8,045
|
py
|
Python
|
env/EnvMultipleStock_train.py
|
rlaplaza/Deep-Reinforcement-Learning-for-Automated-Stock-Trading-Ensemble-Strategy-ICAIF-2020
|
5fe6b8554587320bc6044164270635166c93616d
|
[
"MIT"
] | null | null | null |
env/EnvMultipleStock_train.py
|
rlaplaza/Deep-Reinforcement-Learning-for-Automated-Stock-Trading-Ensemble-Strategy-ICAIF-2020
|
5fe6b8554587320bc6044164270635166c93616d
|
[
"MIT"
] | null | null | null |
env/EnvMultipleStock_train.py
|
rlaplaza/Deep-Reinforcement-Learning-for-Automated-Stock-Trading-Ensemble-Strategy-ICAIF-2020
|
5fe6b8554587320bc6044164270635166c93616d
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# shares normalization factor
# at most 100 shares per trade
HMAX_NORMALIZE = 100
# initial amount of money we have in our account
INITIAL_ACCOUNT_BALANCE = 1000000
# total number of stocks in our portfolio
STOCK_DIM = 30
# transaction fee: 0.1% (1/1000) of trade value, charged on both buys and sells
TRANSACTION_FEE_PERCENT = 0.001
REWARD_SCALING = 1e-4
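# REWARD_SCALING keeps the per-step reward (dollar change in total assets) in
# a numerically friendly range for the RL optimizer: a $10,000 gain becomes 1.0.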
class StockEnvTrain(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {"render.modes": ["human"]}
def __init__(self, df, day=0):
# super(StockEnv, self).__init__()
# money = 10 , scope = 1
self.day = day
self.df = df
# action_space normalization and shape is STOCK_DIM
self.action_space = spaces.Box(low=-1, high=1, shape=(STOCK_DIM,))
# Shape = 181: [Current Balance]+[prices 1-30]+[owned shares 1-30]
# +[macd 1-30]+ [rsi 1-30] + [cci 1-30] + [adx 1-30]
self.observation_space = spaces.Box(low=0, high=np.inf, shape=(181,))
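        # state vector index map (0-based): [0] balance, [1:31] prices,
        # [31:61] shares held, [61:91] macd, [91:121] rsi, [121:151] cci,
        # [151:181] adx -- 1 + 6 * STOCK_DIM = 181 entries in total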
# load data from a pandas dataframe
self.data = self.df.loc[self.day, :]
self.terminal = False
        # initialize state
self.state = (
[INITIAL_ACCOUNT_BALANCE]
+ self.data.adjcp.values.tolist()
+ [0] * STOCK_DIM
+ self.data.macd.values.tolist()
+ self.data.rsi.values.tolist()
+ self.data.cci.values.tolist()
+ self.data.adx.values.tolist()
)
# initialize reward
self.reward = 0
self.cost = 0
        # record the total asset value after every step
self.asset_memory = [INITIAL_ACCOUNT_BALANCE]
self.rewards_memory = []
self.trades = 0
# self.reset()
self._seed()
    def _sell_stock(self, index, action):
        # perform sell action based on the sign of the action
        if self.state[index + STOCK_DIM + 1] > 0:
            # sell no more shares than are currently held; compute the amount
            # once so the balance, holdings, and cost updates stay consistent
            # (recomputing it after the holdings update would understate cost)
            sell_amount = min(abs(action), self.state[index + STOCK_DIM + 1])
            # update balance
            self.state[0] += (
                self.state[index + 1]
                * sell_amount
                * (1 - TRANSACTION_FEE_PERCENT)
            )
            self.state[index + STOCK_DIM + 1] -= sell_amount
            self.cost += (
                self.state[index + 1] * sell_amount * TRANSACTION_FEE_PERCENT
            )
            self.trades += 1
        else:
            pass
    def _buy_stock(self, index, action):
        # perform buy action based on the sign of the action
        available_amount = self.state[0] // self.state[index + 1]
        # print('available_amount:{}'.format(available_amount))
        # buy no more shares than the current balance can afford
        buy_amount = min(available_amount, action)
        # update balance
        self.state[0] -= (
            self.state[index + 1] * buy_amount * (1 + TRANSACTION_FEE_PERCENT)
        )
        self.state[index + STOCK_DIM + 1] += buy_amount
        self.cost += (
            self.state[index + 1] * buy_amount * TRANSACTION_FEE_PERCENT
        )
        self.trades += 1
def step(self, actions):
# print(self.day)
self.terminal = self.day >= len(self.df.index.unique()) - 1
# print(actions)
if self.terminal:
plt.plot(self.asset_memory, "r")
plt.savefig("results/account_value_train.png")
plt.close()
end_total_asset = self.state[0] + sum(
np.array(self.state[1 : (STOCK_DIM + 1)])
* np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])
)
# print("end_total_asset:{}".format(end_total_asset))
df_total_value = pd.DataFrame(self.asset_memory)
df_total_value.to_csv("results/account_value_train.csv")
# print("total_reward:{}".format(self.state[0]+sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):61]))- INITIAL_ACCOUNT_BALANCE ))
# print("total_cost: ", self.cost)
# print("total_trades: ", self.trades)
df_total_value.columns = ["account_value"]
df_total_value["daily_return"] = df_total_value.pct_change(1)
sharpe = (
(252 ** 0.5)
* df_total_value["daily_return"].mean()
/ df_total_value["daily_return"].std()
)
# print("Sharpe: ",sharpe)
# print("=================================")
df_rewards = pd.DataFrame(self.rewards_memory)
# df_rewards.to_csv('results/account_rewards_train.csv')
# print('total asset: {}'.format(self.state[0]+ sum(np.array(self.state[1:29])*np.array(self.state[29:]))))
# with open('obs.pkl', 'wb') as f:
# pickle.dump(self.state, f)
return self.state, self.reward, self.terminal, {}
else:
# print(np.array(self.state[1:29]))
actions = actions * HMAX_NORMALIZE
# actions = (actions.astype(int))
begin_total_asset = self.state[0] + sum(
np.array(self.state[1 : (STOCK_DIM + 1)])
* np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])
)
# print("begin_total_asset:{}".format(begin_total_asset))
argsort_actions = np.argsort(actions)
sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]]
buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]]
for index in sell_index:
# print('take sell action'.format(actions[index]))
self._sell_stock(index, actions[index])
for index in buy_index:
# print('take buy action: {}'.format(actions[index]))
self._buy_stock(index, actions[index])
self.day += 1
self.data = self.df.loc[self.day, :]
# load next state
# print("stock_shares:{}".format(self.state[29:]))
self.state = (
[self.state[0]]
+ self.data.adjcp.values.tolist()
+ list(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])
+ self.data.macd.values.tolist()
+ self.data.rsi.values.tolist()
+ self.data.cci.values.tolist()
+ self.data.adx.values.tolist()
)
end_total_asset = self.state[0] + sum(
np.array(self.state[1 : (STOCK_DIM + 1)])
* np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])
)
self.asset_memory.append(end_total_asset)
# print("end_total_asset:{}".format(end_total_asset))
self.reward = end_total_asset - begin_total_asset
# print("step_reward:{}".format(self.reward))
self.rewards_memory.append(self.reward)
self.reward = self.reward * REWARD_SCALING
return self.state, self.reward, self.terminal, {}
def reset(self):
self.asset_memory = [INITIAL_ACCOUNT_BALANCE]
self.day = 0
self.data = self.df.loc[self.day, :]
self.cost = 0
self.trades = 0
self.terminal = False
self.rewards_memory = []
        # re-initialize state
self.state = (
[INITIAL_ACCOUNT_BALANCE]
+ self.data.adjcp.values.tolist()
+ [0] * STOCK_DIM
+ self.data.macd.values.tolist()
+ self.data.rsi.values.tolist()
+ self.data.cci.values.tolist()
+ self.data.adx.values.tolist()
)
# iteration += 1
return self.state
def render(self, mode="human"):
return self.state
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
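if __name__ == "__main__":
    # Hedged smoke-test sketch, not part of the original module: it assumes a
    # dataframe indexed by day with one row per ticker and the adjcp, macd,
    # rsi, cci and adx columns this environment reads. All values below are
    # synthetic placeholders.
    frames = []
    for day in range(3):
        frames.append(pd.DataFrame({
            "adjcp": np.random.uniform(10, 100, STOCK_DIM),
            "macd": np.zeros(STOCK_DIM),
            "rsi": np.full(STOCK_DIM, 50.0),
            "cci": np.zeros(STOCK_DIM),
            "adx": np.full(STOCK_DIM, 20.0),
        }, index=[day] * STOCK_DIM))
    demo_df = pd.concat(frames)
    env = StockEnvTrain(demo_df)
    obs = env.reset()
    obs, reward, done, _ = env.step(env.action_space.sample())
    print(f"reward={reward:.6f} done={done}")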
| 35.915179
| 168
| 0.549037
|
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
HMAX_NORMALIZE = 100
INITIAL_ACCOUNT_BALANCE = 1000000
STOCK_DIM = 30
TRANSACTION_FEE_PERCENT = 0.001
REWARD_SCALING = 1e-4
class StockEnvTrain(gym.Env):
metadata = {"render.modes": ["human"]}
def __init__(self, df, day=0):
self.day = day
self.df = df
self.action_space = spaces.Box(low=-1, high=1, shape=(STOCK_DIM,))
self.observation_space = spaces.Box(low=0, high=np.inf, shape=(181,))
self.data = self.df.loc[self.day, :]
self.terminal = False
self.state = (
[INITIAL_ACCOUNT_BALANCE]
+ self.data.adjcp.values.tolist()
+ [0] * STOCK_DIM
+ self.data.macd.values.tolist()
+ self.data.rsi.values.tolist()
+ self.data.cci.values.tolist()
+ self.data.adx.values.tolist()
)
self.reward = 0
self.cost = 0
self.asset_memory = [INITIAL_ACCOUNT_BALANCE]
self.rewards_memory = []
self.trades = 0
self._seed()
    def _sell_stock(self, index, action):
        if self.state[index + STOCK_DIM + 1] > 0:
            sell_amount = min(abs(action), self.state[index + STOCK_DIM + 1])
            self.state[0] += (
                self.state[index + 1]
                * sell_amount
                * (1 - TRANSACTION_FEE_PERCENT)
            )
            self.state[index + STOCK_DIM + 1] -= sell_amount
            self.cost += (
                self.state[index + 1] * sell_amount * TRANSACTION_FEE_PERCENT
            )
            self.trades += 1
        else:
            pass
def _buy_stock(self, index, action):
available_amount = self.state[0] // self.state[index + 1]
self.state[0] -= (
self.state[index + 1]
* min(available_amount, action)
* (1 + TRANSACTION_FEE_PERCENT)
)
self.state[index + STOCK_DIM + 1] += min(available_amount, action)
self.cost += (
self.state[index + 1]
* min(available_amount, action)
* TRANSACTION_FEE_PERCENT
)
self.trades += 1
def step(self, actions):
self.terminal = self.day >= len(self.df.index.unique()) - 1
if self.terminal:
plt.plot(self.asset_memory, "r")
plt.savefig("results/account_value_train.png")
plt.close()
end_total_asset = self.state[0] + sum(
np.array(self.state[1 : (STOCK_DIM + 1)])
* np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])
)
df_total_value = pd.DataFrame(self.asset_memory)
df_total_value.to_csv("results/account_value_train.csv")
df_total_value.columns = ["account_value"]
df_total_value["daily_return"] = df_total_value.pct_change(1)
sharpe = (
(252 ** 0.5)
* df_total_value["daily_return"].mean()
/ df_total_value["daily_return"].std()
)
df_rewards = pd.DataFrame(self.rewards_memory)
return self.state, self.reward, self.terminal, {}
else:
actions = actions * HMAX_NORMALIZE
begin_total_asset = self.state[0] + sum(
np.array(self.state[1 : (STOCK_DIM + 1)])
* np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])
)
argsort_actions = np.argsort(actions)
sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]]
buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]]
for index in sell_index:
self._sell_stock(index, actions[index])
for index in buy_index:
self._buy_stock(index, actions[index])
self.day += 1
self.data = self.df.loc[self.day, :]
self.state = (
[self.state[0]]
+ self.data.adjcp.values.tolist()
+ list(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])
+ self.data.macd.values.tolist()
+ self.data.rsi.values.tolist()
+ self.data.cci.values.tolist()
+ self.data.adx.values.tolist()
)
end_total_asset = self.state[0] + sum(
np.array(self.state[1 : (STOCK_DIM + 1)])
* np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])
)
self.asset_memory.append(end_total_asset)
self.reward = end_total_asset - begin_total_asset
self.rewards_memory.append(self.reward)
self.reward = self.reward * REWARD_SCALING
return self.state, self.reward, self.terminal, {}
def reset(self):
self.asset_memory = [INITIAL_ACCOUNT_BALANCE]
self.day = 0
self.data = self.df.loc[self.day, :]
self.cost = 0
self.trades = 0
self.terminal = False
self.rewards_memory = []
self.state = (
[INITIAL_ACCOUNT_BALANCE]
+ self.data.adjcp.values.tolist()
+ [0] * STOCK_DIM
+ self.data.macd.values.tolist()
+ self.data.rsi.values.tolist()
+ self.data.cci.values.tolist()
+ self.data.adx.values.tolist()
)
return self.state
def render(self, mode="human"):
return self.state
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
| true
| true
|
f71735a115665e8722defa42706030357a5b5f70
| 13,762
|
py
|
Python
|
src/asldro/validators/test_user_parameter_input.py
|
gold-standard-phantoms/asldro
|
6ae82ed69d66fed64e1e54e5394cc3b5d8dbe1bd
|
[
"MIT"
] | 3
|
2021-03-09T15:51:32.000Z
|
2021-05-19T13:05:18.000Z
|
src/asldro/validators/test_user_parameter_input.py
|
gold-standard-phantoms/asldro
|
6ae82ed69d66fed64e1e54e5394cc3b5d8dbe1bd
|
[
"MIT"
] | null | null | null |
src/asldro/validators/test_user_parameter_input.py
|
gold-standard-phantoms/asldro
|
6ae82ed69d66fed64e1e54e5394cc3b5d8dbe1bd
|
[
"MIT"
] | null | null | null |
""" Tests some user inputs to the model to make sure the validation is performed correctly """
# pylint: disable=redefined-outer-name
from copy import deepcopy
import pytest
from asldro.data.filepaths import GROUND_TRUTH_DATA
from asldro.validators.parameters import ValidationError
from asldro.validators.user_parameter_input import (
IMAGE_TYPE_VALIDATOR,
ASL,
GROUND_TRUTH,
STRUCTURAL,
validate_input_params,
get_example_input_params,
)
def test_user_input_valid():
""" Tests a valid set of inputs """
d = {
"label_type": "PASL",
"label_duration": 2.0,
"signal_time": 2.5,
"label_efficiency": 0.8,
"lambda_blood_brain": 0.9,
"t1_arterial_blood": 85,
"m0": 0.7,
"asl_context": "m0scan control label control label",
"echo_time": [0, 1, 2, 3, 4],
"repetition_time": [3, 4.5, 5, 6.4, 1.2],
"rot_z": [-180, 180, 0, 0, 0],
"rot_y": [0.0, 180.0, 0.0, 180.0, 1.2],
"rot_x": [-180.0, 0, 0.2, 3.0, 1.3],
"transl_x": [-1000, 0.0, 5.6, 6.7, 7.8],
"transl_y": [0.0, 1000.0, 0.3, 100.6, 2.3],
"transl_z": [5.6, 1.3, 1.2, 1.3, 1.2],
"desired_snr": 5.0,
"acq_matrix": [8, 9, 10],
"acq_contrast": "se",
"random_seed": 123_871_263,
"excitation_flip_angle": 35.6,
"inversion_flip_angle": 164.0,
"inversion_time": 1.0,
}
assert d == IMAGE_TYPE_VALIDATOR[ASL].validate(
d
) # the same dictionary should be returned
def test_asl_user_input_defaults_created():
""" Test default values for the asl image type """
correct_defaults = {
"label_type": "pcasl",
"asl_context": "m0scan control label",
"echo_time": [0.01, 0.01, 0.01],
"repetition_time": [10.0, 5.0, 5.0],
"rot_z": [0.0, 0.0, 0.0],
"rot_y": [0.0, 0.0, 0.0],
"rot_x": [0.0, 0.0, 0.0],
"transl_x": [0.0, 0.0, 0.0],
"transl_y": [0.0, 0.0, 0.0],
"transl_z": [0.0, 0.0, 0.0],
"label_duration": 1.8,
"signal_time": 3.6,
"label_efficiency": 0.85,
"desired_snr": 100,
"acq_matrix": [64, 64, 12],
"acq_contrast": "se",
"random_seed": 0,
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
}
# Validation should include inputs
assert IMAGE_TYPE_VALIDATOR[ASL].validate({}) == correct_defaults
# Get the defaults directly
assert IMAGE_TYPE_VALIDATOR[ASL].get_defaults() == correct_defaults
def test_structural_user_input_defaults_created():
""" Test default values for the structural image type """
correct_defaults = {
"echo_time": 0.005,
"repetition_time": 0.3,
"rot_z": 0.0,
"rot_y": 0.0,
"rot_x": 0.0,
"transl_x": 0.0,
"transl_y": 0.0,
"transl_z": 0.0,
"acq_matrix": [197, 233, 189],
"acq_contrast": "se",
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
"desired_snr": 50.0,
"random_seed": 0,
"output_image_type": "magnitude",
"modality": "anat",
}
# Validation should include inputs
assert IMAGE_TYPE_VALIDATOR[STRUCTURAL].validate({}) == correct_defaults
# Get the defaults directly
assert IMAGE_TYPE_VALIDATOR[STRUCTURAL].get_defaults() == correct_defaults
def test_ground_truth_user_input_defaults_created():
""" Test default values for the ground_truth image type """
correct_defaults = {
"rot_z": 0.0,
"rot_y": 0.0,
"rot_x": 0.0,
"transl_x": 0.0,
"transl_y": 0.0,
"transl_z": 0.0,
"acq_matrix": [64, 64, 12],
}
# Validation should include inputs
assert IMAGE_TYPE_VALIDATOR[GROUND_TRUTH].validate({}) == correct_defaults
# Get the defaults directly
assert IMAGE_TYPE_VALIDATOR[GROUND_TRUTH].get_defaults() == correct_defaults
def test_mismatch_asl_context_array_sizes():
"""Check that if the length of any of:
- echo_time
- repetition_time
- rot_z
- rot_y
- rot_x
- transl_x
- transl_y
- transl_z
does not match the number of items in asl_context, a ValidationError
will be raised with an appropriate error message
"""
good_input = {
"label_type": "PASL",
"asl_context": "m0scan control label",
"echo_time": [0.01, 0.01, 0.01],
"repetition_time": [10.0, 5.0, 5.0],
"rot_z": [0.0, 0.0, 0.0],
"rot_y": [0.0, 0.0, 0.0],
"rot_x": [0.0, 0.0, 0.0],
"transl_x": [0.0, 0.0, 0.0],
"transl_y": [0.0, 0.0, 0.0],
"transl_z": [0.0, 0.0, 0.0],
}
IMAGE_TYPE_VALIDATOR[ASL].validate(good_input) # no exception
for param in [
"echo_time",
"repetition_time",
"rot_x",
"rot_y",
"rot_z",
"transl_z",
"transl_y",
"transl_x",
]:
d = deepcopy(good_input)
d[param] = [0.1, 0.2, 0.3, 0.4] # wrong number of parameters
with pytest.raises(
ValidationError,
match=f"{param} must be present and have the same number of entries as asl_context",
):
IMAGE_TYPE_VALIDATOR[ASL].validate(d)
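# Illustrative sketch of the rule tested above (comments only): every
# per-volume parameter must have exactly one entry per item in asl_context.
#
#   "asl_context": "m0scan control label"  # 3 volumes
#   "echo_time": [0.01, 0.01, 0.01]        # OK: 3 entries
#   "echo_time": [0.01, 0.01]              # ValidationError: 2 entries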
@pytest.fixture
def input_params():
""" A valid input parameter config """
return {
"global_configuration": {
"ground_truth": "hrgt_icbm_2009a_nls_3t",
"image_override": {"m0": 5.0},
"parameter_override": {"lambda_blood_brain": 0.85},
"ground_truth_modulate": {
"t1": {"scale": 0.5},
"t2": {"offset": 2},
"m0": {"scale": 2, "offset": 1.5},
},
},
"image_series": [
{
"series_type": "asl",
"series_description": "user description for asl",
"series_parameters": {
"asl_context": "m0scan control label",
"label_type": "pcasl",
"acq_matrix": [64, 64, 20],
},
},
{
"series_type": "structural",
"series_description": "user description for structural scan",
"series_parameters": {
"acq_contrast": "ge",
"echo_time": 0.05,
"repetition_time": 0.3,
"acq_matrix": [256, 256, 128],
},
},
{
"series_type": "ground_truth",
"series_description": "user description for ground truth",
"series_parameters": {"acq_matrix": [64, 64, 20]},
},
],
}
@pytest.fixture(name="expected_parsed_input")
def fixture_expected_parsed_input():
return {
"global_configuration": {
"ground_truth": {
"nii": GROUND_TRUTH_DATA["hrgt_icbm_2009a_nls_3t"]["nii"],
"json": GROUND_TRUTH_DATA["hrgt_icbm_2009a_nls_3t"]["json"],
},
"image_override": {"m0": 5.0},
"parameter_override": {"lambda_blood_brain": 0.85},
"ground_truth_modulate": {
"t1": {"scale": 0.5},
"t2": {"offset": 2},
"m0": {"scale": 2, "offset": 1.5},
},
},
"image_series": [
{
"series_type": "asl",
"series_description": "user description for asl",
"series_parameters": {
"asl_context": "m0scan control label",
"label_type": "pcasl",
"acq_matrix": [64, 64, 20],
"echo_time": [0.01, 0.01, 0.01],
"repetition_time": [10.0, 5.0, 5.0],
"rot_z": [0.0, 0.0, 0.0],
"rot_y": [0.0, 0.0, 0.0],
"rot_x": [0.0, 0.0, 0.0],
"transl_x": [0.0, 0.0, 0.0],
"transl_y": [0.0, 0.0, 0.0],
"transl_z": [0.0, 0.0, 0.0],
"label_duration": 1.8,
"signal_time": 3.6,
"label_efficiency": 0.85,
"desired_snr": 100.0,
"acq_contrast": "se",
"random_seed": 0,
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
},
},
{
"series_type": "structural",
"series_description": "user description for structural scan",
"series_parameters": {
"echo_time": 0.05,
"repetition_time": 0.3,
"rot_z": 0.0,
"rot_y": 0.0,
"rot_x": 0.0,
"transl_x": 0.0,
"transl_y": 0.0,
"transl_z": 0.0,
"acq_matrix": [256, 256, 128],
"acq_contrast": "ge",
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
"desired_snr": 50.0,
"random_seed": 0,
"output_image_type": "magnitude",
"modality": "anat",
},
},
{
"series_type": "ground_truth",
"series_description": "user description for ground truth",
"series_parameters": {
"acq_matrix": [64, 64, 20],
"rot_z": 0.0,
"rot_y": 0.0,
"rot_x": 0.0,
"transl_x": 0.0,
"transl_y": 0.0,
"transl_z": 0.0,
},
},
],
}
def test_valid_input_params(input_params: dict, expected_parsed_input: dict):
"""Test that a valid input parameter file is parsed without
raising an exception and that the appropriate defaults are inserted"""
# Should not raise an exception
parsed_input = validate_input_params(input_params)
assert parsed_input == expected_parsed_input
# Also, try changing the ground_truth to the nifti file
# in the HRGT data (JSON file assumed same name)
input_params["global_configuration"]["ground_truth"] = GROUND_TRUTH_DATA[
"hrgt_icbm_2009a_nls_3t"
]["nii"]
# Should not raise an exception
parsed_input = validate_input_params(input_params)
assert parsed_input == expected_parsed_input
# Also, try changing the ground_truth to the nifti file/json file
# in the HRGT data
input_params["global_configuration"]["ground_truth"] = {
"nii": GROUND_TRUTH_DATA["hrgt_icbm_2009a_nls_3t"]["nii"],
"json": GROUND_TRUTH_DATA["hrgt_icbm_2009a_nls_3t"]["json"],
}
# Should not raise an exception
parsed_input = validate_input_params(input_params)
assert parsed_input == expected_parsed_input
def test_invalid_data_input_params(input_params: dict):
"""Tests that bad ground_truth data set in the input parameters
    raises appropriate Exceptions (should always be
asldro.validators.parameters.ValidationError)"""
input_params["global_configuration"]["ground_truth"] = "i_dont_exist"
with pytest.raises(ValidationError):
validate_input_params(input_params)
input_params["global_configuration"]["image_override"] = "a_string"
with pytest.raises(ValidationError):
validate_input_params(input_params)
input_params["global_configuration"]["image_override"] = {"m0": "a_string"}
with pytest.raises(ValidationError):
validate_input_params(input_params)
def test_bad_series_type_input_params(input_params: dict):
"""Tests that bad series_type data set in the input parameters
    raises appropriate Exceptions (should always be
asldro.validators.parameters.ValidationError)"""
input_params["image_series"][0]["series_type"] = "magic"
with pytest.raises(ValidationError):
validate_input_params(input_params)
def test_missing_series_parameters_inserts_defaults(input_params: dict):
"""Tests that if series_parameters are completely missing for
an image series, the defaults are inserted"""
input_params["image_series"][0].pop("series_parameters")
# The default series parameters should be added
assert validate_input_params(input_params)["image_series"][0] == {
"series_type": "asl",
"series_description": "user description for asl",
"series_parameters": {
"asl_context": "m0scan control label",
"label_type": "pcasl",
"acq_matrix": [64, 64, 12],
"echo_time": [0.01, 0.01, 0.01],
"repetition_time": [10.0, 5.0, 5.0],
"rot_z": [0.0, 0.0, 0.0],
"rot_y": [0.0, 0.0, 0.0],
"rot_x": [0.0, 0.0, 0.0],
"transl_x": [0.0, 0.0, 0.0],
"transl_y": [0.0, 0.0, 0.0],
"transl_z": [0.0, 0.0, 0.0],
"label_duration": 1.8,
"signal_time": 3.6,
"label_efficiency": 0.85,
"desired_snr": 100.0,
"acq_contrast": "se",
"random_seed": 0,
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
},
}
def test_example_input_params_valid():
"""Just test that the generated example input parameters pass
the validation (validated internally)"""
validate_input_params(get_example_input_params())
| 34.840506
| 96
| 0.540256
|
from copy import deepcopy
import pytest
from asldro.data.filepaths import GROUND_TRUTH_DATA
from asldro.validators.parameters import ValidationError
from asldro.validators.user_parameter_input import (
IMAGE_TYPE_VALIDATOR,
ASL,
GROUND_TRUTH,
STRUCTURAL,
validate_input_params,
get_example_input_params,
)
def test_user_input_valid():
d = {
"label_type": "PASL",
"label_duration": 2.0,
"signal_time": 2.5,
"label_efficiency": 0.8,
"lambda_blood_brain": 0.9,
"t1_arterial_blood": 85,
"m0": 0.7,
"asl_context": "m0scan control label control label",
"echo_time": [0, 1, 2, 3, 4],
"repetition_time": [3, 4.5, 5, 6.4, 1.2],
"rot_z": [-180, 180, 0, 0, 0],
"rot_y": [0.0, 180.0, 0.0, 180.0, 1.2],
"rot_x": [-180.0, 0, 0.2, 3.0, 1.3],
"transl_x": [-1000, 0.0, 5.6, 6.7, 7.8],
"transl_y": [0.0, 1000.0, 0.3, 100.6, 2.3],
"transl_z": [5.6, 1.3, 1.2, 1.3, 1.2],
"desired_snr": 5.0,
"acq_matrix": [8, 9, 10],
"acq_contrast": "se",
"random_seed": 123_871_263,
"excitation_flip_angle": 35.6,
"inversion_flip_angle": 164.0,
"inversion_time": 1.0,
}
assert d == IMAGE_TYPE_VALIDATOR[ASL].validate(
d
)
def test_asl_user_input_defaults_created():
correct_defaults = {
"label_type": "pcasl",
"asl_context": "m0scan control label",
"echo_time": [0.01, 0.01, 0.01],
"repetition_time": [10.0, 5.0, 5.0],
"rot_z": [0.0, 0.0, 0.0],
"rot_y": [0.0, 0.0, 0.0],
"rot_x": [0.0, 0.0, 0.0],
"transl_x": [0.0, 0.0, 0.0],
"transl_y": [0.0, 0.0, 0.0],
"transl_z": [0.0, 0.0, 0.0],
"label_duration": 1.8,
"signal_time": 3.6,
"label_efficiency": 0.85,
"desired_snr": 100,
"acq_matrix": [64, 64, 12],
"acq_contrast": "se",
"random_seed": 0,
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
}
assert IMAGE_TYPE_VALIDATOR[ASL].validate({}) == correct_defaults
assert IMAGE_TYPE_VALIDATOR[ASL].get_defaults() == correct_defaults
def test_structural_user_input_defaults_created():
correct_defaults = {
"echo_time": 0.005,
"repetition_time": 0.3,
"rot_z": 0.0,
"rot_y": 0.0,
"rot_x": 0.0,
"transl_x": 0.0,
"transl_y": 0.0,
"transl_z": 0.0,
"acq_matrix": [197, 233, 189],
"acq_contrast": "se",
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
"desired_snr": 50.0,
"random_seed": 0,
"output_image_type": "magnitude",
"modality": "anat",
}
assert IMAGE_TYPE_VALIDATOR[STRUCTURAL].validate({}) == correct_defaults
assert IMAGE_TYPE_VALIDATOR[STRUCTURAL].get_defaults() == correct_defaults
def test_ground_truth_user_input_defaults_created():
correct_defaults = {
"rot_z": 0.0,
"rot_y": 0.0,
"rot_x": 0.0,
"transl_x": 0.0,
"transl_y": 0.0,
"transl_z": 0.0,
"acq_matrix": [64, 64, 12],
}
assert IMAGE_TYPE_VALIDATOR[GROUND_TRUTH].validate({}) == correct_defaults
assert IMAGE_TYPE_VALIDATOR[GROUND_TRUTH].get_defaults() == correct_defaults
def test_mismatch_asl_context_array_sizes():
good_input = {
"label_type": "PASL",
"asl_context": "m0scan control label",
"echo_time": [0.01, 0.01, 0.01],
"repetition_time": [10.0, 5.0, 5.0],
"rot_z": [0.0, 0.0, 0.0],
"rot_y": [0.0, 0.0, 0.0],
"rot_x": [0.0, 0.0, 0.0],
"transl_x": [0.0, 0.0, 0.0],
"transl_y": [0.0, 0.0, 0.0],
"transl_z": [0.0, 0.0, 0.0],
}
IMAGE_TYPE_VALIDATOR[ASL].validate(good_input)
for param in [
"echo_time",
"repetition_time",
"rot_x",
"rot_y",
"rot_z",
"transl_z",
"transl_y",
"transl_x",
]:
d = deepcopy(good_input)
d[param] = [0.1, 0.2, 0.3, 0.4]
with pytest.raises(
ValidationError,
match=f"{param} must be present and have the same number of entries as asl_context",
):
IMAGE_TYPE_VALIDATOR[ASL].validate(d)
@pytest.fixture
def input_params():
return {
"global_configuration": {
"ground_truth": "hrgt_icbm_2009a_nls_3t",
"image_override": {"m0": 5.0},
"parameter_override": {"lambda_blood_brain": 0.85},
"ground_truth_modulate": {
"t1": {"scale": 0.5},
"t2": {"offset": 2},
"m0": {"scale": 2, "offset": 1.5},
},
},
"image_series": [
{
"series_type": "asl",
"series_description": "user description for asl",
"series_parameters": {
"asl_context": "m0scan control label",
"label_type": "pcasl",
"acq_matrix": [64, 64, 20],
},
},
{
"series_type": "structural",
"series_description": "user description for structural scan",
"series_parameters": {
"acq_contrast": "ge",
"echo_time": 0.05,
"repetition_time": 0.3,
"acq_matrix": [256, 256, 128],
},
},
{
"series_type": "ground_truth",
"series_description": "user description for ground truth",
"series_parameters": {"acq_matrix": [64, 64, 20]},
},
],
}
@pytest.fixture(name="expected_parsed_input")
def fixture_expected_parsed_input():
return {
"global_configuration": {
"ground_truth": {
"nii": GROUND_TRUTH_DATA["hrgt_icbm_2009a_nls_3t"]["nii"],
"json": GROUND_TRUTH_DATA["hrgt_icbm_2009a_nls_3t"]["json"],
},
"image_override": {"m0": 5.0},
"parameter_override": {"lambda_blood_brain": 0.85},
"ground_truth_modulate": {
"t1": {"scale": 0.5},
"t2": {"offset": 2},
"m0": {"scale": 2, "offset": 1.5},
},
},
"image_series": [
{
"series_type": "asl",
"series_description": "user description for asl",
"series_parameters": {
"asl_context": "m0scan control label",
"label_type": "pcasl",
"acq_matrix": [64, 64, 20],
"echo_time": [0.01, 0.01, 0.01],
"repetition_time": [10.0, 5.0, 5.0],
"rot_z": [0.0, 0.0, 0.0],
"rot_y": [0.0, 0.0, 0.0],
"rot_x": [0.0, 0.0, 0.0],
"transl_x": [0.0, 0.0, 0.0],
"transl_y": [0.0, 0.0, 0.0],
"transl_z": [0.0, 0.0, 0.0],
"label_duration": 1.8,
"signal_time": 3.6,
"label_efficiency": 0.85,
"desired_snr": 100.0,
"acq_contrast": "se",
"random_seed": 0,
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
},
},
{
"series_type": "structural",
"series_description": "user description for structural scan",
"series_parameters": {
"echo_time": 0.05,
"repetition_time": 0.3,
"rot_z": 0.0,
"rot_y": 0.0,
"rot_x": 0.0,
"transl_x": 0.0,
"transl_y": 0.0,
"transl_z": 0.0,
"acq_matrix": [256, 256, 128],
"acq_contrast": "ge",
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
"desired_snr": 50.0,
"random_seed": 0,
"output_image_type": "magnitude",
"modality": "anat",
},
},
{
"series_type": "ground_truth",
"series_description": "user description for ground truth",
"series_parameters": {
"acq_matrix": [64, 64, 20],
"rot_z": 0.0,
"rot_y": 0.0,
"rot_x": 0.0,
"transl_x": 0.0,
"transl_y": 0.0,
"transl_z": 0.0,
},
},
],
}
def test_valid_input_params(input_params: dict, expected_parsed_input: dict):
parsed_input = validate_input_params(input_params)
assert parsed_input == expected_parsed_input
input_params["global_configuration"]["ground_truth"] = GROUND_TRUTH_DATA[
"hrgt_icbm_2009a_nls_3t"
]["nii"]
parsed_input = validate_input_params(input_params)
assert parsed_input == expected_parsed_input
input_params["global_configuration"]["ground_truth"] = {
"nii": GROUND_TRUTH_DATA["hrgt_icbm_2009a_nls_3t"]["nii"],
"json": GROUND_TRUTH_DATA["hrgt_icbm_2009a_nls_3t"]["json"],
}
parsed_input = validate_input_params(input_params)
assert parsed_input == expected_parsed_input
def test_invalid_data_input_params(input_params: dict):
input_params["global_configuration"]["ground_truth"] = "i_dont_exist"
with pytest.raises(ValidationError):
validate_input_params(input_params)
input_params["global_configuration"]["image_override"] = "a_string"
with pytest.raises(ValidationError):
validate_input_params(input_params)
input_params["global_configuration"]["image_override"] = {"m0": "a_string"}
with pytest.raises(ValidationError):
validate_input_params(input_params)
def test_bad_series_type_input_params(input_params: dict):
input_params["image_series"][0]["series_type"] = "magic"
with pytest.raises(ValidationError):
validate_input_params(input_params)
def test_missing_series_parameters_inserts_defaults(input_params: dict):
input_params["image_series"][0].pop("series_parameters")
assert validate_input_params(input_params)["image_series"][0] == {
"series_type": "asl",
"series_description": "user description for asl",
"series_parameters": {
"asl_context": "m0scan control label",
"label_type": "pcasl",
"acq_matrix": [64, 64, 12],
"echo_time": [0.01, 0.01, 0.01],
"repetition_time": [10.0, 5.0, 5.0],
"rot_z": [0.0, 0.0, 0.0],
"rot_y": [0.0, 0.0, 0.0],
"rot_x": [0.0, 0.0, 0.0],
"transl_x": [0.0, 0.0, 0.0],
"transl_y": [0.0, 0.0, 0.0],
"transl_z": [0.0, 0.0, 0.0],
"label_duration": 1.8,
"signal_time": 3.6,
"label_efficiency": 0.85,
"desired_snr": 100.0,
"acq_contrast": "se",
"random_seed": 0,
"excitation_flip_angle": 90.0,
"inversion_flip_angle": 180.0,
"inversion_time": 1.0,
},
}
def test_example_input_params_valid():
validate_input_params(get_example_input_params())
| true
| true
|
f71736471faae4b0d81905d43eaa8efbc4764f0e
| 3,424
|
py
|
Python
|
superbench/benchmarks/micro_benchmarks/cuda_memory_bw_performance.py
|
yangpanMS/superbenchmark
|
4d85630abba0fe45b8cd3a51e79c15e6ac87a1e6
|
[
"MIT"
] | 59
|
2021-04-12T09:44:23.000Z
|
2022-03-27T14:33:46.000Z
|
superbench/benchmarks/micro_benchmarks/cuda_memory_bw_performance.py
|
yangpanMS/superbenchmark
|
4d85630abba0fe45b8cd3a51e79c15e6ac87a1e6
|
[
"MIT"
] | 275
|
2021-03-29T06:40:34.000Z
|
2022-03-30T07:35:49.000Z
|
superbench/benchmarks/micro_benchmarks/cuda_memory_bw_performance.py
|
yangpanMS/superbenchmark
|
4d85630abba0fe45b8cd3a51e79c15e6ac87a1e6
|
[
"MIT"
] | 24
|
2021-04-09T12:42:27.000Z
|
2022-03-16T08:26:34.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Module of the Cuda memory performance benchmarks."""
import os
import re
from superbench.common.utils import logger
from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.benchmarks.micro_benchmarks import MemBwBenchmark
class CudaMemBwBenchmark(MemBwBenchmark):
"""The Cuda memory performance benchmark class."""
def __init__(self, name, parameters=''):
"""Constructor.
Args:
name (str): benchmark name.
parameters (str): benchmark parameters.
"""
super().__init__(name, parameters)
self._bin_name = 'bandwidthTest'
def add_parser_arguments(self):
"""Add the specified arguments."""
super().add_parser_arguments()
self._parser.add_argument(
'--shmoo_mode',
action='store_true',
default=False,
help='Enable shmoo mode for bandwidthtest.',
)
def _preprocess(self):
"""Preprocess/preparation operations before the benchmarking.
Return:
True if _preprocess() succeed.
"""
if not super()._preprocess():
return False
# Check the arguments and generate the commands
for mem_type in self._args.mem_type:
command = os.path.join(self._args.bin_dir, self._bin_name)
command += ' --' + mem_type
if self._args.shmoo_mode:
command += ' mode=shmoo'
if self._args.memory == 'pinned':
command += ' memory=pinned'
command += ' --csv'
self._commands.append(command)
return True
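    # Example of a command assembled by the loop above for mem_type 'htod'
    # with shmoo mode and pinned memory enabled (the concrete mem_type values
    # are an assumption; the string layout is exactly what the loop builds):
    #   <bin_dir>/bandwidthTest --htod mode=shmoo memory=pinned --csv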
def _process_raw_result(self, cmd_idx, raw_output):
"""Function to parse raw results and save the summarized results.
self._result.add_raw_data() and self._result.add_result() need to be called to save the results.
Args:
cmd_idx (int): the index of command corresponding with the raw_output.
raw_output (str): raw output string of the micro-benchmark.
Return:
True if the raw output string is valid and result can be extracted.
"""
self._result.add_raw_data('raw_output_' + self._args.mem_type[cmd_idx], raw_output)
mem_bw = -1
valid = True
content = raw_output.splitlines()
try:
metric = self._metrics[self._mem_types.index(self._args.mem_type[cmd_idx])]
parse_logline = self._parse_logline_map[self._args.mem_type[cmd_idx]]
for line in content:
if parse_logline in line:
line = line.split(',')[1]
                    value = re.search(r'(\d+\.\d+)', line)  # escape the dot so only literal decimal numbers match
if value:
mem_bw = max(mem_bw, float(value.group(0)))
except BaseException:
valid = False
finally:
if valid is False or mem_bw == -1:
logger.error(
'The result format is invalid - round: {}, benchmark: {}, raw output: {}.'.format(
self._curr_run_index, self._name, raw_output
)
)
return False
self._result.add_result(metric, mem_bw)
return True
BenchmarkRegistry.register_benchmark('mem-bw', CudaMemBwBenchmark, platform=Platform.CUDA)
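# --- Hedged usage sketch (added for illustration, not part of the module) ---
# Once registered, the benchmark is normally resolved and launched through
# BenchmarkRegistry; the context/launch helpers and the '--mem_type'
# parameter below are assumptions about the superbench API, inferred only
# from the registration call above and self._args.mem_type:
#
#   context = BenchmarkRegistry.create_benchmark_context(
#       'mem-bw', platform=Platform.CUDA, parameters='--mem_type htod dtoh'
#   )
#   benchmark = BenchmarkRegistry.launch_benchmark(context)
#   print(benchmark.result)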
| 33.242718
| 106
| 0.591414
|
f717365a8e51a89fa39fe965f60ce80a0e94df41
| 2,243
|
py
|
Python
|
apps/project/views.py
|
fsantand/bugtracker
|
c3d3ec4f6962dcedb2d569177bf5e53c48165da0
|
[
"MIT"
] | null | null | null |
apps/project/views.py
|
fsantand/bugtracker
|
c3d3ec4f6962dcedb2d569177bf5e53c48165da0
|
[
"MIT"
] | 3
|
2021-03-30T13:43:20.000Z
|
2021-06-10T19:39:51.000Z
|
apps/project/views.py
|
fsantand/bugtracker
|
c3d3ec4f6962dcedb2d569177bf5e53c48165da0
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.views.generic import DetailView, CreateView, TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Project
from apps.bug.models import Bug, Comment
# Create your views here.
class ProjectDetail(DetailView):
model = Project
class ProjectCreate(LoginRequiredMixin,CreateView):
model = Project
fields = ['name', 'short_description']
login_url = '/login/'
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
class ReportBug(CreateView):
model = Bug
template_name = "project/report_bug.html"
fields = [
'title',
'classification',
'description'
]
def get_context_data(self, **kwargs):
self.user = self.request.user
self.project = Project.objects.get(pk = self.kwargs['pk'])
ctx = super(ReportBug, self).get_context_data(**kwargs)
ctx['user'] = self.user
ctx['project'] = self.project
return ctx
def form_valid(self, form):
form.instance.project = Project.objects.get(pk = self.kwargs['pk'])
form.instance.reporter = self.request.user
form.instance.bug_number = form.instance.project.get_bug_num()
return super().form_valid(form)
class BugThread(TemplateView):
template_name = 'bug/bug_detail.html'
def post(self, request, project, bug_number):
bug = Bug.objects.get(project__pk = project, bug_number = bug_number)
comm = Comment(
bug = bug,
commenter = request.user,
comment = request.POST['comment']
)
        if 'close_thread' in request.POST:
close_thread = request.POST['close_thread']
print(close_thread)
if close_thread:
bug.close_bug()
print(f'The bug {bug} is now closed.')
comm.save()
return redirect('bug-thread', project, bug_number)
def get_context_data(self, **kwargs):
bug = Bug.objects.get(project__pk = self.kwargs['project'], bug_number = self.kwargs['bug_number'])
ctx = {
'bug': bug
}
return ctx
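# --- Hedged wiring sketch (illustrative; the URL paths are assumptions) ---
# The views above imply URL patterns along these lines; the 'bug-thread'
# route name and its two arguments are taken from the redirect() call in
# BugThread.post, everything else is assumed:
#
#   from django.urls import path
#   urlpatterns = [
#       path('project/new/', ProjectCreate.as_view(), name='project-create'),
#       path('project/<int:pk>/', ProjectDetail.as_view(), name='project-detail'),
#       path('project/<int:pk>/report/', ReportBug.as_view(), name='report-bug'),
#       path('project/<int:project>/bug/<int:bug_number>/',
#            BugThread.as_view(), name='bug-thread'),
#   ]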
| 31.591549
| 107
| 0.633081
|
f717376b324600cdb7a435b02cd031e066e6ac60
| 13,985
|
py
|
Python
|
src/openprocurement/framework/core/utils.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 10
|
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/framework/core/utils.py
|
quintagroup/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 26
|
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/framework/core/utils.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 15
|
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
from functools import partial, wraps
from logging import getLogger
from time import sleep
from cornice.resource import resource
from couchdb import ResourceConflict
from dateorro import calc_datetime
from jsonpointer import resolve_pointer
from pyramid.compat import decode_path_info
from pyramid.exceptions import URLDecodeError
from openprocurement.api.constants import WORKING_DAYS
from openprocurement.api.utils import (
error_handler,
update_logging_context,
set_modetest_titles,
get_revision_changes,
get_now,
handle_store_exceptions,
context_unpack,
apply_data_patch,
append_revision,
get_doc_by_id,
ACCELERATOR_RE,
generate_id,
)
from openprocurement.framework.core.models import IAgreement
from openprocurement.framework.core.traversal import (
framework_factory,
submission_factory,
qualification_factory,
agreement_factory,
)
LOGGER = getLogger("openprocurement.framework.core")
ENQUIRY_PERIOD_DURATION = 10
SUBMISSION_STAND_STILL_DURATION = 30
frameworksresource = partial(resource, error_handler=error_handler, factory=framework_factory)
submissionsresource = partial(resource, error_handler=error_handler, factory=submission_factory)
qualificationsresource = partial(resource, error_handler=error_handler, factory=qualification_factory)
agreementsresource = partial(resource, error_handler=error_handler, factory=agreement_factory)
class isFramework(object):
"""Framework Route predicate. """
def __init__(self, val, config):
self.val = val
def text(self):
return "frameworkType = %s" % (self.val,)
phash = text
def __call__(self, context, request):
if request.framework is not None:
return getattr(request.framework, "frameworkType", None) == self.val
return False
class isSubmission(object):
"""Submission Route predicate. """
def __init__(self, val, config):
self.val = val
def text(self):
return "submissionType = %s" % (self.val,)
phash = text
def __call__(self, context, request):
if request.submission is not None:
return getattr(request.submission, "submissionType", None) == self.val
return False
class isQualification(object):
"""Qualification Route predicate. """
def __init__(self, val, config):
self.val = val
def text(self):
return "qualificationType = %s" % (self.val,)
phash = text
def __call__(self, context, request):
if request.qualification is not None:
return getattr(request.qualification, "qualificationType", None) == self.val
return False
class IsAgreement(object):
""" Agreement route predicate. """
def __init__(self, val, config):
self.val = val
def text(self):
return "agreementType = %s" % (self.val,)
phash = text
def __call__(self, context, request):
if request.agreement is not None:
return getattr(request.agreement, "agreementType", None) == self.val
return False
def register_framework_frameworkType(config, model):
"""Register a framework frameworkType.
:param config:
The pyramid configuration object that will be populated.
:param model:
The framework model class
"""
config.registry.framework_frameworkTypes[model.frameworkType.default] = model
def register_submission_submissionType(config, model):
submission_type = model.submissionType.default
config.registry.submission_submissionTypes[submission_type] = model
def register_qualification_qualificationType(config, model):
qualification_type = model.qualificationType.default
config.registry.qualification_qualificationTypes[qualification_type] = model
def register_agreement_agreementType(config, model):
agreement_type = model.agreementType.default
config.registry.agreement_agreementTypes[agreement_type] = model
def object_from_data(request, data, obj_name, raise_error=True, create=True):
objType = data.get("%sType" % obj_name, "electronicCatalogue")
model_types = getattr(request.registry, "%s_%sTypes" % (obj_name, obj_name))
model = model_types.get(objType)
if model is None and raise_error:
request.errors.add("body", "%sType" % obj_name, "Not implemented")
request.errors.status = 415
raise error_handler(request)
update_logging_context(request, {"%s_type" % obj_name: objType})
if model is not None and create:
model = model(data)
return model
def framework_from_data(request, data, raise_error=True, create=True):
return object_from_data(request, data, "framework", raise_error=raise_error, create=create)
def submission_from_data(request, data, raise_error=True, create=True):
return object_from_data(request, data, "submission", raise_error=raise_error, create=create)
def qualification_from_data(request, data, raise_error=True, create=True):
return object_from_data(request, data, "qualification", raise_error=raise_error, create=create)
def agreement_from_data(request, data, raise_error=True, create=True):
if request.authenticated_role == "agreements":
data["agreementType"] = "cfaua"
if not data.get("agreementType") and raise_error:
request.errors.add("data", "agreementType", "This field is required")
request.errors.status = 422
raise error_handler(request)
return object_from_data(request, data, "agreement", raise_error=raise_error, create=create)
def extract_doc_adapter(request, doc_type, doc_id):
    doc_type_singular = doc_type[:-1]  # strip the trailing "s": "frameworks" -> "framework"
doc = get_doc_by_id(request.registry.databases[doc_type], doc_type_singular.capitalize(), doc_id)
if doc is None:
request.errors.add("url", "%s_id" % doc_type_singular, "Not Found")
request.errors.status = 404
raise error_handler(request)
# obsolete lowercase doc_type in agreements
    if doc.get("doc_type") == "agreement":  # doc is guaranteed non-None at this point
request.errors.add("url", "agreement_id", "Archived")
request.errors.status = 410
raise error_handler(request)
method = getattr(request, "%s_from_data" % doc_type_singular)
return method(doc)
def extract_doc(request):
try:
# empty if mounted under a path in mod_wsgi, for example
path = decode_path_info(request.environ["PATH_INFO"] or "/")
except KeyError:
path = "/"
except UnicodeDecodeError as e:
raise URLDecodeError(e.encoding, e.object, e.start, e.end, e.reason)
# obj_id = ""
# extract object id
parts = path.split("/")
if len(parts) < 4 or parts[3] not in ("frameworks", "submissions", "qualifications", "agreements"):
return
# obj_type = parts[3][0].upper() + parts[3][1:-1]
obj_type = parts[3]
obj_id = parts[4]
return extract_doc_adapter(request, obj_type, obj_id)
def generate_framework_pretty_id(ctime, db, server_id=""):
key = ctime.date().isoformat()
prettyIDdoc = "frameworkPrettyID_" + server_id if server_id else "frameworkPrettyID"
while True:
try:
prettyID = db.get(prettyIDdoc, {"_id": prettyIDdoc})
index = prettyID.get(key, 1)
prettyID[key] = index + 1
db.save(prettyID)
except ResourceConflict: # pragma: no cover
pass
except Exception: # pragma: no cover
sleep(1)
else:
break
return "UA-F-{:04}-{:02}-{:02}-{:06}{}".format(
ctime.year, ctime.month, ctime.day, index, server_id and "-" + server_id
)
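# Illustration of the format string above: for ctime on 2021-05-07 and the
# first ID issued that day (index == 1), the function returns
# "UA-F-2021-05-07-000001"; with server_id="2" it returns
# "UA-F-2021-05-07-000001-2".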
def generate_agreementID(ctime, db, server_id=""):
key = ctime.date().isoformat()
prettyIDdoc = "agreementID_" + server_id if server_id else "agreementID"
while True:
try:
agreementID = db.get(prettyIDdoc, {"_id": prettyIDdoc})
index = agreementID.get(key, 1)
agreementID[key] = index + 1
db.save(agreementID)
except ResourceConflict: # pragma: no cover
pass
except Exception: # pragma: no cover
sleep(1)
else:
break
return "UA-{:04}-{:02}-{:02}-{:06}{}".format(
ctime.year, ctime.month, ctime.day, index, server_id and "-" + server_id
)
def save_object(request, obj_name, with_test_mode=True, additional_obj_names=""):
obj = request.validated[obj_name]
if with_test_mode and obj.mode == "test":
set_modetest_titles(obj)
patch = get_revision_changes(obj.serialize("plain"), request.validated["%s_src" % obj_name])
if patch:
now = get_now()
append_obj_revision(request, obj, patch, now)
old_date_modified = obj.dateModified
if getattr(obj, "modified", True):
obj.dateModified = now
for i in additional_obj_names:
if i in request.validated:
request.validated[i].dateModified = now
with handle_store_exceptions(request):
obj.store(request.registry.databases[f"{obj_name}s"]) # TODO a better way to specify db name?
LOGGER.info(
"Saved {} {}: dateModified {} -> {}".format(
obj_name,
obj.id,
old_date_modified and old_date_modified.isoformat(),
obj.dateModified.isoformat()
),
extra=context_unpack(request, {"MESSAGE_ID": "save_{}".format(obj_name)}, {"RESULT": obj.rev}),
)
return True
def save_framework(request, additional_obj_names=""):
return save_object(request, "framework", additional_obj_names=additional_obj_names)
def save_submission(request, additional_obj_names=""):
return save_object(request, "submission", with_test_mode=False, additional_obj_names=additional_obj_names)
def save_qualification(request, additional_obj_names=""):
return save_object(request, "qualification", with_test_mode=False, additional_obj_names=additional_obj_names)
def save_agreement(request, additional_obj_names=""):
return save_object(request, "agreement", with_test_mode=False, additional_obj_names=additional_obj_names)
def get_framework_accelerator(context):
if context and "frameworkDetails" in context and context["frameworkDetails"]:
re_obj = ACCELERATOR_RE.search(context["frameworkDetails"])
if re_obj and "accelerator" in re_obj.groupdict():
return int(re_obj.groupdict()["accelerator"])
return None
def acceleratable(wrapped):
@wraps(wrapped)
def wrapper(date_obj, timedelta_obj, framework=None, working_days=False, calendar=WORKING_DAYS, **kwargs):
accelerator = get_framework_accelerator(framework)
if accelerator:
return calc_datetime(date_obj, timedelta_obj, accelerator=accelerator)
return wrapped(
date_obj, timedelta_obj, framework=framework, working_days=working_days, calendar=calendar, **kwargs
)
return wrapper
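# Hedged example of @acceleratable in use; the wrapped function name is
# hypothetical, and the exact frameworkDetails string is an assumption about
# what ACCELERATOR_RE matches:
#
#   @acceleratable
#   def calculate_framework_date(date_obj, timedelta_obj, framework=None,
#                                working_days=False, calendar=WORKING_DAYS):
#       return calc_datetime(date_obj, timedelta_obj)
#
# With framework={"frameworkDetails": "quick, accelerator=1440"} the wrapper
# bypasses the wrapped body entirely and returns
# calc_datetime(date_obj, timedelta_obj, accelerator=1440).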
def apply_patch(request, obj_name, data=None, save=True, src=None, additional_obj_names=""):
save_map = {
"framework": save_framework,
"submission": save_submission,
"qualification": save_qualification,
"agreement": save_agreement,
}
data = request.validated["data"] if data is None else data
patch = data and apply_data_patch(src or request.context.serialize(), data)
if patch:
        # Can't be replaced with "obj_name in save_map": for a child patch, obj_name is the same as for the parent
if request.context.__class__.__name__.lower() in save_map:
request.validated[obj_name].import_data(patch)
else:
request.context.import_data(patch)
if save:
save_func = save_map.get(obj_name)
return save_func(request, additional_obj_names=additional_obj_names)
def append_obj_revision(request, obj, patch, date):
status_changes = [p for p in patch if all([
p["path"].endswith("/status"),
p["op"] == "replace"
])]
changed_obj = obj
for change in status_changes:
changed_obj = resolve_pointer(obj, change["path"].replace("/status", ""))
if changed_obj and hasattr(changed_obj, "date") and hasattr(changed_obj, "revisions"):
date_path = change["path"].replace("/status", "/date")
if changed_obj.date and not any([p for p in patch if date_path == p["path"]]):
patch.append({"op": "replace", "path": date_path, "value": changed_obj.date.isoformat()})
elif not changed_obj.date:
patch.append({"op": "remove", "path": date_path})
changed_obj.date = date
else:
changed_obj = obj
return append_revision(request, changed_obj, patch)
def obj_serialize(request, framework_data, fields):
obj = request.framework_from_data(framework_data, raise_error=False)
obj.__parent__ = request.context
return dict([(i, j) for i, j in obj.serialize("view").items() if i in fields])
def agreement_serialize(request, agreement_data, fields):
agreement = request.agreement_from_data(agreement_data, raise_error=False)
agreement.__parent__ = request.context
return {i: j for i, j in agreement.serialize("view").items() if i in fields}
def get_submission_by_id(request, submission_id):
if submission_id:
return request.registry.databases.submissions.get(submission_id)
def get_framework_by_id(request, framework_id):
if framework_id:
return request.registry.databases.frameworks.get(framework_id)
def get_agreement_by_id(request, agreement_id):
if agreement_id:
return request.registry.databases.agreements.get(agreement_id)
def set_agreement_ownership(item, request):
item.owner_token = generate_id()
def get_agreement(model):
while not IAgreement.providedBy(model):
model = model.__parent__
return model
| 35.050125
| 113
| 0.687093
|
f71738e4c80d2271b070b3350e92aeee9a9e5ae3
| 3,954
|
py
|
Python
|
stcloud/models/update_plan_response_dto.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | 1
|
2020-05-01T12:15:52.000Z
|
2020-05-01T12:15:52.000Z
|
stcloud/models/update_plan_response_dto.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | null | null | null |
stcloud/models/update_plan_response_dto.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Sematext Cloud API
API Explorer provides access and documentation for Sematext REST API. The REST API requires the API Key to be sent as part of `Authorization` header. E.g.: `Authorization : apiKey e5f18450-205a-48eb-8589-7d49edaea813`. # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class UpdatePlanResponseDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'events': 'list[EventDto]',
'plan_id': 'int'
}
attribute_map = {
'events': 'events',
'plan_id': 'planId'
}
def __init__(self, events=None, plan_id=None): # noqa: E501
"""UpdatePlanResponseDto - a model defined in Swagger""" # noqa: E501
self._events = None
self._plan_id = None
self.discriminator = None
if events is not None:
self.events = events
if plan_id is not None:
self.plan_id = plan_id
@property
def events(self):
"""Gets the events of this UpdatePlanResponseDto. # noqa: E501
:return: The events of this UpdatePlanResponseDto. # noqa: E501
:rtype: list[EventDto]
"""
return self._events
@events.setter
def events(self, events):
"""Sets the events of this UpdatePlanResponseDto.
:param events: The events of this UpdatePlanResponseDto. # noqa: E501
:type: list[EventDto]
"""
self._events = events
@property
def plan_id(self):
"""Gets the plan_id of this UpdatePlanResponseDto. # noqa: E501
:return: The plan_id of this UpdatePlanResponseDto. # noqa: E501
:rtype: int
"""
return self._plan_id
@plan_id.setter
def plan_id(self, plan_id):
"""Sets the plan_id of this UpdatePlanResponseDto.
:param plan_id: The plan_id of this UpdatePlanResponseDto. # noqa: E501
:type: int
"""
self._plan_id = plan_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UpdatePlanResponseDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdatePlanResponseDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
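if __name__ == "__main__":
    # Illustrative usage (added sketch, not part of the generated client):
    # build the DTO and round-trip it through the dict/str helpers above.
    _dto = UpdatePlanResponseDto(events=[], plan_id=42)
    print(_dto.to_dict())  # {'events': [], 'plan_id': 42}
    print(_dto)            # pprint-formatted via to_str()/__repr__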
| 28.861314
| 236
| 0.574608
|
f7173966ea1c7e65d2e1a2dd36186f075d2562fc
| 5,614
|
py
|
Python
|
examples/Cliner/CliNER/code/feature_extraction/umls_dir/create_sqliteDB.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | null | null | null |
examples/Cliner/CliNER/code/feature_extraction/umls_dir/create_sqliteDB.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | null | null | null |
examples/Cliner/CliNER/code/feature_extraction/umls_dir/create_sqliteDB.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | null | null | null |
# create_sqliteDB.py creates a .db file for performing UMLS searches.
import atexit
import os
import sqlite3
import sys
from read_config import enabled_modules
features_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if features_dir not in sys.path:
sys.path.append(features_dir)
# find where umls tables are located
enabled = enabled_modules()
umls_tables = enabled['UMLS']
# set to True when create_db() is successful
success = False
db_path = None
conn = None
MRSTY_TABLE_FILE = None
MRCON_TABLE_FILE = None
MRREL_TABLE_FILE = None
LRABR_TABLE_FILE = None
# this ensures files are closed properly and umls.db is removed if not successful
@atexit.register
def umls_db_cleanup():
# pylint: disable=global-statement
global success
global conn
global db_path
global MRSTY_TABLE_FILE
global MRCON_TABLE_FILE
global MRREL_TABLE_FILE
global LRABR_TABLE_FILE
if conn is not None:
conn.close()
if MRSTY_TABLE_FILE is not None:
MRSTY_TABLE_FILE.close()
if MRCON_TABLE_FILE is not None:
MRCON_TABLE_FILE.close()
if MRREL_TABLE_FILE is not None:
MRREL_TABLE_FILE.close()
if LRABR_TABLE_FILE is not None:
LRABR_TABLE_FILE.close()
if success is False:
# remove umls.db, it is junk now
if db_path is not None:
os.remove(db_path)
def create_db():
# pylint: disable=global-statement
global success
global conn
global db_path
global MRSTY_TABLE_FILE
global MRCON_TABLE_FILE
global MRREL_TABLE_FILE
global LRABR_TABLE_FILE
print("\ncreating umls.db")
# connect to the .db file we are creating.
db_path = os.path.join(umls_tables, 'umls.db')
conn = sqlite3.connect(db_path)
conn.text_factory = str
print("opening files")
# load data in files.
try:
mrsty_path = os.path.join(umls_tables, 'MRSTY.RRF')
MRSTY_TABLE_FILE = open(mrsty_path, "r")
except IOError:
print("\nNo file to use for creating MRSTY.RRF table\n")
sys.exit()
try:
mrcon_path = os.path.join(umls_tables, 'MRCONSO.RRF')
MRCON_TABLE_FILE = open(mrcon_path, "r")
except IOError:
print("\nNo file to use for creating MRCONSO.RRF table\n")
sys.exit()
try:
mrrel_path = os.path.join(umls_tables, 'MRREL.RRF')
MRREL_TABLE_FILE = open(mrrel_path, "r")
except IOError:
print("\nNo file to use for creating MRREL.RRF table\n")
sys.exit()
try:
lrabr_path = os.path.join(umls_tables, 'LRABR')
LRABR_TABLE_FILE = open(lrabr_path, "r")
except IOError:
print("\nNo file to use for creating LRABR table\n")
sys.exit()
print("creating tables")
c = conn.cursor()
# create tables.
c.execute("CREATE TABLE MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) ;")
c.execute(
"CREATE TABLE MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, SAUI, \
SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) ;")
c.execute(
"CREATE TABLE MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, \
RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF );")
c.execute("CREATE TABLE LRABR( EUI1, ABR, TYPE, EUI2, STR);")
print("inserting data into MRSTY table")
for line in MRSTY_TABLE_FILE:
line = line.strip('\n')
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 6
c.execute("INSERT INTO MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) \
values( ?, ?, ?, ?, ?, ?)", tuple(line))
print("inserting data into MRCON table")
for line in MRCON_TABLE_FILE:
line = line.strip('\n')
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 18
c.execute(
"INSERT INTO MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, \
SAUI, SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) \
values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);",
tuple(line))
print("inserting data into MRREL table")
for line in MRREL_TABLE_FILE:
line = line.strip('\n')
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 16
c.execute(
"INSERT INTO MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, \
RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF ) \
values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )",
tuple(line))
print("inserting into LRABR table")
for line in LRABR_TABLE_FILE:
line = line.strip('\n')
line = line.split('|')
line.pop()
assert len(line) == 5
c.execute("INSERT INTO LRABR( EUI1, ABR, TYPE, EUI2, STR) \
values( ?, ?, ?, ?,?)", tuple(line))
print("creating indices")
# create indices for faster queries
c.execute("CREATE INDEX mrsty_cui_map ON MRSTY(CUI)")
c.execute("CREATE INDEX mrcon_str_map ON MRCON(STR)")
c.execute("CREATE INDEX mrcon_cui_map ON MRCON(CUI)")
c.execute("CREATE INDEX mrrel_cui2_map ON MRREL( CUI2 )")
c.execute("CREATE INDEX mrrel_cui1_map on MRREL( CUI1 ) ")
c.execute("CREATE INDEX mrrel_rel_map on MRREL( REL )")
c.execute("CREATE INDEX lrabr_abr_map on LRABR(ABR)")
c.execute("CREATE INDEX lrabr_str_map on LRABR(STR)")
# save changes to .db
conn.commit()
success = True
print("\nsqlite database created")
if __name__ == "__main__":
create_db()
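# --- Hedged query sketch (added for illustration) ---
# After create_db() finishes, lookups can use the indices defined above;
# the table and column names are exactly those created in this module, the
# CUI value is only a placeholder:
#
#   conn = sqlite3.connect(os.path.join(umls_tables, 'umls.db'))
#   c = conn.cursor()
#   c.execute("SELECT STY FROM MRSTY WHERE CUI = ?", ("C0027051",))
#   print(c.fetchall())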
| 27.385366
| 79
| 0.612576
|
f7173a7583eedd9e7c316806a5cfde0de9f793f5
| 1,516
|
py
|
Python
|
rohans2dtlkit/simonsays.py
|
rohan-patra/2DGameDevToolkit
|
337b86066b306e6654cd520dd9cf60b224dc7c0d
|
[
"MIT"
] | 1
|
2020-03-25T02:39:11.000Z
|
2020-03-25T02:39:11.000Z
|
rohans2dtlkit/simonsays.py
|
rohan-patra/2DGameDevToolkit
|
337b86066b306e6654cd520dd9cf60b224dc7c0d
|
[
"MIT"
] | null | null | null |
rohans2dtlkit/simonsays.py
|
rohan-patra/2DGameDevToolkit
|
337b86066b306e6654cd520dd9cf60b224dc7c0d
|
[
"MIT"
] | null | null | null |
"""Simon Says
Exercises
1. Speed up tile flash rate.
2. Add more tiles.
"""
from random import choice
from time import sleep
from turtle import *
from rohans2dtlkit import floor, square, vector
pattern = []
guesses = []
tiles = {
vector(0, 0): ('red', 'dark red'),
vector(0, -200): ('blue', 'dark blue'),
vector(-200, 0): ('green', 'dark green'),
vector(-200, -200): ('yellow', 'khaki'),
}
def grid():
"Draw grid of tiles."
square(0, 0, 200, 'dark red')
square(0, -200, 200, 'dark blue')
square(-200, 0, 200, 'dark green')
square(-200, -200, 200, 'khaki')
update()
def flash(tile):
"Flash tile in grid."
glow, dark = tiles[tile]
square(tile.x, tile.y, 200, glow)
update()
sleep(0.5)
square(tile.x, tile.y, 200, dark)
update()
sleep(0.5)
def grow():
"Grow pattern and flash tiles."
tile = choice(list(tiles))
pattern.append(tile)
for tile in pattern:
flash(tile)
print('Pattern length:', len(pattern))
guesses.clear()
def tap(x, y):
"Respond to screen tap."
onscreenclick(None)
x = floor(x, 200)
y = floor(y, 200)
tile = vector(x, y)
index = len(guesses)
if tile != pattern[index]:
exit()
guesses.append(tile)
flash(tile)
if len(guesses) == len(pattern):
grow()
onscreenclick(tap)
def start(x, y):
"Start game."
grow()
onscreenclick(tap)
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
grid()
onscreenclick(start)
done()
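# --- Sketch for exercise 1, "Speed up tile flash rate" (added, hedged) ---
# One minimal approach using only names defined above: derive the delay from
# the current pattern length inside flash(), so tiles flash faster as the
# game progresses:
#
#   delay = max(0.1, 0.5 - 0.05 * len(pattern))
#   sleep(delay)  # replaces both sleep(0.5) calls in flash()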
| 18.26506
| 47
| 0.58905
|
f7173b7401256a6e7120034785ae4d0cbd732b2d
| 3,478
|
py
|
Python
|
Game/finger.py
|
Ravnit202/PYJAC
|
65987f8afd2e54e1b308b09f45f291e374e79bd2
|
[
"MIT"
] | 1
|
2022-02-27T21:39:46.000Z
|
2022-02-27T21:39:46.000Z
|
Game/finger.py
|
Ravnit202/PYJAC
|
65987f8afd2e54e1b308b09f45f291e374e79bd2
|
[
"MIT"
] | null | null | null |
Game/finger.py
|
Ravnit202/PYJAC
|
65987f8afd2e54e1b308b09f45f291e374e79bd2
|
[
"MIT"
] | 1
|
2022-01-23T17:56:16.000Z
|
2022-01-23T17:56:16.000Z
|
import cv2
import mediapipe
import numpy
import pydirectinput
class FingerDetector:
wScr, hScr = pydirectinput.size() #Get the current screen resolution
pX, pY = 0, 0
cX, cY = 0, 0
def __init__(self):
"""
Initialize all objects
"""
#Load the mediapipe libraries/solutions
self.initHand = mediapipe.solutions.hands
self.mainHand = self.initHand.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)
self.draw = mediapipe.solutions.drawing_utils
self.fingerTips = []
self.img = None
def handLandmarks(self, colorImg):
"""
Detect the hand landmarks
"""
landmarkList = []
landmarkPositions = self.mainHand.process(colorImg) # Process the given image
landmarkCheck = landmarkPositions.multi_hand_landmarks
if landmarkCheck: # Checks if landmarks exist
            for hand in landmarkCheck: # iterate over each detected hand (the outer index was unused and shadowed the inner one)
for index, landmark in enumerate(hand.landmark):
self.draw.draw_landmarks(self.img, hand, self.initHand.HAND_CONNECTIONS)
h, w, c = self.img.shape
centerX, centerY = int(landmark.x * w), int(landmark.y * h)
landmarkList.append([index, centerX, centerY])
return landmarkList
def fingers(self, landmarks):
"""
Check the action of the fingers
"""
fingerTips = []
tipIds = [4, 8, 12, 16, 20] #Values for each fingertip
#Check if the thumb is up
if landmarks[tipIds[0]][1] > self.lmList[tipIds[0] - 1][1]:
fingerTips.append(1)
else:
fingerTips.append(0)
#Check if fingers are up and the thumb is down
for id in range(1, 5):
if landmarks[tipIds[id]][2] < landmarks[tipIds[id] - 3][2]: # Checks to see if the tip of the finger is higher than the joint
fingerTips.append(1)
else:
fingerTips.append(0)
return fingerTips
def fingerDetection(self, frame):
"""
Detect the fingers positions through the frame
"""
frame = cv2.flip(frame, 1)
self.img = frame
imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Changes the format of the frames from BGR to RGB
self.lmList = self.handLandmarks(imgRGB)
if len(self.lmList) > 12:
x1, y1 = self.lmList[8][1:]
finger = self.fingers(self.lmList)
if finger[1] == 1 and finger[2] == 0:
x3 = numpy.interp(x1, (75, 720 - 75), (75, self.wScr)) # Converts the width of the window relative to the screen width
y3 = numpy.interp(y1, (75, 560 - 75), (75, self.hScr)) # Converts the height of the window relative to the screen height
                cX = self.pX + (x3 - self.pX) / 2  # Smooth out the mouse x movement
                cY = self.pY + (y3 - self.pY) / 2  # Smooth out the mouse y movement
pydirectinput.moveTo(int(cX), int(cY)) #Move the mouse using pydirectinput
self.pX, self.pY = cX, cY # Save the current x and y values
if finger[1] == 0 and finger[0] == 1: # Check if the pointer finger is down and the thumb finger is up
pydirectinput.rightClick()
return
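# Minimal driver sketch for FingerDetector. The module name `finger` and the
# webcam index are assumptions for illustration, not part of the original file.
# fingerDetection() moves/right-clicks the mouse as a side effect and leaves
# the annotated frame in detector.img.
import cv2
from finger import FingerDetector
detector = FingerDetector()
cap = cv2.VideoCapture(0)  # default webcam
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    detector.fingerDetection(frame)
    cv2.imshow('hand', detector.img)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break
cap.release()
cv2.destroyAllWindows()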
| 37.397849
| 138
| 0.572168
|
import cv2
import mediapipe
import numpy
import pydirectinput
class FingerDetector:
wScr, hScr = pydirectinput.size()
pX, pY = 0, 0
cX, cY = 0, 0
def __init__(self):
self.initHand = mediapipe.solutions.hands
self.mainHand = self.initHand.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)
self.draw = mediapipe.solutions.drawing_utils
self.fingerTips = []
self.img = None
def handLandmarks(self, colorImg):
landmarkList = []
landmarkPositions = self.mainHand.process(colorImg)
landmarkCheck = landmarkPositions.multi_hand_landmarks
if landmarkCheck:
for index, hand in enumerate(landmarkCheck):
for index, landmark in enumerate(hand.landmark):
self.draw.draw_landmarks(self.img, hand, self.initHand.HAND_CONNECTIONS)
h, w, c = self.img.shape
centerX, centerY = int(landmark.x * w), int(landmark.y * h)
landmarkList.append([index, centerX, centerY])
return landmarkList
def fingers(self, landmarks):
fingerTips = []
tipIds = [4, 8, 12, 16, 20]
if landmarks[tipIds[0]][1] > self.lmList[tipIds[0] - 1][1]:
fingerTips.append(1)
else:
fingerTips.append(0)
for id in range(1, 5):
if landmarks[tipIds[id]][2] < landmarks[tipIds[id] - 3][2]:
fingerTips.append(1)
else:
fingerTips.append(0)
return fingerTips
def fingerDetection(self, frame):
frame = cv2.flip(frame, 1)
self.img = frame
imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.lmList = self.handLandmarks(imgRGB)
if len(self.lmList) > 12:
x1, y1 = self.lmList[8][1:]
finger = self.fingers(self.lmList)
if finger[1] == 1 and finger[2] == 0:
x3 = numpy.interp(x1, (75, 720 - 75), (75, self.wScr))
y3 = numpy.interp(y1, (75, 560 - 75), (75, self.hScr))
                cX = self.pX + (x3 - self.pX) / 2
                cY = self.pY + (y3 - self.pY) / 2
pydirectinput.moveTo(int(cX), int(cY))
self.pX, self.pY = cX, cY
if finger[1] == 0 and finger[0] == 1:
pydirectinput.rightClick()
return
| true
| true
|
f7173b8750ccd8758d98ec5020d4c31e69bddff1
| 3,395
|
py
|
Python
|
myproject/settings.py
|
sabariask/final-cloud-app-with-database
|
b95769cf8dfe77d597dff30666b82bd5478cdc1a
|
[
"Apache-2.0"
] | null | null | null |
myproject/settings.py
|
sabariask/final-cloud-app-with-database
|
b95769cf8dfe77d597dff30666b82bd5478cdc1a
|
[
"Apache-2.0"
] | null | null | null |
myproject/settings.py
|
sabariask/final-cloud-app-with-database
|
b95769cf8dfe77d597dff30666b82bd5478cdc1a
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'aay0j_9b&ky3a7(8m8il+-1ud(scw12@w5!+5-=gsk6ynzi0ls'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# <HINT> add your cloud host here
ALLOWED_HOSTS = ['onlinecourseapp.eu-gb.cf.appdomain.cloud']
# Application definition
INSTALLED_APPS = [
'onlinecourse.apps.OnlinecourseConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
MEDIA_URL = '/media/'
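# Hard-coding SECRET_KEY and DEBUG is fine for coursework, but a deployment
# usually pulls them from the environment instead. A minimal sketch -- the
# DJANGO_* variable names are conventions assumed here, not something this
# project defines; the defaults preserve the development behaviour above.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
DEBUG = os.environ.get('DJANGO_DEBUG', '1') == '1'
ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', ','.join(ALLOWED_HOSTS)).split(',')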
| 26.523438
| 91
| 0.699558
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'aay0j_9b&ky3a7(8m8il+-1ud(scw12@w5!+5-=gsk6ynzi0ls'
DEBUG = True
# <HINT> add your cloud host here
ALLOWED_HOSTS = ['onlinecourseapp.eu-gb.cf.appdomain.cloud']
# Application definition
INSTALLED_APPS = [
'onlinecourse.apps.OnlinecourseConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
MEDIA_URL = '/media/'
| true
| true
|
f7173c865c6e1cbc6ede078b6fa126c1cac8a519
| 1,332
|
py
|
Python
|
test/selenium/src/lib/meta.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | null | null | null |
test/selenium/src/lib/meta.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 12
|
2015-01-08T14:50:19.000Z
|
2017-11-29T19:37:53.000Z
|
test/selenium/src/lib/meta.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 1
|
2015-01-08T13:25:09.000Z
|
2015-01-08T13:25:09.000Z
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""Metaclasses module"""
from lib import decorator, exception, constants
class DecoratePublicMethods(type):
# todo: this should be refactored to DecorateMethods and used with a
# factory
"""
Decorates all test methods with a decorator that makes a screenshot on
any exception.
"""
def __new__(mcs, name, bases, dct):
for attr_name, value in dct.items():
if all(
[method_name in attr_name for method_name in [
constants.test_runner.TEST_METHOD_PREFIX,
constants.test_runner.TEST_METHOD_POSTFIX]
]) and callable(value):
dct[attr_name] = decorator.take_screenshot_on_error(value)
return super(DecoratePublicMethods, mcs).__new__(mcs, name, bases, dct)
class RequireDocs(type):
"""
Requires from all methods to include docstrings.
"""
def __new__(mcs, name, bases, dct):
for attr_name, value in dct.items():
      # hasattr(value, "__doc__") is always True for functions (the attribute
      # exists but is None when no docstring was written), so test the value.
      if callable(value) and not getattr(value, "__doc__", None):
raise exception.DocstringsMissing(attr_name)
return super(RequireDocs, mcs).__new__(mcs, name, bases, dct)
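# Usage sketch: this is Python 2-era code (the repo dates from 2015), so a test
# class opts in through the __metaclass__ attribute. The class and method names
# below are illustrative only; the real prefix/postfix strings that
# DecoratePublicMethods matches live in constants.test_runner.
class ExampleTests(object):
  """Example suite whose matching test methods get the screenshot decorator."""
  __metaclass__ = DecoratePublicMethods
  def test_something_smoke(self):
    """Docstrings like this one are what RequireDocs would insist on."""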
| 32.487805
| 78
| 0.707958
|
from lib import decorator, exception, constants
class DecoratePublicMethods(type):
def __new__(mcs, name, bases, dct):
for attr_name, value in dct.items():
if all(
[method_name in attr_name for method_name in [
constants.test_runner.TEST_METHOD_PREFIX,
constants.test_runner.TEST_METHOD_POSTFIX]
]) and callable(value):
dct[attr_name] = decorator.take_screenshot_on_error(value)
return super(DecoratePublicMethods, mcs).__new__(mcs, name, bases, dct)
class RequireDocs(type):
def __new__(mcs, name, bases, dct):
for attr_name, value in dct.items():
      if callable(value) and not getattr(value, "__doc__", None):
raise exception.DocstringsMissing(attr_name)
return super(RequireDocs, mcs).__new__(mcs, name, bases, dct)
| true
| true
|
f7173cd9ebdb7121ecd49307a8e79a80894f89a2
| 47
|
py
|
Python
|
components/__init__.py
|
nuric/pix2rule
|
0f0f654e488a1839455786ccc4ad023c0aa0c2e8
|
[
"MIT"
] | 6
|
2021-06-15T10:37:34.000Z
|
2022-01-26T14:41:44.000Z
|
components/__init__.py
|
nuric/pix2rule
|
0f0f654e488a1839455786ccc4ad023c0aa0c2e8
|
[
"MIT"
] | 3
|
2021-08-25T15:17:22.000Z
|
2021-11-12T13:52:14.000Z
|
components/__init__.py
|
nuric/pix2rule
|
0f0f654e488a1839455786ccc4ad023c0aa0c2e8
|
[
"MIT"
] | 1
|
2021-11-08T17:36:52.000Z
|
2021-11-08T17:36:52.000Z
|
"""Reusable components used in the project."""
| 23.5
| 46
| 0.723404
| true
| true
|
|
f7173ce767b2f0ec4ef68dd6e5eb6e37168beeea
| 3,958
|
py
|
Python
|
python/oneflow/test/modules/test_ne.py
|
LiPengze97/oneflow
|
1c1d2d3faa1c02d20e009046a290cf1095ee12e0
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/modules/test_ne.py
|
LiPengze97/oneflow
|
1c1d2d3faa1c02d20e009046a290cf1095ee12e0
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/modules/test_ne.py
|
LiPengze97/oneflow
|
1c1d2d3faa1c02d20e009046a290cf1095ee12e0
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_ne(test_case, shape, device):
arr1 = np.random.randn(*shape)
arr2 = np.random.randn(*shape)
input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))
other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))
of_out = flow.ne(input, other)
of_out2 = flow.not_equal(input, other)
np_out = np.not_equal(arr1, arr2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
test_case.assertTrue(np.array_equal(of_out2.numpy(), np_out))
def _test_tensor_ne_operator(test_case, shape, device):
arr1 = np.random.randn(*shape)
arr2 = np.random.randn(*shape)
input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))
other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))
of_out = input.ne(other)
np_out = np.not_equal(arr1, arr2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_ne_int(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1
of_out = flow.ne(input, num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_tensor_ne_operator_int(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1
of_out = input.ne(num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_ne_float(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1.0
of_out = flow.ne(input, num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_tensor_ne_operator_float(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1.0
of_out = input.ne(num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
@flow.unittest.skip_unless_1n1d()
class TestNe(flow.unittest.TestCase):
def test_ne(test_case):
arg_dict = OrderedDict()
arg_dict["test_func"] = [
_test_ne,
_test_tensor_ne_operator,
_test_ne_int,
_test_tensor_ne_operator_int,
_test_ne_float,
_test_tensor_ne_operator_float,
]
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(auto_backward=False, check_graph=False)
def test_ne_with_0shape_data(test_case):
device = random_device()
x1 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
y1 = torch.ne(x1, x2)
y2 = torch.ne(x1, 2)
y3 = torch.ne(x1, 2.0)
return (y1, y2, y3)
if __name__ == "__main__":
unittest.main()
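# For reference, the NumPy baseline these cases assert against is elementwise
# and broadcasts scalar comparands -- plain NumPy, no OneFlow required:
#   >>> a = np.array([[1.0, 2.0], [3.0, 4.0]])
#   >>> np.not_equal(a, np.array([[1.0, 0.0], [3.0, 5.0]]))
#   array([[False,  True],
#          [False,  True]])
#   >>> np.not_equal(a, 2.0)  # scalar broadcasts across the array
#   array([[ True, False],
#          [ True,  True]])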
| 33.82906
| 77
| 0.689237
|
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_ne(test_case, shape, device):
arr1 = np.random.randn(*shape)
arr2 = np.random.randn(*shape)
input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))
other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))
of_out = flow.ne(input, other)
of_out2 = flow.not_equal(input, other)
np_out = np.not_equal(arr1, arr2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
test_case.assertTrue(np.array_equal(of_out2.numpy(), np_out))
def _test_tensor_ne_operator(test_case, shape, device):
arr1 = np.random.randn(*shape)
arr2 = np.random.randn(*shape)
input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))
other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))
of_out = input.ne(other)
np_out = np.not_equal(arr1, arr2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_ne_int(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1
of_out = flow.ne(input, num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_tensor_ne_operator_int(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1
of_out = input.ne(num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_ne_float(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1.0
of_out = flow.ne(input, num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_tensor_ne_operator_float(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1.0
of_out = input.ne(num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
@flow.unittest.skip_unless_1n1d()
class TestNe(flow.unittest.TestCase):
def test_ne(test_case):
arg_dict = OrderedDict()
arg_dict["test_func"] = [
_test_ne,
_test_tensor_ne_operator,
_test_ne_int,
_test_tensor_ne_operator_int,
_test_ne_float,
_test_tensor_ne_operator_float,
]
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(auto_backward=False, check_graph=False)
def test_ne_with_0shape_data(test_case):
device = random_device()
x1 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
y1 = torch.ne(x1, x2)
y2 = torch.ne(x1, 2)
y3 = torch.ne(x1, 2.0)
return (y1, y2, y3)
if __name__ == "__main__":
unittest.main()
| true
| true
|
f7173cf25e08228c30ad2d6b241f0a7a9258b6b2
| 2,213
|
py
|
Python
|
game/sdl/nacl/generate_nmf.py
|
spiffcode/hostile-takeover
|
363c8f7f02c25f90255b28a5e2eaea784b2a502f
|
[
"BSD-2-Clause-FreeBSD"
] | 113
|
2015-01-07T00:37:37.000Z
|
2022-03-08T17:58:24.000Z
|
game/sdl/nacl/generate_nmf.py
|
ptitSeb/hostile-takeover
|
db70254a966ca3223178e669594f3b704b5471a2
|
[
"BSD-2-Clause-FreeBSD"
] | 6
|
2015-04-13T16:16:00.000Z
|
2021-06-08T04:49:09.000Z
|
game/sdl/nacl/generate_nmf.py
|
spiffcode/hostile-takeover
|
363c8f7f02c25f90255b28a5e2eaea784b2a502f
|
[
"BSD-2-Clause-FreeBSD"
] | 49
|
2015-01-08T04:27:35.000Z
|
2022-03-08T04:55:08.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2011, The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import optparse
# This script generates a JSON .nmf file, which provides the mapping to indicate
# which .nexe file to load and execute for a particular architecture.
# The script must have -nmf <filename> as an option, which designates the name
# of the .nmf file to be generated.
# One or more nexes must be specified on the command line. Each
# nexe file is preceded by an argument that specifies the architecture
# that the nexe is associated with: --x86-64, --x86-32, --arm.
#
# For example:
# generate_nmf.py --nmf test.nmf --x86-64 hello_world_x86-64.nexe \
# --x86-32 hello32.nexe
# will create test.nmf that contains 2 entries, while
#
# generate_nmf.py --nmf hello.nmf --arm arm.nexe
#
# will create hello.nmf with a single entry.
# Note: argv has been passed in without the program name in argv[0]
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--nmf', dest='nmf_file', help='nmf file to generate')
parser.add_option('--x86-64', dest='x86_64', help='x86_64 nexe')
parser.add_option('--x86-32', dest='x86_32', help='x86_32 nexe')
parser.add_option('--arm', dest='arm', help='arm nexe')
(options, args) = parser.parse_args(argv)
  if options.nmf_file is None:
    parser.error("nmf file not specified. Use --nmf")
  # Make sure that not all nexes are None -- i.e. at least one was specified.
  if options.x86_64 is None and options.x86_32 is None and options.arm is None:
    parser.error("No nexe files were specified")
  nmf_file = open(options.nmf_file, 'w')
  nmf_file.write('{\n')
  nmf_file.write('  "nexes": {\n')
  # Output an entry in the manifest file for each specified architecture.
  # Entries are collected and joined with commas so the last one is not
  # followed by a trailing comma, which would make the JSON invalid.
  entries = []
  if options.x86_64:
    entries.append('    "x86-64": "%s"' % options.x86_64)
  if options.x86_32:
    entries.append('    "x86-32": "%s"' % options.x86_32)
  if options.arm:
    entries.append('    "arm": "%s"' % options.arm)
  nmf_file.write(',\n'.join(entries) + '\n')
  nmf_file.write('  }\n')
  nmf_file.write('}\n')
  nmf_file.close()
if __name__ == '__main__':
main(sys.argv[1:])
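# Quick self-check (manifest and nexe file names below are placeholders):
#   python generate_nmf.py --nmf test.nmf --x86-64 hello_world_x86-64.nexe --x86-32 hello32.nexe
# now yields valid JSON with no trailing comma:
#   {
#     "nexes": {
#       "x86-64": "hello_world_x86-64.nexe",
#       "x86-32": "hello32.nexe"
#     }
#   }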
| 36.278689
| 80
| 0.690917
|
import sys
import optparse
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--nmf', dest='nmf_file', help='nmf file to generate')
parser.add_option('--x86-64', dest='x86_64', help='x86_64 nexe')
parser.add_option('--x86-32', dest='x86_32', help='x86_32 nexe')
parser.add_option('--arm', dest='arm', help='arm nexe')
(options, args) = parser.parse_args(argv)
  if options.nmf_file is None:
    parser.error("nmf file not specified. Use --nmf")
  if options.x86_64 is None and options.x86_32 is None and options.arm is None:
    parser.error("No nexe files were specified")
  nmf_file = open(options.nmf_file, 'w')
  nmf_file.write('{\n')
  nmf_file.write('  "nexes": {\n')
  entries = []
  if options.x86_64:
    entries.append('    "x86-64": "%s"' % options.x86_64)
  if options.x86_32:
    entries.append('    "x86-32": "%s"' % options.x86_32)
  if options.arm:
    entries.append('    "arm": "%s"' % options.arm)
  nmf_file.write(',\n'.join(entries) + '\n')
  nmf_file.write('  }\n')
  nmf_file.write('}\n')
  nmf_file.close()
if __name__ == '__main__':
main(sys.argv[1:])
| true
| true
|
f7173e027264e39bd5de097156f1fe290ba68ed4
| 11,074
|
py
|
Python
|
utils/utils.py
|
Ray0089/PSGMN
|
0363d558add24034e035d26121e2e1b61d97c198
|
[
"Apache-2.0"
] | 18
|
2021-02-05T05:30:15.000Z
|
2022-03-13T03:40:25.000Z
|
utils/utils.py
|
Ray0089/PSGMN
|
0363d558add24034e035d26121e2e1b61d97c198
|
[
"Apache-2.0"
] | 2
|
2021-04-17T02:20:42.000Z
|
2021-09-11T07:05:13.000Z
|
utils/utils.py
|
Ray0089/PSGMN
|
0363d558add24034e035d26121e2e1b61d97c198
|
[
"Apache-2.0"
] | 5
|
2021-04-19T00:21:20.000Z
|
2022-01-17T07:30:27.000Z
|
from PIL import Image  # needed by read_mask below
import matplotlib.pyplot as plt
import numpy as np
import math
import struct  # needed by load_ply's binary branch
import cv2
import torch
from torch_geometric.data import Data
def load_ply(path):
"""
Loads a 3D mesh model from a PLY file.
:param path: Path to a PLY file.
:return: The loaded model given by a dictionary with items:
'pts' (nx3 ndarray), 'normals' (nx3 ndarray), 'colors' (nx3 ndarray),
'faces' (mx3 ndarray) - the latter three are optional.
"""
    f = open(path, 'r')  # text mode: fine for ASCII PLY; binary PLYs would need 'rb' on Python 3
n_pts = 0
n_faces = 0
face_n_corners = 3 # Only triangular faces are supported
pt_props = []
face_props = []
is_binary = False
header_vertex_section = False
header_face_section = False
# Read header
while True:
line = f.readline().rstrip('\n').rstrip('\r') # Strip the newline character(s)
if line.startswith('element vertex'):
n_pts = int(line.split()[-1])
header_vertex_section = True
header_face_section = False
elif line.startswith('element face'):
n_faces = int(line.split()[-1])
header_vertex_section = False
header_face_section = True
elif line.startswith('element'): # Some other element
header_vertex_section = False
header_face_section = False
elif line.startswith('property') and header_vertex_section:
# (name of the property, data type)
pt_props.append((line.split()[-1], line.split()[-2]))
elif line.startswith('property list') and header_face_section:
elems = line.split()
if elems[-1] == 'vertex_indices':
# (name of the property, data type)
face_props.append(('n_corners', elems[2]))
for i in range(face_n_corners):
face_props.append(('ind_' + str(i), elems[3]))
else:
print('Warning: Not supported face property: ' + elems[-1])
elif line.startswith('format'):
if 'binary' in line:
is_binary = True
elif line.startswith('end_header'):
break
    # Prepare data structures (np.float was removed in NumPy 1.24; np.float64 is the equivalent)
    model = {}
    model['pts'] = np.zeros((n_pts, 3), np.float64)
    if n_faces > 0:
        model['faces'] = np.zeros((n_faces, face_n_corners), np.float64)
    pt_props_names = [p[0] for p in pt_props]
    is_normal = False
    if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)):
        is_normal = True
        model['normals'] = np.zeros((n_pts, 3), np.float64)
    is_color = False
    if {'red', 'green', 'blue'}.issubset(set(pt_props_names)):
        is_color = True
        model['colors'] = np.zeros((n_pts, 3), np.float64)
    is_texture = False
    if {'texture_u', 'texture_v'}.issubset(set(pt_props_names)):
        is_texture = True
        model['texture_uv'] = np.zeros((n_pts, 2), np.float64)
formats = { # For binary format
'float': ('f', 4),
'double': ('d', 8),
'int': ('i', 4),
'uchar': ('B', 1)
}
# Load vertices
for pt_id in range(n_pts):
prop_vals = {}
load_props = ['x', 'y', 'z', 'nx', 'ny', 'nz',
'red', 'green', 'blue', 'texture_u', 'texture_v']
if is_binary:
for prop in pt_props:
format = formats[prop[1]]
val = struct.unpack(format[0], f.read(format[1]))[0]
if prop[0] in load_props:
prop_vals[prop[0]] = val
else:
elems = f.readline().rstrip('\n').rstrip('\r').split()
for prop_id, prop in enumerate(pt_props):
if prop[0] in load_props:
prop_vals[prop[0]] = elems[prop_id]
model['pts'][pt_id, 0] = float(prop_vals['x'])
model['pts'][pt_id, 1] = float(prop_vals['y'])
model['pts'][pt_id, 2] = float(prop_vals['z'])
if is_normal:
model['normals'][pt_id, 0] = float(prop_vals['nx'])
model['normals'][pt_id, 1] = float(prop_vals['ny'])
model['normals'][pt_id, 2] = float(prop_vals['nz'])
if is_color:
model['colors'][pt_id, 0] = float(prop_vals['red'])
model['colors'][pt_id, 1] = float(prop_vals['green'])
model['colors'][pt_id, 2] = float(prop_vals['blue'])
if is_texture:
model['texture_uv'][pt_id, 0] = float(prop_vals['texture_u'])
model['texture_uv'][pt_id, 1] = float(prop_vals['texture_v'])
# Load faces
for face_id in range(n_faces):
prop_vals = {}
if is_binary:
for prop in face_props:
format = formats[prop[1]]
val = struct.unpack(format[0], f.read(format[1]))[0]
if prop[0] == 'n_corners':
if val != face_n_corners:
print('Error: Only triangular faces are supported.')
print('Number of face corners: ' + str(val))
exit(-1)
else:
prop_vals[prop[0]] = val
else:
elems = f.readline().rstrip('\n').rstrip('\r').split()
for prop_id, prop in enumerate(face_props):
if prop[0] == 'n_corners':
if int(elems[prop_id]) != face_n_corners:
print('Error: Only triangular faces are supported.')
print('Number of face corners: ' + str(int(elems[prop_id])))
exit(-1)
else:
prop_vals[prop[0]] = elems[prop_id]
model['faces'][face_id, 0] = int(prop_vals['ind_0'])
model['faces'][face_id, 1] = int(prop_vals['ind_1'])
model['faces'][face_id, 2] = int(prop_vals['ind_2'])
f.close()
return model
def read_ply_to_data(path):
model = load_ply(path)
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
x = model['colors']
x = x / 255.0
x -= mean
x /= std
x = np.concatenate([x,model['pts'],model['normals']],axis=-1)
x = torch.tensor(x,dtype=torch.float32)
pos = torch.tensor(model['pts'],dtype=torch.float32)
face = torch.tensor(model['faces'],dtype=torch.long).transpose(1,0)
data = Data(x = x, pos=pos,face = face)
return data
def read_mask(path, split, cls_idx=1):
if split == "train" or split == "test":
return (np.asarray(Image.open(path))[:, :, 0] != 0).astype(np.uint8)
elif split == "fuse":
return (np.asarray(Image.open(path)) == cls_idx).astype(np.uint8)
elif split == "render":
return (np.asarray(Image.open(path))).astype(np.uint8)
def mask_iou(self, output, batch):
mask_pred = torch.argmax(output["seg"], dim=1)[0].detach().cpu().numpy()
mask_gt = batch["mask"][0].detach().cpu().numpy()
iou = (mask_pred & mask_gt).sum() / (mask_pred | mask_gt).sum()
self.mask_ap.append(iou > 0.7)
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
return np.exp(x) / np.sum(np.exp(x), axis=0)
def cal_error(S, y, img_shape=(480, 640)):
S = S[:, y[0, :, 0], :]
S = S.detach().cpu().numpy()
y = y.detach().cpu().numpy()
S = np.argmax(S, axis=-1)
S = S.reshape(-1)
y = y[:, :, 1].reshape(-1)
gt_pos = []
for idx in y:
v = math.floor(idx / img_shape[1])
u = idx - img_shape[1] * v
gt_pos.append([u, v])
est_pos = []
for idx in S:
v = math.floor(idx / (img_shape[1] / 2)) * 2
u = (idx - img_shape[1] / 2 * (v / 2)) * 2
est_pos.append([u, v])
gt_pos = np.array(gt_pos, dtype=np.float32)
est_pos = np.array(est_pos, dtype=np.float32)
error = np.abs(gt_pos - est_pos)
    dist = np.sqrt(error[:, 0] ** 2 + error[:, 1] ** 2)  # per-point distance; error has shape (N, 2)
avg_error = np.mean(dist)
sigma = np.std(dist)
return avg_error, sigma
def project(xyz, K, RT):
"""
xyz: [N, 3]
K: [3, 3]
RT: [3, 4]
"""
xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T
xyz = np.dot(xyz, K.T)
xy = xyz[:, :2] / xyz[:, 2:]
return xy
def mesh_project(xyz, K, RT):
"""
xyz: [N, 3]
K: [3, 3]
RT: [3, 4]
"""
xyz = xyz.astype(np.float32)
K = K.astype(np.float32)
RT = RT.astype(np.float32)
xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T
z = xyz[:, 2].copy()
xyz = np.dot(xyz, K.astype(np.float32).T)
xyz = xyz / xyz[:, 2:]
xyz[:, 2] = z
return xyz
def find_neighborhold_node(model):
pts = model["pts"]
faces = model["faces"]
neighbors = [[] for i in range(pts.shape[0])]
for i in range(pts.shape[0]):
dim0, dim1 = np.where(faces == i)
for idx in faces[dim0]:
for id in idx:
if id not in neighbors[i] and id != i:
neighbors[i].append(id)
return neighbors
def bbox_from_mask(mask_img, stride=0):
mask_img = np.array(mask_img)
mask = mask_img[:, :, 0]
img_shape = mask.shape
coor = np.nonzero(mask)
coor[0].sort()
xmin = coor[0][0]
xmax = coor[0][-1]
coor[1].sort()
ymin = coor[1][0]
ymax = coor[1][-1]
if xmin >= stride:
xmin -= stride
else:
xmin = 0
if xmax + stride <= img_shape[0]:
xmax += stride
else:
xmax = img_shape[0]
if ymin >= stride:
ymin -= stride
else:
ymin = 0
if ymax + stride <= img_shape[1]:
ymax += stride
else:
ymax = img_shape[1]
return xmax, ymax, xmin, ymin
def concate_graph(x, edge, attribute):
batch_size = x.shape[0]
x_num = 0
if x.ndim == 3:
x_num = x.shape[1]
elif x.ndim == 4:
x_num = x.shape[1] * x.shape[2]
x = x.reshape(-1, x.shape[-1])
for i in range(batch_size):
edge[i, :, :] += i * x_num
edge = edge.permute(0, 2, 1)
edge = edge.reshape(-1, 2)
edge = edge.permute(1, 0)
attribute = attribute.reshape(-1, attribute.shape[-1])
return [x, edge, attribute]
def adjust_learning_rate(optimizer, epoch, init_lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = init_lr * (0.5 ** (epoch // 20))
print("LR:{}".format(lr))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def draw_error(S, y, image):
S = S[:, y[0, :, 0], :]
S = S.detach().cpu().numpy()
batch_size = S.shape[0]
y = y.detach().cpu().numpy()
img = image.detach().cpu().numpy()[0]
S = np.argmax(S, axis=-1)
S = S.reshape(-1)
y = y[:, :, 1].reshape(-1)
gt_pos = []
for idx in y:
v = math.floor(idx / img.shape[1])
u = idx - img.shape[1] * v
gt_pos.append([u, v])
est_pos = []
for idx in S:
v = math.floor(idx / (img.shape[1] / 2)) * 2
u = (idx - img.shape[1] / 2 * (v / 2)) * 2
est_pos.append([u, v])
gt_pos = np.array(gt_pos, dtype=np.float32)
est_pos = np.array(est_pos, dtype=np.float32)
if __name__ == "__main__":
img = plt.imread("/home/ray/data/LINEMOD/ape/mask/0000.png")
img = np.array(img)
bbox_from_mask(img)
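# Smoke-test sketch for the PLY loader. The path is a placeholder; it needs an
# ASCII, triangle-only PLY that carries per-vertex colors and normals, which
# read_ply_to_data requires. Wrapped in a function so importing this module
# stays side-effect free.
def _ply_smoke_test(path="/path/to/model.ply"):
    model = load_ply(path)
    print("pts:", model["pts"].shape)  # (n_pts, 3)
    if "faces" in model:
        print("faces:", model["faces"].shape)  # (n_faces, 3)
    data = read_ply_to_data(path)  # torch_geometric Data with x, pos, face
    print(data)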
| 30.506887
| 86
| 0.536753
|
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import math
import struct
import cv2
import torch
from torch_geometric.data import Data
def load_ply(path):
f = open(path, 'r')
n_pts = 0
n_faces = 0
face_n_corners = 3
pt_props = []
face_props = []
is_binary = False
header_vertex_section = False
header_face_section = False
while True:
line = f.readline().rstrip('\n').rstrip('\r')
if line.startswith('element vertex'):
n_pts = int(line.split()[-1])
header_vertex_section = True
header_face_section = False
elif line.startswith('element face'):
n_faces = int(line.split()[-1])
header_vertex_section = False
header_face_section = True
elif line.startswith('element'):
header_vertex_section = False
header_face_section = False
elif line.startswith('property') and header_vertex_section:
pt_props.append((line.split()[-1], line.split()[-2]))
elif line.startswith('property list') and header_face_section:
elems = line.split()
if elems[-1] == 'vertex_indices':
face_props.append(('n_corners', elems[2]))
for i in range(face_n_corners):
face_props.append(('ind_' + str(i), elems[3]))
else:
print('Warning: Not supported face property: ' + elems[-1])
elif line.startswith('format'):
if 'binary' in line:
is_binary = True
elif line.startswith('end_header'):
break
    model = {}
    model['pts'] = np.zeros((n_pts, 3), np.float64)
    if n_faces > 0:
        model['faces'] = np.zeros((n_faces, face_n_corners), np.float64)
    pt_props_names = [p[0] for p in pt_props]
    is_normal = False
    if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)):
        is_normal = True
        model['normals'] = np.zeros((n_pts, 3), np.float64)
    is_color = False
    if {'red', 'green', 'blue'}.issubset(set(pt_props_names)):
        is_color = True
        model['colors'] = np.zeros((n_pts, 3), np.float64)
    is_texture = False
    if {'texture_u', 'texture_v'}.issubset(set(pt_props_names)):
        is_texture = True
        model['texture_uv'] = np.zeros((n_pts, 2), np.float64)
formats = {
'float': ('f', 4),
'double': ('d', 8),
'int': ('i', 4),
'uchar': ('B', 1)
}
for pt_id in range(n_pts):
prop_vals = {}
load_props = ['x', 'y', 'z', 'nx', 'ny', 'nz',
'red', 'green', 'blue', 'texture_u', 'texture_v']
if is_binary:
for prop in pt_props:
format = formats[prop[1]]
val = struct.unpack(format[0], f.read(format[1]))[0]
if prop[0] in load_props:
prop_vals[prop[0]] = val
else:
elems = f.readline().rstrip('\n').rstrip('\r').split()
for prop_id, prop in enumerate(pt_props):
if prop[0] in load_props:
prop_vals[prop[0]] = elems[prop_id]
model['pts'][pt_id, 0] = float(prop_vals['x'])
model['pts'][pt_id, 1] = float(prop_vals['y'])
model['pts'][pt_id, 2] = float(prop_vals['z'])
if is_normal:
model['normals'][pt_id, 0] = float(prop_vals['nx'])
model['normals'][pt_id, 1] = float(prop_vals['ny'])
model['normals'][pt_id, 2] = float(prop_vals['nz'])
if is_color:
model['colors'][pt_id, 0] = float(prop_vals['red'])
model['colors'][pt_id, 1] = float(prop_vals['green'])
model['colors'][pt_id, 2] = float(prop_vals['blue'])
if is_texture:
model['texture_uv'][pt_id, 0] = float(prop_vals['texture_u'])
model['texture_uv'][pt_id, 1] = float(prop_vals['texture_v'])
for face_id in range(n_faces):
prop_vals = {}
if is_binary:
for prop in face_props:
format = formats[prop[1]]
val = struct.unpack(format[0], f.read(format[1]))[0]
if prop[0] == 'n_corners':
if val != face_n_corners:
print('Error: Only triangular faces are supported.')
print('Number of face corners: ' + str(val))
exit(-1)
else:
prop_vals[prop[0]] = val
else:
elems = f.readline().rstrip('\n').rstrip('\r').split()
for prop_id, prop in enumerate(face_props):
if prop[0] == 'n_corners':
if int(elems[prop_id]) != face_n_corners:
print('Error: Only triangular faces are supported.')
print('Number of face corners: ' + str(int(elems[prop_id])))
exit(-1)
else:
prop_vals[prop[0]] = elems[prop_id]
model['faces'][face_id, 0] = int(prop_vals['ind_0'])
model['faces'][face_id, 1] = int(prop_vals['ind_1'])
model['faces'][face_id, 2] = int(prop_vals['ind_2'])
f.close()
return model
def read_ply_to_data(path):
model = load_ply(path)
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
x = model['colors']
x = x / 255.0
x -= mean
x /= std
x = np.concatenate([x,model['pts'],model['normals']],axis=-1)
x = torch.tensor(x,dtype=torch.float32)
pos = torch.tensor(model['pts'],dtype=torch.float32)
face = torch.tensor(model['faces'],dtype=torch.long).transpose(1,0)
data = Data(x = x, pos=pos,face = face)
return data
def read_mask(path, split, cls_idx=1):
if split == "train" or split == "test":
return (np.asarray(Image.open(path))[:, :, 0] != 0).astype(np.uint8)
elif split == "fuse":
return (np.asarray(Image.open(path)) == cls_idx).astype(np.uint8)
elif split == "render":
return (np.asarray(Image.open(path))).astype(np.uint8)
def mask_iou(self, output, batch):
mask_pred = torch.argmax(output["seg"], dim=1)[0].detach().cpu().numpy()
mask_gt = batch["mask"][0].detach().cpu().numpy()
iou = (mask_pred & mask_gt).sum() / (mask_pred | mask_gt).sum()
self.mask_ap.append(iou > 0.7)
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
def cal_error(S, y, img_shape=(480, 640)):
S = S[:, y[0, :, 0], :]
S = S.detach().cpu().numpy()
y = y.detach().cpu().numpy()
S = np.argmax(S, axis=-1)
S = S.reshape(-1)
y = y[:, :, 1].reshape(-1)
gt_pos = []
for idx in y:
v = math.floor(idx / img_shape[1])
u = idx - img_shape[1] * v
gt_pos.append([u, v])
est_pos = []
for idx in S:
v = math.floor(idx / (img_shape[1] / 2)) * 2
u = (idx - img_shape[1] / 2 * (v / 2)) * 2
est_pos.append([u, v])
gt_pos = np.array(gt_pos, dtype=np.float32)
est_pos = np.array(est_pos, dtype=np.float32)
error = np.abs(gt_pos - est_pos)
    dist = np.sqrt(error[:, 0] ** 2 + error[:, 1] ** 2)
avg_error = np.mean(dist)
sigma = np.std(dist)
return avg_error, sigma
def project(xyz, K, RT):
xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T
xyz = np.dot(xyz, K.T)
xy = xyz[:, :2] / xyz[:, 2:]
return xy
def mesh_project(xyz, K, RT):
xyz = xyz.astype(np.float32)
K = K.astype(np.float32)
RT = RT.astype(np.float32)
xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T
z = xyz[:, 2].copy()
xyz = np.dot(xyz, K.astype(np.float32).T)
xyz = xyz / xyz[:, 2:]
xyz[:, 2] = z
return xyz
def find_neighborhold_node(model):
pts = model["pts"]
faces = model["faces"]
neighbors = [[] for i in range(pts.shape[0])]
for i in range(pts.shape[0]):
dim0, dim1 = np.where(faces == i)
for idx in faces[dim0]:
for id in idx:
if id not in neighbors[i] and id != i:
neighbors[i].append(id)
return neighbors
def bbox_from_mask(mask_img, stride=0):
mask_img = np.array(mask_img)
mask = mask_img[:, :, 0]
img_shape = mask.shape
coor = np.nonzero(mask)
coor[0].sort()
xmin = coor[0][0]
xmax = coor[0][-1]
coor[1].sort()
ymin = coor[1][0]
ymax = coor[1][-1]
if xmin >= stride:
xmin -= stride
else:
xmin = 0
if xmax + stride <= img_shape[0]:
xmax += stride
else:
xmax = img_shape[0]
if ymin >= stride:
ymin -= stride
else:
ymin = 0
if ymax + stride <= img_shape[1]:
ymax += stride
else:
ymax = img_shape[1]
return xmax, ymax, xmin, ymin
def concate_graph(x, edge, attribute):
batch_size = x.shape[0]
x_num = 0
if x.ndim == 3:
x_num = x.shape[1]
elif x.ndim == 4:
x_num = x.shape[1] * x.shape[2]
x = x.reshape(-1, x.shape[-1])
for i in range(batch_size):
edge[i, :, :] += i * x_num
edge = edge.permute(0, 2, 1)
edge = edge.reshape(-1, 2)
edge = edge.permute(1, 0)
attribute = attribute.reshape(-1, attribute.shape[-1])
return [x, edge, attribute]
def adjust_learning_rate(optimizer, epoch, init_lr):
lr = init_lr * (0.5 ** (epoch // 20))
print("LR:{}".format(lr))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def draw_error(S, y, image):
S = S[:, y[0, :, 0], :]
S = S.detach().cpu().numpy()
batch_size = S.shape[0]
y = y.detach().cpu().numpy()
img = image.detach().cpu().numpy()[0]
S = np.argmax(S, axis=-1)
S = S.reshape(-1)
y = y[:, :, 1].reshape(-1)
gt_pos = []
for idx in y:
v = math.floor(idx / img.shape[1])
u = idx - img.shape[1] * v
gt_pos.append([u, v])
est_pos = []
for idx in S:
v = math.floor(idx / (img.shape[1] / 2)) * 2
u = (idx - img.shape[1] / 2 * (v / 2)) * 2
est_pos.append([u, v])
gt_pos = np.array(gt_pos, dtype=np.float32)
est_pos = np.array(est_pos, dtype=np.float32)
if __name__ == "__main__":
img = plt.imread("/home/ray/data/LINEMOD/ape/mask/0000.png")
img = np.array(img)
bbox_from_mask(img)
| true
| true
|
f7173e64baaaa455cc2055a042fb58a2903aa785
| 2,500
|
py
|
Python
|
cic/drivers/singleton.py
|
Chia-Network/internal-custody
|
672cf33bb63cad960f5576f84a6606ce471e05cb
|
[
"Apache-2.0"
] | null | null | null |
cic/drivers/singleton.py
|
Chia-Network/internal-custody
|
672cf33bb63cad960f5576f84a6606ce471e05cb
|
[
"Apache-2.0"
] | null | null | null |
cic/drivers/singleton.py
|
Chia-Network/internal-custody
|
672cf33bb63cad960f5576f84a6606ce471e05cb
|
[
"Apache-2.0"
] | 1
|
2022-02-22T22:35:24.000Z
|
2022-02-22T22:35:24.000Z
|
from typing import List, Tuple
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_spend import CoinSpend
from chia.types.condition_opcodes import ConditionOpcode
from chia.util.hash import std_hash
from chia.util.ints import uint64
from chia.wallet.puzzles.singleton_top_layer import (
SINGLETON_LAUNCHER,
SINGLETON_LAUNCHER_HASH,
P2_SINGLETON_MOD,
solution_for_singleton,
)
from cic.load_clvm import load_clvm
SINGLETON_MOD = load_clvm("singleton_top_layer_v1_1.clsp", package_or_requirement="cic.clsp.singleton")
solve_singleton = solution_for_singleton
# Return the puzzle reveal of a singleton with specific ID and innerpuz
def construct_singleton(launcher_id: bytes32, inner_puz: Program) -> Program:
return SINGLETON_MOD.curry(
(SINGLETON_MOD.get_tree_hash(), (launcher_id, SINGLETON_LAUNCHER_HASH)),
inner_puz,
)
def generate_launch_conditions_and_coin_spend(
coin: Coin,
inner_puzzle: Program,
amount: uint64,
) -> Tuple[List[Program], CoinSpend]:
if (amount % 2) == 0:
raise ValueError("Coin amount cannot be even. Subtract one mojo.")
launcher_coin = Coin(coin.name(), SINGLETON_LAUNCHER_HASH, amount)
curried_singleton: Program = construct_singleton(launcher_coin.name(), inner_puzzle)
launcher_solution = Program.to(
[
curried_singleton.get_tree_hash(),
amount,
[],
]
)
create_launcher = Program.to(
[
ConditionOpcode.CREATE_COIN,
SINGLETON_LAUNCHER_HASH,
amount,
],
)
assert_launcher_announcement = Program.to(
[
ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT,
std_hash(launcher_coin.name() + launcher_solution.get_tree_hash()),
],
)
conditions = [create_launcher, assert_launcher_announcement]
launcher_coin_spend = CoinSpend(
launcher_coin,
SINGLETON_LAUNCHER,
launcher_solution,
)
return conditions, launcher_coin_spend
def construct_p2_singleton(launcher_id: bytes32) -> Program:
return P2_SINGLETON_MOD.curry(SINGLETON_MOD.get_tree_hash(), launcher_id, SINGLETON_LAUNCHER_HASH)
def solve_p2_singleton(p2_singleton_coin: Coin, singleton_inner_puzhash: bytes32) -> Program:
return Program.to([singleton_inner_puzhash, p2_singleton_coin.name()])
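# Usage sketch: deriving the pay-to-singleton puzzle hash for a launcher id.
# The all-zero launcher id below is a placeholder for illustration only.
def _example_p2_singleton_hash() -> bytes32:
    launcher_id = bytes32(b"\x00" * 32)  # placeholder launcher id
    p2_puzzle = construct_p2_singleton(launcher_id)
    return p2_puzzle.get_tree_hash()  # hash that coins can be locked with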
| 30.487805
| 103
| 0.7288
|
from typing import List, Tuple
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_spend import CoinSpend
from chia.types.condition_opcodes import ConditionOpcode
from chia.util.hash import std_hash
from chia.util.ints import uint64
from chia.wallet.puzzles.singleton_top_layer import (
SINGLETON_LAUNCHER,
SINGLETON_LAUNCHER_HASH,
P2_SINGLETON_MOD,
solution_for_singleton,
)
from cic.load_clvm import load_clvm
SINGLETON_MOD = load_clvm("singleton_top_layer_v1_1.clsp", package_or_requirement="cic.clsp.singleton")
solve_singleton = solution_for_singleton
def construct_singleton(launcher_id: bytes32, inner_puz: Program) -> Program:
return SINGLETON_MOD.curry(
(SINGLETON_MOD.get_tree_hash(), (launcher_id, SINGLETON_LAUNCHER_HASH)),
inner_puz,
)
def generate_launch_conditions_and_coin_spend(
coin: Coin,
inner_puzzle: Program,
amount: uint64,
) -> Tuple[List[Program], CoinSpend]:
if (amount % 2) == 0:
raise ValueError("Coin amount cannot be even. Subtract one mojo.")
launcher_coin = Coin(coin.name(), SINGLETON_LAUNCHER_HASH, amount)
curried_singleton: Program = construct_singleton(launcher_coin.name(), inner_puzzle)
launcher_solution = Program.to(
[
curried_singleton.get_tree_hash(),
amount,
[],
]
)
create_launcher = Program.to(
[
ConditionOpcode.CREATE_COIN,
SINGLETON_LAUNCHER_HASH,
amount,
],
)
assert_launcher_announcement = Program.to(
[
ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT,
std_hash(launcher_coin.name() + launcher_solution.get_tree_hash()),
],
)
conditions = [create_launcher, assert_launcher_announcement]
launcher_coin_spend = CoinSpend(
launcher_coin,
SINGLETON_LAUNCHER,
launcher_solution,
)
return conditions, launcher_coin_spend
def construct_p2_singleton(launcher_id: bytes32) -> Program:
return P2_SINGLETON_MOD.curry(SINGLETON_MOD.get_tree_hash(), launcher_id, SINGLETON_LAUNCHER_HASH)
def solve_p2_singleton(p2_singleton_coin: Coin, singleton_inner_puzhash: bytes32) -> Program:
return Program.to([singleton_inner_puzhash, p2_singleton_coin.name()])
| true
| true
|
f7173f406336288e5f91314047a20cea6d7099ba
| 29,908
|
py
|
Python
|
tests/test_sklearn_pipeline.py
|
vumichien/hummingbird
|
8981e11ce2536167c329a5d9d20e81125a792fe4
|
[
"MIT"
] | 2,772
|
2020-05-04T21:03:40.000Z
|
2022-03-30T11:00:03.000Z
|
tests/test_sklearn_pipeline.py
|
vumichien/hummingbird
|
8981e11ce2536167c329a5d9d20e81125a792fe4
|
[
"MIT"
] | 486
|
2020-05-05T00:45:44.000Z
|
2022-03-15T01:02:31.000Z
|
tests/test_sklearn_pipeline.py
|
vumichien/hummingbird
|
8981e11ce2536167c329a5d9d20e81125a792fe4
|
[
"MIT"
] | 232
|
2019-11-02T22:06:38.000Z
|
2022-03-25T07:36:17.000Z
|
import unittest
import numpy as np
from sklearn import datasets
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_iris, load_diabetes
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.datasets import make_regression
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, RidgeCV
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler
import hummingbird.ml
from hummingbird.ml._utils import pandas_installed, onnx_runtime_installed
from hummingbird.ml import constants
from onnxconverter_common.data_types import (
FloatTensorType,
Int64TensorType,
StringTensorType,
)
try:
from sklearn.impute import SimpleImputer
except ImportError:
from sklearn.preprocessing import Imputer as SimpleImputer
try:
from sklearn.ensemble import StackingClassifier, StackingRegressor
except ImportError:
StackingClassifier = None
if pandas_installed():
import pandas
class TestSklearnPipeline(unittest.TestCase):
def test_pipeline(self):
data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_pipeline2(self):
data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_combine_inputs_union_in_pipeline(self):
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
model = Pipeline(
[
("scaler1", StandardScaler()),
("union", FeatureUnion([("scaler2", StandardScaler()), ("scaler3", MinMaxScaler())])),
]
)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_combine_inputs_floats_ints(self):
data = [[0, 0.0], [0, 0.0], [1, 1.0], [1, 1.0]]
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_1(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("num", numeric_transformer, numeric_features)])
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_string(self):
"""
TODO: Hummingbird does not yet support strings in this context. Should raise error.
When this feature is complete, change this test.
"""
# fit
titanic_url = "https://raw.githubusercontent.com/amueller/scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv"
data = pandas.read_csv(titanic_url)
X = data.drop("survived", axis=1)
y = data["survived"]
# SimpleImputer on string is not available for string
# in ONNX-ML specifications.
# So we do it beforehand.
X["pclass"].fillna("missing", inplace=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
numeric_features = ["age", "fare"]
numeric_transformer = Pipeline(steps=[("imputer", SimpleImputer(strategy="median")), ("scaler", StandardScaler())])
categorical_features = ["pclass"]
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
clf = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", LogisticRegression(solver="liblinear"))])
to_drop = {"parch", "sibsp", "cabin", "ticket", "name", "body", "home.dest", "boat", "sex", "embarked"}
X_train = X_train.copy()
X_test = X_test.copy()
X_train["pclass"] = X_train["pclass"].astype(np.int64)
X_test["pclass"] = X_test["pclass"].astype(np.int64)
X_train = X_train.drop(to_drop, axis=1)
X_test = X_test.drop(to_drop, axis=1)
clf.fit(X_train, y_train)
torch_model = hummingbird.ml.convert(clf, "torch", X_test)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
clf.predict(X_test), torch_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_pandas(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch", X_test)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_pandas_ts(self):
iris = datasets.load_iris()
X = np.array(iris.data[:, :3], np.float32) # If we don't use float32 here, with python 3.5 and torch 1.5.1 will fail.
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch.jit", X_test)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_weights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_weights_pandas(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch", X_test)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_drop(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="drop",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_drop_noweights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
remainder="drop",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="passthrough",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough_noweights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
remainder="passthrough",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough_slice(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = slice(0, 1)  # selects only column 0 ("vA"); the slice stop is exclusive
categorical_features = slice(3, 4)  # selects only column 3 ("vcat")
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="passthrough",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
# Taken from https://github.com/microsoft/hummingbird/issues/388
def test_pipeline_pca_rf(self):
X, y = make_regression(n_samples=1000, n_features=8, n_informative=5, n_targets=1, random_state=0, shuffle=True)
pca = PCA(n_components=8, svd_solver="randomized", whiten=True)
clf = make_pipeline(StandardScaler(), pca, RandomForestRegressor(n_estimators=10, max_depth=30, random_state=0))
clf.fit(X, y)
model = hummingbird.ml.convert(clf, "pytorch")
prediction_sk = clf.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
prediction_hb = model.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
np.testing.assert_allclose(prediction_sk, prediction_hb, rtol=1e-06, atol=1e-06)
@unittest.skipIf(not onnx_runtime_installed(), reason="Test requires ORT installed")
def test_pipeline_many_inputs(self):
n_features = 18
X = np.random.rand(100, n_features)
y = np.random.randint(1000, size=100)
scaler_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("scaling", scaler_transformer, list(range(n_features)))])
model = RandomForestRegressor(n_estimators=10, max_depth=9)
pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
pipeline.fit(X, y)
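# np.split along axis=1 yields one (100, 1) array per feature, so the converted ONNX graph exposes one input per column.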
X_test = tuple(np.split(X, n_features, axis=1))
hb_model = hummingbird.ml.convert(pipeline, "onnx", X_test)
assert len(hb_model.model.graph.input) == n_features
np.testing.assert_allclose(
pipeline.predict(X), np.array(hb_model.predict(X_test)).flatten(), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not onnx_runtime_installed(), reason="Test requires ORT installed")
def test_pipeline_many_inputs_with_schema(self):
n_features = 5
X = np.random.rand(100, n_features)
y = np.random.randint(1000, size=100)
input_column_names = ["A", "B", "C", "D", "E"]
output_column_names = ["score"]
scaler_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("scaling", scaler_transformer, list(range(n_features)))])
model = RandomForestRegressor(n_estimators=10, max_depth=9)
pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
pipeline.fit(X, y)
X_test = tuple(np.split(X, n_features, axis=1))
extra_config = {constants.INPUT_NAMES: input_column_names, constants.OUTPUT_NAMES: output_column_names}
hb_model = hummingbird.ml.convert(pipeline, "onnx", X_test, extra_config=extra_config)
graph_inputs = [input.name for input in hb_model.model.graph.input]
graph_outputs = [output.name for output in hb_model.model.graph.output]
assert len(hb_model.model.graph.input) == n_features
assert graph_inputs == input_column_names
assert graph_outputs == output_column_names
@unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
def test_stacking_classifier(self):
X, y = load_iris(return_X_y=True)
estimators = [
("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
("svr", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),
]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
clf.fit(X_train, y_train)
hb_model = hummingbird.ml.convert(clf, "torch")
np.testing.assert_allclose(
clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
def test_stacking_classifier_passthrough(self):
X, y = load_iris(return_X_y=True)
estimators = [
("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
("svr", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),
]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), passthrough=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
clf.fit(X_train, y_train)
hb_model = hummingbird.ml.convert(clf, "torch")
np.testing.assert_allclose(
clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
def test_stacking_classifier_decision_function(self):
X, y = load_iris(return_X_y=True)
estimators = [
("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
("svr", make_pipeline(StandardScaler(), LinearSVC(random_state=42))),
]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
clf.fit(X_train, y_train)
self.assertRaises(ValueError, hummingbird.ml.convert, clf, "torch")
@unittest.skipIf(StackingClassifier is None, reason="StackingRegressor not available in scikit-learn < 0.22")
def test_stacking_regressor(self):
X, y = load_diabetes(return_X_y=True)
estimators = [("lr", RidgeCV()), ("svr", LinearSVR(random_state=42))]
reg = StackingRegressor(estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
reg.fit(X_train, y_train)
hb_model = hummingbird.ml.convert(reg, "torch")
np.testing.assert_allclose(
reg.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
if __name__ == "__main__":
unittest.main()
| 40.307278
| 127
| 0.630032
|
import unittest
import numpy as np
from sklearn import datasets
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_iris, load_diabetes
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.datasets import make_regression
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, RidgeCV
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler
import hummingbird.ml
from hummingbird.ml._utils import pandas_installed, onnx_runtime_installed
from hummingbird.ml import constants
from onnxconverter_common.data_types import (
FloatTensorType,
Int64TensorType,
StringTensorType,
)
try:
from sklearn.impute import SimpleImputer
except ImportError:
from sklearn.preprocessing import Imputer as SimpleImputer
try:
from sklearn.ensemble import StackingClassifier, StackingRegressor
except ImportError:
StackingClassifier = None
StackingRegressor = None
if pandas_installed():
import pandas
class TestSklearnPipeline(unittest.TestCase):
def test_pipeline(self):
data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_pipeline2(self):
data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_combine_inputs_union_in_pipeline(self):
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
model = Pipeline(
[
("scaler1", StandardScaler()),
("union", FeatureUnion([("scaler2", StandardScaler()), ("scaler3", MinMaxScaler())])),
]
)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_combine_inputs_floats_ints(self):
data = [[0, 0.0], [0, 0.0], [1, 1.0], [1, 1.0]]
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_1(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("num", numeric_transformer, numeric_features)])
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_string(self):
titanic_url = "https://raw.githubusercontent.com/amueller/scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv"
data = pandas.read_csv(titanic_url)
X = data.drop("survived", axis=1)
y = data["survived"]
X["pclass"].fillna("missing", inplace=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
numeric_features = ["age", "fare"]
numeric_transformer = Pipeline(steps=[("imputer", SimpleImputer(strategy="median")), ("scaler", StandardScaler())])
categorical_features = ["pclass"]
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
clf = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", LogisticRegression(solver="liblinear"))])
to_drop = {"parch", "sibsp", "cabin", "ticket", "name", "body", "home.dest", "boat", "sex", "embarked"}
X_train = X_train.copy()
X_test = X_test.copy()
X_train["pclass"] = X_train["pclass"].astype(np.int64)
X_test["pclass"] = X_test["pclass"].astype(np.int64)
X_train = X_train.drop(to_drop, axis=1)
X_test = X_test.drop(to_drop, axis=1)
clf.fit(X_train, y_train)
torch_model = hummingbird.ml.convert(clf, "torch", X_test)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
clf.predict(X_test), torch_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2]
categorical_features = [3, 4]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_pandas(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2]
categorical_features = [3, 4]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch", X_test)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_pandas_ts(self):
iris = datasets.load_iris()
X = np.array(iris.data[:, :3], np.float32)
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch.jit", X_test)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_weights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_weights_pandas(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch", X_test)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_drop(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="drop",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_drop_noweights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
remainder="drop",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="passthrough",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough_noweights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
remainder="passthrough",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough_slice(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = slice(0, 1)  # selects only column 0 ("vA"); the slice stop is exclusive
categorical_features = slice(3, 4)  # selects only column 3 ("vcat")
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="passthrough",
)
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
# Taken from https://github.com/microsoft/hummingbird/issues/388
def test_pipeline_pca_rf(self):
X, y = make_regression(n_samples=1000, n_features=8, n_informative=5, n_targets=1, random_state=0, shuffle=True)
pca = PCA(n_components=8, svd_solver="randomized", whiten=True)
clf = make_pipeline(StandardScaler(), pca, RandomForestRegressor(n_estimators=10, max_depth=30, random_state=0))
clf.fit(X, y)
model = hummingbird.ml.convert(clf, "pytorch")
prediction_sk = clf.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
prediction_hb = model.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
np.testing.assert_allclose(prediction_sk, prediction_hb, rtol=1e-06, atol=1e-06)
@unittest.skipIf(not onnx_runtime_installed(), reason="Test requires ORT installed")
def test_pipeline_many_inputs(self):
n_features = 18
X = np.random.rand(100, n_features)
y = np.random.randint(1000, size=100)
scaler_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("scaling", scaler_transformer, list(range(n_features)))])
model = RandomForestRegressor(n_estimators=10, max_depth=9)
pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
pipeline.fit(X, y)
X_test = tuple(np.split(X, n_features, axis=1))
hb_model = hummingbird.ml.convert(pipeline, "onnx", X_test)
assert len(hb_model.model.graph.input) == n_features
np.testing.assert_allclose(
pipeline.predict(X), np.array(hb_model.predict(X_test)).flatten(), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not onnx_runtime_installed(), reason="Test requires ORT installed")
def test_pipeline_many_inputs_with_schema(self):
n_features = 5
X = np.random.rand(100, n_features)
y = np.random.randint(1000, size=100)
input_column_names = ["A", "B", "C", "D", "E"]
output_column_names = ["score"]
scaler_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("scaling", scaler_transformer, list(range(n_features)))])
model = RandomForestRegressor(n_estimators=10, max_depth=9)
pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
pipeline.fit(X, y)
X_test = tuple(np.split(X, n_features, axis=1))
extra_config = {constants.INPUT_NAMES: input_column_names, constants.OUTPUT_NAMES: output_column_names}
hb_model = hummingbird.ml.convert(pipeline, "onnx", X_test, extra_config=extra_config)
graph_inputs = [input.name for input in hb_model.model.graph.input]
graph_outputs = [output.name for output in hb_model.model.graph.output]
assert len(hb_model.model.graph.input) == n_features
assert graph_inputs == input_column_names
assert graph_outputs == output_column_names
@unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
def test_stacking_classifier(self):
X, y = load_iris(return_X_y=True)
estimators = [
("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
("svr", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),
]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
clf.fit(X_train, y_train)
hb_model = hummingbird.ml.convert(clf, "torch")
np.testing.assert_allclose(
clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
def test_stacking_classifier_passthrough(self):
X, y = load_iris(return_X_y=True)
estimators = [
("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
("svr", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),
]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), passthrough=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
clf.fit(X_train, y_train)
hb_model = hummingbird.ml.convert(clf, "torch")
np.testing.assert_allclose(
clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
def test_stacking_classifier_decision_function(self):
X, y = load_iris(return_X_y=True)
estimators = [
("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
("svr", make_pipeline(StandardScaler(), LinearSVC(random_state=42))),
]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
clf.fit(X_train, y_train)
self.assertRaises(ValueError, hummingbird.ml.convert, clf, "torch")
@unittest.skipIf(StackingClassifier is None, reason="StackingRegressor not available in scikit-learn < 0.22")
def test_stacking_regressor(self):
X, y = load_diabetes(return_X_y=True)
estimators = [("lr", RidgeCV()), ("svr", LinearSVR(random_state=42))]
reg = StackingRegressor(estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
reg.fit(X_train, y_train)
hb_model = hummingbird.ml.convert(reg, "torch")
np.testing.assert_allclose(
reg.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
if __name__ == "__main__":
unittest.main()
| true
| true
|
f7173f7ed9d0c8bc4d136449f83ae47f59a3b4aa
| 161
|
py
|
Python
|
shiyanlou_cs596-1805f3c438/design3.py
|
tongxindao/shiyanlou
|
1d002ea342deb69066c287db9935f77f49f0a09e
|
[
"Apache-2.0"
] | null | null | null |
shiyanlou_cs596-1805f3c438/design3.py
|
tongxindao/shiyanlou
|
1d002ea342deb69066c287db9935f77f49f0a09e
|
[
"Apache-2.0"
] | null | null | null |
shiyanlou_cs596-1805f3c438/design3.py
|
tongxindao/shiyanlou
|
1d002ea342deb69066c287db9935f77f49f0a09e
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
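# Prints a right-aligned triangle: the first line has row stars and each subsequent line one fewer, left-padded with spaces.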
row = int(input("Enter the number of rows: "))
n = row
while n >= 0:
x = "*" * n
y = " " * (row - n)
print(y + x)
n -= 1
| 17.888889
| 46
| 0.459627
|
row = int(input("Enter the number of rows: "))
n = row
while n >= 0:
x = "*" * n
y = " " * (row - n)
print(y + x)
n -= 1
| true
| true
|
f7173fb688f43ba2ac42d7b1dfdd0e7fc7e3dcf5
| 3,784
|
py
|
Python
|
Lib/site-packages/django_extensions/validators.py
|
Nibraz15/FullTextSearch
|
79d03a9b5c0fc94219ad9a70fe57818496844660
|
[
"bzip2-1.0.6"
] | 1
|
2019-12-22T23:37:28.000Z
|
2019-12-22T23:37:28.000Z
|
Lib/site-packages/django_extensions/validators.py
|
Nibraz15/FullTextSearch
|
79d03a9b5c0fc94219ad9a70fe57818496844660
|
[
"bzip2-1.0.6"
] | 10
|
2020-06-05T21:41:01.000Z
|
2022-02-10T07:33:38.000Z
|
Lib/site-packages/django_extensions/validators.py
|
Nibraz15/FullTextSearch
|
79d03a9b5c0fc94219ad9a70fe57818496844660
|
[
"bzip2-1.0.6"
] | 3
|
2020-08-07T16:16:54.000Z
|
2020-10-12T18:06:35.000Z
|
# -*- coding: utf-8 -*-
import unicodedata
import binascii
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
@deconstructible
class NoControlCharactersValidator(object):
message = _("Control Characters like new lines or tabs are not allowed.")
code = "no_control_characters"
whitelist = None
def __init__(self, message=None, code=None, whitelist=None):
if message:
self.message = message
if code:
self.code = code
if whitelist:
self.whitelist = whitelist
def __call__(self, value):
value = force_text(value)
whitelist = self.whitelist
category = unicodedata.category
for character in value:
if whitelist and character in whitelist:
continue
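# Unicode general categories beginning with "C" (Cc, Cf, Cs, Co, Cn) are control and other non-printable characters.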
if category(character)[0] == "C":
params = {'value': value, 'whitelist': whitelist}
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, NoControlCharactersValidator) and
(self.whitelist == other.whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
@deconstructible
class NoWhitespaceValidator(object):
message = _("Leading and Trailing whitespaces are not allowed.")
code = "no_whitespace"
def __init__(self, message=None, code=None, whitelist=None):
if message:
self.message = message
if code:
self.code = code
def __call__(self, value):
value = force_text(value)
if value != value.strip():
params = {'value': value}
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, NoWhitespaceValidator) and
(self.message == other.message) and
(self.code == other.code)
)
@deconstructible
class HexValidator(object):
messages = {
'invalid': _("Only a hex string is allowed."),
'length': _("Invalid length. Must be %(length)d characters."),
'min_length': _("Ensure that there are more than %(min)s characters."),
'max_length': _("Ensure that there are no more than %(max)s characters."),
}
code = "hex_only"
def __init__(self, length=None, min_length=None, max_length=None, message=None, code=None):
self.length = length
self.min_length = min_length
self.max_length = max_length
if message:
self.message = message
if code:
self.code = code
def __call__(self, value):
value = force_text(value)
if self.length and len(value) != self.length:
raise ValidationError(self.messages['length'], code='hex_only_length', params={'length': self.length})
if self.min_length and len(value) < self.min_length:
raise ValidationError(self.messages['min_length'], code='hex_only_min_length', params={'min': self.min_length})
if self.max_length and len(value) > self.max_length:
raise ValidationError(self.messages['max_length'], code='hex_only_max_length', params={'max': self.max_length})
try:
binascii.unhexlify(value)
except (TypeError, binascii.Error):
raise ValidationError(self.messages['invalid'], code='hex_only')
def __eq__(self, other):
return (
isinstance(other, HexValidator) and
(getattr(self, 'message', None) == getattr(other, 'message', None)) and
(self.code == other.code)
)
| 34.4
| 123
| 0.624736
|
import unicodedata
import binascii
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
@deconstructible
class NoControlCharactersValidator(object):
message = _("Control Characters like new lines or tabs are not allowed.")
code = "no_control_characters"
whitelist = None
def __init__(self, message=None, code=None, whitelist=None):
if message:
self.message = message
if code:
self.code = code
if whitelist:
self.whitelist = whitelist
def __call__(self, value):
value = force_text(value)
whitelist = self.whitelist
category = unicodedata.category
for character in value:
if whitelist and character in whitelist:
continue
if category(character)[0] == "C":
params = {'value': value, 'whitelist': whitelist}
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, NoControlCharactersValidator) and
(self.whitelist == other.whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
@deconstructible
class NoWhitespaceValidator(object):
message = _("Leading and Trailing whitespaces are not allowed.")
code = "no_whitespace"
def __init__(self, message=None, code=None, whitelist=None):
if message:
self.message = message
if code:
self.code = code
def __call__(self, value):
value = force_text(value)
if value != value.strip():
params = {'value': value}
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, NoWhitespaceValidator) and
(self.message == other.message) and
(self.code == other.code)
)
@deconstructible
class HexValidator(object):
messages = {
'invalid': _("Only a hex string is allowed."),
'length': _("Invalid length. Must be %(length)d characters."),
'min_length': _("Ensure that there are more than %(min)s characters."),
'max_length': _("Ensure that there are no more than %(max)s characters."),
}
code = "hex_only"
def __init__(self, length=None, min_length=None, max_length=None, message=None, code=None):
self.length = length
self.min_length = min_length
self.max_length = max_length
if message:
self.message = message
if code:
self.code = code
def __call__(self, value):
value = force_text(value)
if self.length and len(value) != self.length:
raise ValidationError(self.messages['length'], code='hex_only_length', params={'length': self.length})
if self.min_length and len(value) < self.min_length:
raise ValidationError(self.messages['min_length'], code='hex_only_min_length', params={'min': self.min_length})
if self.max_length and len(value) > self.max_length:
raise ValidationError(self.messages['max_length'], code='hex_only_max_length', params={'max': self.max_length})
try:
binascii.unhexlify(value)
except (TypeError, binascii.Error):
raise ValidationError(self.messages['invalid'], code='hex_only')
def __eq__(self, other):
return (
isinstance(other, HexValidator) and
(getattr(self, 'message', None) == getattr(other, 'message', None)) and
(self.code == other.code)
)
| true
| true
|
f71740b8d42b1368ce90e20d97da178845afeb85
| 2,344
|
py
|
Python
|
nodes/lcm_to_ros/xtion/rgbd_t.py
|
mrfmap/mrfmap_ros
|
8c1e108860ff297f39591d97f8f8ce2937b29a51
|
[
"BSD-3-Clause"
] | 6
|
2020-07-15T21:00:49.000Z
|
2021-05-12T07:16:38.000Z
|
nodes/lcm_to_ros/xtion/rgbd_t.py
|
mrfmap/mrfmap_ros
|
8c1e108860ff297f39591d97f8f8ce2937b29a51
|
[
"BSD-3-Clause"
] | 2
|
2020-08-14T16:16:21.000Z
|
2020-11-12T07:43:22.000Z
|
nodes/lcm_to_ros/xtion/rgbd_t.py
|
mrfmap/mrfmap_ros
|
8c1e108860ff297f39591d97f8f8ce2937b29a51
|
[
"BSD-3-Clause"
] | 2
|
2020-08-16T15:53:14.000Z
|
2021-05-12T07:16:41.000Z
|
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class rgbd_t(object):
__slots__ = ["utime", "width", "height", "rgblen", "depthlen", "rgb", "depth"]
__typenames__ = ["int64_t", "int32_t", "int32_t", "int32_t", "int32_t", "byte", "byte"]
__dimensions__ = [None, None, None, None, None, ["rgblen"], ["depthlen"]]
def __init__(self):
self.utime = 0
self.width = 0
self.height = 0
self.rgblen = 0
self.depthlen = 0
self.rgb = ""
self.depth = ""
def encode(self):
buf = BytesIO()
buf.write(rgbd_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
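# ">qiiii" packs utime as a big-endian int64 followed by four int32 fields (24 bytes), mirroring the unpack in _decode_one.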
buf.write(struct.pack(">qiiii", self.utime, self.width, self.height, self.rgblen, self.depthlen))
buf.write(bytearray(self.rgb[:self.rgblen]))
buf.write(bytearray(self.depth[:self.depthlen]))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != rgbd_t._get_packed_fingerprint():
raise ValueError("Decode error")
return rgbd_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = rgbd_t()
self.utime, self.width, self.height, self.rgblen, self.depthlen = struct.unpack(">qiiii", buf.read(24))
self.rgb = buf.read(self.rgblen)
self.depth = buf.read(self.depthlen)
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
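# LCM type fingerprint: start from the type's base hash and rotate it left by one bit; parents only guards against recursive type definitions.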
if rgbd_t in parents: return 0
tmphash = (0x9765ad14343d07fc) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if rgbd_t._packed_fingerprint is None:
rgbd_t._packed_fingerprint = struct.pack(">Q", rgbd_t._get_hash_recursive([]))
return rgbd_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
| 32.555556
| 111
| 0.638652
|
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class rgbd_t(object):
__slots__ = ["utime", "width", "height", "rgblen", "depthlen", "rgb", "depth"]
__typenames__ = ["int64_t", "int32_t", "int32_t", "int32_t", "int32_t", "byte", "byte"]
__dimensions__ = [None, None, None, None, None, ["rgblen"], ["depthlen"]]
def __init__(self):
self.utime = 0
self.width = 0
self.height = 0
self.rgblen = 0
self.depthlen = 0
self.rgb = ""
self.depth = ""
def encode(self):
buf = BytesIO()
buf.write(rgbd_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">qiiii", self.utime, self.width, self.height, self.rgblen, self.depthlen))
buf.write(bytearray(self.rgb[:self.rgblen]))
buf.write(bytearray(self.depth[:self.depthlen]))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != rgbd_t._get_packed_fingerprint():
raise ValueError("Decode error")
return rgbd_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = rgbd_t()
self.utime, self.width, self.height, self.rgblen, self.depthlen = struct.unpack(">qiiii", buf.read(24))
self.rgb = buf.read(self.rgblen)
self.depth = buf.read(self.depthlen)
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if rgbd_t in parents: return 0
tmphash = (0x9765ad14343d07fc) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if rgbd_t._packed_fingerprint is None:
rgbd_t._packed_fingerprint = struct.pack(">Q", rgbd_t._get_hash_recursive([]))
return rgbd_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
| true
| true
|
f717410eba8874ad5c80c1bd9eb7064162476ab8
| 967
|
py
|
Python
|
Trakttv.bundle/Contents/Tests/plex_mock/models.py
|
disrupted/Trakttv.bundle
|
24712216c71f3b22fd58cb5dd89dad5bb798ed60
|
[
"RSA-MD"
] | 1,346
|
2015-01-01T14:52:24.000Z
|
2022-03-28T12:50:48.000Z
|
Trakttv.bundle/Contents/Tests/plex_mock/models.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 474
|
2015-01-01T10:27:46.000Z
|
2022-03-21T12:26:16.000Z
|
Trakttv.bundle/Contents/Tests/plex_mock/models.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 191
|
2015-01-02T18:27:22.000Z
|
2022-03-29T10:49:48.000Z
|
class LibraryMetadata(object):
def __init__(self, section=None):
self.section = section
class LibrarySection(object):
def __init__(self, title=None):
self.title = title
class Session(object):
def __init__(self, **kwargs):
self.rating_key = None
self.state = None
self.duration = None
self.view_offset = None
self.part = None
self.update(**kwargs)
@property
def payload(self):
return {
'rating_key': self.rating_key,
'view_offset': self.view_offset,
'part': self.part
}
def save(self):
pass
def update(self, **kwargs):
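# Only attributes already declared in __init__ may be updated; anything else raises KeyError.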
for key, value in kwargs.items():
if not hasattr(self, key):
raise KeyError('Unknown attribute with key %r' % (key,))
setattr(self, key, value)
def __repr__(self):
return '<Session state: %r>' % (
self.state
)
| 21.977273
| 68
| 0.553257
|
class LibraryMetadata(object):
def __init__(self, section=None):
self.section = section
class LibrarySection(object):
def __init__(self, title=None):
self.title = title
class Session(object):
def __init__(self, **kwargs):
self.rating_key = None
self.state = None
self.duration = None
self.view_offset = None
self.part = None
self.update(**kwargs)
@property
def payload(self):
return {
'rating_key': self.rating_key,
'view_offset': self.view_offset,
'part': self.part
}
def save(self):
pass
def update(self, **kwargs):
for key, value in kwargs.items():
if not hasattr(self, key):
raise KeyError('Unknown attribute with key %r' % (key,))
setattr(self, key, value)
def __repr__(self):
return '<Session state: %r>' % (
self.state
)
| true
| true
|
f717418e6536d845980ad96232de54991a5746ec
| 12,804
|
py
|
Python
|
app/controller.py
|
Effenberg0x0/ci_edit
|
ea78621164152b1f489cae8e53994fad52c01c16
|
[
"Apache-2.0"
] | 1
|
2019-01-21T07:35:14.000Z
|
2019-01-21T07:35:14.000Z
|
app/controller.py
|
Effenberg0x0/ci_edit
|
ea78621164152b1f489cae8e53994fad52c01c16
|
[
"Apache-2.0"
] | null | null | null |
app/controller.py
|
Effenberg0x0/ci_edit
|
ea78621164152b1f489cae8e53994fad52c01c16
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for key bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import curses.ascii
import app.config
import app.curses_util
import app.log
#import app.window
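# app.window is referenced below only in strict_debug assertions; importing it here would presumably create a circular import, so the attribute is expected to be populated on the app package by the time a Controller is constructed.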
class Controller:
"""A Controller is a keyboard mapping from keyboard/mouse events to editor
commands."""
def __init__(self, view, name):
if app.config.strict_debug:
assert issubclass(self.__class__, Controller)
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.textBuffer = None
self.name = name
def parentController(self):
view = self.view.parent
while view is not None:
if view.controller is not None:
return view.controller
view = view.parent
def changeToConfirmClose(self):
self.findAndChangeTo('confirmClose')
def changeToConfirmOverwrite(self):
self.findAndChangeTo('confirmOverwrite')
def changeToFileManagerWindow(self, *args):
self.findAndChangeTo('fileManagerWindow')
def changeToConfirmQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToHostWindow(self, *args):
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.view.__class__, app.window.Window), self.view
assert issubclass(host.__class__, app.window.Window), host
self.view.changeFocusTo(host)
def changeToInputWindow(self, *args):
self.findAndChangeTo('inputWindow')
def changeToFind(self):
self.findAndChangeTo('interactiveFind')
def changeToFindPrior(self):
curses.ungetch(self.savedCh)
self.findAndChangeTo('interactiveFind')
def changeToGoto(self):
self.findAndChangeTo('interactiveGoto')
def changeToPaletteWindow(self):
self.findAndChangeTo('paletteWindow')
def changeToPopup(self):
self.findAndChangeTo('popupWindow')
def changeToPrediction(self):
self.findAndChangeTo('predictionWindow')
#self.findAndChangeTo('interactivePrediction')
def changeToPrompt(self):
self.findAndChangeTo('interactivePrompt')
def changeToQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToSaveAs(self):
view = self.getNamedWindow('fileManagerWindow')
view.setMode('saveAs')
view.bringToFront()
view.changeFocusTo(view)
def createNewTextBuffer(self):
bufferManager = self.view.program.bufferManager
self.view.setTextBuffer(bufferManager.newTextBuffer())
def doCommand(self, ch, meta):
# Check the commandSet for the input with both its string and integer
# representation.
self.savedCh = ch
cmd = (self.commandSet.get(ch) or
self.commandSet.get(app.curses_util.cursesKeyName(ch)))
if cmd:
cmd()
else:
self.commandDefault(ch, meta)
self.textBuffer.compoundChangePush()
def getNamedWindow(self, windowName):
view = self.view
while view is not None:
if hasattr(view, windowName):
return getattr(view, windowName)
view = view.parent
app.log.fatal(windowName + ' not found')
return None
def currentInputWindow(self):
return self.getNamedWindow('inputWindow')
def findAndChangeTo(self, windowName):
window = self.getNamedWindow(windowName)
window.bringToFront()
self.view.changeFocusTo(window)
def changeTo(self, window):
window.bringToFront()
self.view.changeFocusTo(window)
def focus(self):
app.log.info('base controller focus()')
def confirmationPromptFinish(self, *args):
window = self.getNamedWindow('inputWindow')
window.userIntent = 'edit'
window.bringToFront()
self.view.changeFocusTo(window)
def __closeHostFile(self, host):
"""Close the current file and switch to another or create an empty
file."""
bufferManager = host.program.bufferManager
bufferManager.closeTextBuffer(host.textBuffer)
host.userIntent = 'edit'
tb = bufferManager.getUnsavedBuffer()
if not tb:
tb = bufferManager.nextBuffer()
if not tb:
tb = bufferManager.newTextBuffer()
host.setTextBuffer(tb)
def closeFile(self):
app.log.info()
host = self.getNamedWindow('inputWindow')
self.__closeHostFile(host)
self.confirmationPromptFinish()
def closeOrConfirmClose(self):
"""If the file is clean, close it. If it is dirty, prompt the user
about whether to lose unsaved changes."""
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isDirty():
self.__closeHostFile(host)
return
if host.userIntent == 'edit':
host.userIntent = 'close'
self.changeToConfirmClose()
def initiateClose(self):
"""Called from input window controller."""
self.view.userIntent = 'close'
tb = self.view.textBuffer
if not tb.isDirty():
self.__closeHostFile(self.view)
return
self.view.changeFocusTo(self.view.confirmClose)
def initiateQuit(self):
"""Called from input window controller."""
self.view.userIntent = 'quit'
tb = self.view.textBuffer
if tb.isDirty():
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
self.view.setTextBuffer(tb)
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager.debugLog()
self.view.quitNow()
def initiateSave(self):
"""Called from input window controller."""
self.view.userIntent = 'edit'
tb = self.view.textBuffer
if tb.fullPath:
if not tb.isSafeToWrite():
self.view.changeFocusTo(self.view.confirmOverwrite)
return
tb.fileWrite()
return
self.changeToSaveAs()
def overwriteHostFile(self):
"""Close the current file and switch to another or create an empty
file.
"""
host = self.getNamedWindow('inputWindow')
host.textBuffer.fileWrite()
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
def nextFocusableWindow(self):
window = self.view.parent.nextFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def priorFocusableWindow(self):
window = self.view.parent.priorFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def writeOrConfirmOverwrite(self):
"""Ask whether the file should be overwritten."""
app.log.debug()
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isSafeToWrite():
self.changeToConfirmOverwrite()
return
tb.fileWrite()
# TODO(dschuyler): Is there a deeper issue here that necessitates saving
# the message? Does this only need to wrap the changeToHostWindow()?
# Store the save message so it is not overwritten.
saveMessage = tb.message
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
tb.message = saveMessage # Restore the save message.
def quitOrSwitchToConfirmQuit(self):
app.log.debug(self, self.view)
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
host.userIntent = 'quit'
if tb.isDirty():
self.changeToConfirmQuit()
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
host.setTextBuffer(tb)
self.changeToConfirmQuit()
return
bufferManager.debugLog()
host.quitNow()
def saveOrChangeToSaveAs(self):
app.log.debug()
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.__class__, Controller), self
assert issubclass(self.view.__class__, app.window.Window), self
assert issubclass(host.__class__, app.window.Window), self
assert self.view.textBuffer is self.textBuffer
assert self.view.textBuffer is not host.textBuffer
if host.textBuffer.fullPath:
self.writeOrConfirmOverwrite()
return
self.changeToSaveAs()
def onChange(self):
pass
def saveEventChangeToHostWindow(self, *args):
curses.ungetch(self.savedCh)
host = self.getNamedWindow('inputWindow')
host.bringToFront()
self.view.changeFocusTo(host)
def setTextBuffer(self, textBuffer):
if app.config.strict_debug:
assert issubclass(textBuffer.__class__,
app.text_buffer.TextBuffer), textBuffer
assert self.view.textBuffer is textBuffer
self.textBuffer = textBuffer
def unfocus(self):
pass
class MainController:
"""The different keyboard mappings are different controllers. This class
manages a collection of keyboard mappings and allows the user to switch
between them."""
def __init__(self, view):
if app.config.strict_debug:
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.controllers = {}
self.controller = None
def add(self, controller):
self.controllers[controller.name] = controller
self.controller = controller
def doCommand(self, ch, meta):
self.controller.doCommand(ch, meta)
def focus(self):
app.log.info('MainController.focus')
self.controller.focus()
if 0:
self.commandDefault = self.controller.commandDefault
commandSet = self.controller.commandSet.copy()
commandSet.update({
app.curses_util.KEY_F2: self.nextController,
})
self.controller.commandSet = commandSet
def onChange(self):
self.controller.onChange()
def nextController(self):
app.log.info('nextController')
if 0:
if self.controller is self.controllers['cuaPlus']:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
elif self.controller is self.controllers['cua']:
app.log.info('MainController.nextController emacs')
self.controller = self.controllers['emacs']
elif self.controller is self.controllers['emacs']:
app.log.info('MainController.nextController vi')
self.controller = self.controllers['vi']
else:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
self.controller.setTextBuffer(self.textBuffer)
self.focus()
def setTextBuffer(self, textBuffer):
app.log.info('MainController.setTextBuffer', self.controller)
if app.config.strict_debug:
assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
self.textBuffer = textBuffer
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.controller.unfocus()
| 33.873016
| 80
| 0.637848
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import curses.ascii
import app.config
import app.curses_util
import app.log
class Controller:
def __init__(self, view, name):
if app.config.strict_debug:
assert issubclass(self.__class__, Controller)
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.textBuffer = None
self.name = name
def parentController(self):
view = self.view.parent
while view is not None:
if view.controller is not None:
return view.controller
view = view.parent
def changeToConfirmClose(self):
self.findAndChangeTo('confirmClose')
def changeToConfirmOverwrite(self):
self.findAndChangeTo('confirmOverwrite')
def changeToFileManagerWindow(self, *args):
self.findAndChangeTo('fileManagerWindow')
def changeToConfirmQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToHostWindow(self, *args):
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.view.__class__, app.window.Window), self.view
assert issubclass(host.__class__, app.window.Window), host
self.view.changeFocusTo(host)
def changeToInputWindow(self, *args):
self.findAndChangeTo('inputWindow')
def changeToFind(self):
self.findAndChangeTo('interactiveFind')
def changeToFindPrior(self):
curses.ungetch(self.savedCh)
self.findAndChangeTo('interactiveFind')
def changeToGoto(self):
self.findAndChangeTo('interactiveGoto')
def changeToPaletteWindow(self):
self.findAndChangeTo('paletteWindow')
def changeToPopup(self):
self.findAndChangeTo('popupWindow')
def changeToPrediction(self):
self.findAndChangeTo('predictionWindow')
def changeToPrompt(self):
self.findAndChangeTo('interactivePrompt')
def changeToQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToSaveAs(self):
view = self.getNamedWindow('fileManagerWindow')
view.setMode('saveAs')
view.bringToFront()
view.changeFocusTo(view)
def createNewTextBuffer(self):
bufferManager = self.view.program.bufferManager
self.view.setTextBuffer(bufferManager.newTextBuffer())
def doCommand(self, ch, meta):
self.savedCh = ch
cmd = (self.commandSet.get(ch) or
self.commandSet.get(app.curses_util.cursesKeyName(ch)))
if cmd:
cmd()
else:
self.commandDefault(ch, meta)
self.textBuffer.compoundChangePush()
def getNamedWindow(self, windowName):
view = self.view
while view is not None:
if hasattr(view, windowName):
return getattr(view, windowName)
view = view.parent
app.log.fatal(windowName + ' not found')
return None
def currentInputWindow(self):
return self.getNamedWindow('inputWindow')
def findAndChangeTo(self, windowName):
window = self.getNamedWindow(windowName)
window.bringToFront()
self.view.changeFocusTo(window)
def changeTo(self, window):
window.bringToFront()
self.view.changeFocusTo(window)
def focus(self):
app.log.info('base controller focus()')
def confirmationPromptFinish(self, *args):
window = self.getNamedWindow('inputWindow')
window.userIntent = 'edit'
window.bringToFront()
self.view.changeFocusTo(window)
def __closeHostFile(self, host):
bufferManager = host.program.bufferManager
bufferManager.closeTextBuffer(host.textBuffer)
host.userIntent = 'edit'
tb = bufferManager.getUnsavedBuffer()
if not tb:
tb = bufferManager.nextBuffer()
if not tb:
tb = bufferManager.newTextBuffer()
host.setTextBuffer(tb)
def closeFile(self):
app.log.info()
host = self.getNamedWindow('inputWindow')
self.__closeHostFile(host)
self.confirmationPromptFinish()
def closeOrConfirmClose(self):
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isDirty():
self.__closeHostFile(host)
return
if host.userIntent == 'edit':
host.userIntent = 'close'
self.changeToConfirmClose()
def initiateClose(self):
self.view.userIntent = 'close'
tb = self.view.textBuffer
if not tb.isDirty():
self.__closeHostFile(self.view)
return
self.view.changeFocusTo(self.view.confirmClose)
def initiateQuit(self):
self.view.userIntent = 'quit'
tb = self.view.textBuffer
if tb.isDirty():
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
self.view.setTextBuffer(tb)
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager.debugLog()
self.view.quitNow()
def initiateSave(self):
self.view.userIntent = 'edit'
tb = self.view.textBuffer
if tb.fullPath:
if not tb.isSafeToWrite():
self.view.changeFocusTo(self.view.confirmOverwrite)
return
tb.fileWrite()
return
self.changeToSaveAs()
def overwriteHostFile(self):
host = self.getNamedWindow('inputWindow')
host.textBuffer.fileWrite()
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
def nextFocusableWindow(self):
window = self.view.parent.nextFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def priorFocusableWindow(self):
window = self.view.parent.priorFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def writeOrConfirmOverwrite(self):
app.log.debug()
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isSafeToWrite():
self.changeToConfirmOverwrite()
return
tb.fileWrite()
saveMessage = tb.message
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
tb.message = saveMessage
def quitOrSwitchToConfirmQuit(self):
app.log.debug(self, self.view)
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
host.userIntent = 'quit'
if tb.isDirty():
self.changeToConfirmQuit()
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
host.setTextBuffer(tb)
self.changeToConfirmQuit()
return
bufferManager.debugLog()
host.quitNow()
def saveOrChangeToSaveAs(self):
app.log.debug()
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.__class__, Controller), self
assert issubclass(self.view.__class__, app.window.Window), self
assert issubclass(host.__class__, app.window.Window), self
assert self.view.textBuffer is self.textBuffer
assert self.view.textBuffer is not host.textBuffer
if host.textBuffer.fullPath:
self.writeOrConfirmOverwrite()
return
self.changeToSaveAs()
def onChange(self):
pass
def saveEventChangeToHostWindow(self, *args):
curses.ungetch(self.savedCh)
host = self.getNamedWindow('inputWindow')
host.bringToFront()
self.view.changeFocusTo(host)
def setTextBuffer(self, textBuffer):
if app.config.strict_debug:
assert issubclass(textBuffer.__class__,
app.text_buffer.TextBuffer), textBuffer
assert self.view.textBuffer is textBuffer
self.textBuffer = textBuffer
def unfocus(self):
pass
class MainController:
def __init__(self, view):
if app.config.strict_debug:
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.controllers = {}
self.controller = None
def add(self, controller):
self.controllers[controller.name] = controller
self.controller = controller
def doCommand(self, ch, meta):
self.controller.doCommand(ch, meta)
def focus(self):
app.log.info('MainController.focus')
self.controller.focus()
if 0:
self.commandDefault = self.controller.commandDefault
commandSet = self.controller.commandSet.copy()
commandSet.update({
app.curses_util.KEY_F2: self.nextController,
})
self.controller.commandSet = commandSet
def onChange(self):
self.controller.onChange()
def nextController(self):
app.log.info('nextController')
if 0:
if self.controller is self.controllers['cuaPlus']:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
elif self.controller is self.controllers['cua']:
app.log.info('MainController.nextController emacs')
self.controller = self.controllers['emacs']
elif self.controller is self.controllers['emacs']:
app.log.info('MainController.nextController vi')
self.controller = self.controllers['vi']
else:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
self.controller.setTextBuffer(self.textBuffer)
self.focus()
def setTextBuffer(self, textBuffer):
app.log.info('MainController.setTextBuffer', self.controller)
if app.config.strict_debug:
assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
self.textBuffer = textBuffer
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.controller.unfocus()
| true
| true
|
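A note on the dispatch convention in Controller.doCommand above: a key is looked up twice in commandSet, first by its raw integer and then by its curses key name, before falling back to commandDefault. A minimal standalone sketch of that convention (the key numbers, names, and handlers below are illustrative stand-ins, not ci_edit's own tables):

def curses_key_name(ch):
    # Stand-in for app.curses_util.cursesKeyName: map special key codes to names.
    return {258: "KEY_DOWN", 259: "KEY_UP"}.get(ch)

command_set = {
    ord("q"): lambda: print("quit"),
    "KEY_UP": lambda: print("cursor up"),
}

def do_command(ch):
    cmd = command_set.get(ch) or command_set.get(curses_key_name(ch))
    if cmd:
        cmd()
    else:
        print("default handler for", ch)

do_command(ord("q"))  # matched by its integer value
do_command(259)       # matched by its curses key name
do_command(ord("x"))  # falls through to the default handler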
f717419b82396fef65c67279f9002be3e2b4df00
| 4,704
|
py
|
Python
|
scratchpad/basic_async_server.py
|
cnb0/katcp-python
|
35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c
|
[
"BSD-3-Clause"
] | 8
|
2015-02-25T20:13:54.000Z
|
2019-09-12T06:12:07.000Z
|
scratchpad/basic_async_server.py
|
cnb0/katcp-python
|
35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c
|
[
"BSD-3-Clause"
] | 67
|
2015-01-12T09:58:36.000Z
|
2021-05-12T14:23:26.000Z
|
scratchpad/basic_async_server.py
|
cnb0/katcp-python
|
35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c
|
[
"BSD-3-Clause"
] | 15
|
2015-04-28T13:18:28.000Z
|
2021-01-19T16:16:33.000Z
|
# Copyright 2016 National Research Foundation (South African Radio Astronomy Observatory)
# BSD license - see LICENSE for details
from __future__ import absolute_import, division, print_function
import random
import signal
import time
import tornado
from katcp import AsyncReply, DeviceServer, ProtocolFlags, Sensor
from katcp.kattypes import (Discrete, Float, Str, Timestamp, request,
return_reply)
server_host = ""
server_port = 5000
class MyServer(DeviceServer):
VERSION_INFO = ("example-api", 1, 0)
BUILD_INFO = ("example-implementation", 0, 1, "")
# Optionally set the KATCP protocol version and features. Defaults to
# the latest implemented version of KATCP, with all supported optional
# features
PROTOCOL_INFO = ProtocolFlags(5, 0, set([
ProtocolFlags.MULTI_CLIENT,
ProtocolFlags.MESSAGE_IDS,
]))
FRUIT = [
"apple", "banana", "pear", "kiwi",
]
def setup_sensors(self):
"""Setup some server sensors."""
self._add_result = Sensor.float("add.result",
"Last ?add result.", "", [-10000, 10000])
self._add_result.set_value(0, Sensor.UNREACHABLE)
self._time_result = Sensor.timestamp("time.result",
"Last ?time result.", "")
self._time_result.set_value(0, Sensor.INACTIVE)
self._eval_result = Sensor.string("eval.result",
"Last ?eval result.", "")
self._eval_result.set_value('', Sensor.UNKNOWN)
self._fruit_result = Sensor.discrete("fruit.result",
"Last ?pick-fruit result.", "", self.FRUIT)
self._fruit_result.set_value('apple', Sensor.ERROR)
self.add_sensor(self._add_result)
self.add_sensor(self._time_result)
self.add_sensor(self._eval_result)
self.add_sensor(self._fruit_result)
@request(Float(), Float())
@return_reply(Float())
def request_add(self, req, x, y):
"""Add two numbers"""
r = x + y
self._add_result.set_value(r)
return ("ok", r)
@request()
@return_reply(Timestamp())
def request_time(self, req):
"""Return the current time in seconds since the Unix Epoch."""
r = time.time()
self._time_result.set_value(r)
return ("ok", r)
@request(Str())
@return_reply(Str())
def request_eval(self, req, expression):
"""Evaluate a Python expression."""
        r = str(eval(expression))  # NOTE: eval() of client input is unsafe outside demo code
self._eval_result.set_value(r)
return ("ok", r)
@request()
@return_reply(Discrete(FRUIT))
def request_pick_fruit(self, req):
"""Pick a random fruit."""
r = random.choice(self.FRUIT + [None])
if r is None:
return ("fail", "No fruit.")
delay = random.randrange(1,5)
req.inform("Picking will take %d seconds" % delay)
def pick_handler():
self._fruit_result.set_value(r)
req.reply("ok", r)
self.ioloop.add_callback(
self.ioloop.call_later, delay, pick_handler)
raise AsyncReply
@request(Str())
@return_reply()
def request_set_sensor_inactive(self, req, sensor_name):
"""Set sensor status to inactive"""
sensor = self.get_sensor(sensor_name)
ts, status, value = sensor.read()
sensor.set_value(value, sensor.INACTIVE, ts)
return('ok',)
@request(Str())
@return_reply()
def request_set_sensor_unreachable(self, req, sensor_name):
"""Set sensor status to unreachable"""
sensor = self.get_sensor(sensor_name)
ts, status, value = sensor.read()
sensor.set_value(value, sensor.UNREACHABLE, ts)
return('ok',)
def request_raw_reverse(self, req, msg):
"""
A raw request handler to demonstrate the calling convention if
        @request decorators are not used. Reverses the message arguments.
"""
# msg is a katcp.Message.request object
reversed_args = msg.arguments[::-1]
# req.make_reply() makes a katcp.Message.reply using the correct request
# name and message ID
return req.make_reply(*reversed_args)
@tornado.gen.coroutine
def on_shutdown(ioloop, server):
print('Shutting down')
yield server.stop()
ioloop.stop()
if __name__ == "__main__":
ioloop = tornado.ioloop.IOLoop.current()
server = MyServer(server_host, server_port)
server.set_concurrency_options(thread_safe=False, handler_thread=False)
server.set_ioloop(ioloop)
signal.signal(signal.SIGINT, lambda sig, frame: ioloop.add_callback_from_signal(
on_shutdown, ioloop, server))
ioloop.add_callback(server.start)
ioloop.start()
| 32
| 89
| 0.639881
|
from __future__ import absolute_import, division, print_function
import random
import signal
import time
import tornado
from katcp import AsyncReply, DeviceServer, ProtocolFlags, Sensor
from katcp.kattypes import (Discrete, Float, Str, Timestamp, request,
return_reply)
server_host = ""
server_port = 5000
class MyServer(DeviceServer):
VERSION_INFO = ("example-api", 1, 0)
BUILD_INFO = ("example-implementation", 0, 1, "")
PROTOCOL_INFO = ProtocolFlags(5, 0, set([
ProtocolFlags.MULTI_CLIENT,
ProtocolFlags.MESSAGE_IDS,
]))
FRUIT = [
"apple", "banana", "pear", "kiwi",
]
def setup_sensors(self):
self._add_result = Sensor.float("add.result",
"Last ?add result.", "", [-10000, 10000])
self._add_result.set_value(0, Sensor.UNREACHABLE)
self._time_result = Sensor.timestamp("time.result",
"Last ?time result.", "")
self._time_result.set_value(0, Sensor.INACTIVE)
self._eval_result = Sensor.string("eval.result",
"Last ?eval result.", "")
self._eval_result.set_value('', Sensor.UNKNOWN)
self._fruit_result = Sensor.discrete("fruit.result",
"Last ?pick-fruit result.", "", self.FRUIT)
self._fruit_result.set_value('apple', Sensor.ERROR)
self.add_sensor(self._add_result)
self.add_sensor(self._time_result)
self.add_sensor(self._eval_result)
self.add_sensor(self._fruit_result)
@request(Float(), Float())
@return_reply(Float())
def request_add(self, req, x, y):
r = x + y
self._add_result.set_value(r)
return ("ok", r)
@request()
@return_reply(Timestamp())
def request_time(self, req):
r = time.time()
self._time_result.set_value(r)
return ("ok", r)
@request(Str())
@return_reply(Str())
def request_eval(self, req, expression):
r = str(eval(expression))
self._eval_result.set_value(r)
return ("ok", r)
@request()
@return_reply(Discrete(FRUIT))
def request_pick_fruit(self, req):
r = random.choice(self.FRUIT + [None])
if r is None:
return ("fail", "No fruit.")
delay = random.randrange(1,5)
req.inform("Picking will take %d seconds" % delay)
def pick_handler():
self._fruit_result.set_value(r)
req.reply("ok", r)
self.ioloop.add_callback(
self.ioloop.call_later, delay, pick_handler)
raise AsyncReply
@request(Str())
@return_reply()
def request_set_sensor_inactive(self, req, sensor_name):
sensor = self.get_sensor(sensor_name)
ts, status, value = sensor.read()
sensor.set_value(value, sensor.INACTIVE, ts)
return('ok',)
@request(Str())
@return_reply()
def request_set_sensor_unreachable(self, req, sensor_name):
sensor = self.get_sensor(sensor_name)
ts, status, value = sensor.read()
sensor.set_value(value, sensor.UNREACHABLE, ts)
return('ok',)
def request_raw_reverse(self, req, msg):
reversed_args = msg.arguments[::-1]
return req.make_reply(*reversed_args)
@tornado.gen.coroutine
def on_shutdown(ioloop, server):
print('Shutting down')
yield server.stop()
ioloop.stop()
if __name__ == "__main__":
ioloop = tornado.ioloop.IOLoop.current()
server = MyServer(server_host, server_port)
server.set_concurrency_options(thread_safe=False, handler_thread=False)
server.set_ioloop(ioloop)
signal.signal(signal.SIGINT, lambda sig, frame: ioloop.add_callback_from_signal(
on_shutdown, ioloop, server))
ioloop.add_callback(server.start)
ioloop.start()
| true
| true
|
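The @request/@return_reply decorators in the server above fully describe a handler's wire format, so adding a request is mechanical. A hypothetical ?multiply handler following the same pattern (it would live inside MyServer and reuses the Float, request, and return_reply imports already present in the file):

    @request(Float(), Float())
    @return_reply(Float())
    def request_multiply(self, req, x, y):
        """Multiply two numbers."""
        r = x * y
        return ("ok", r)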
f71741bac27bc1f5ba082d55bf8dd41a6deacf3b
| 2,588
|
py
|
Python
|
Estructura de datos y su procesamiento/Actividad8-TuplaNominada3.py
|
fernandomireles/University
|
5ebf3cf3e3093a8853cc4903e6f617dda7df4336
|
[
"MIT"
] | null | null | null |
Estructura de datos y su procesamiento/Actividad8-TuplaNominada3.py
|
fernandomireles/University
|
5ebf3cf3e3093a8853cc4903e6f617dda7df4336
|
[
"MIT"
] | null | null | null |
Estructura de datos y su procesamiento/Actividad8-TuplaNominada3.py
|
fernandomireles/University
|
5ebf3cf3e3093a8853cc4903e6f617dda7df4336
|
[
"MIT"
] | 1
|
2021-08-18T15:21:23.000Z
|
2021-08-18T15:21:23.000Z
|
"""
Codificar un algoritmo en Python que permita registrar la clave (Por el momento,
no esn ecesario validar si la clave es unica), el nombre y correo electrónico
de múltiples personas, hasta que el usuario indique que ha concluído con la captura
correspondiente (proponga usted el mecanismo para esto).
Una vez concluída la captura, se deberá desplegar el listado completo de las personas registradas.
NOTA: Puede elegir utilizar tupla nominadas (recomendable) o bien, listas anidadadas"""
SEPARADOR = ("*" * 20)
from collections import namedtuple  # Standard-library named tuples
Personas = namedtuple("Personas",["clave","nombre","correoElectronico"])  # Record layout for each person
ListaPersonas=[]  # Empty list that will hold the records (tuples)
while True:  # Menu loop
print("\n-- Bienvenido(a) al Menu")
print("1) Agregar una persona")
print("2) Búsqueda específica")
print("3) Ver listado completo")
print("4) Salir")
opcionElegida = input("> ")
    if opcionElegida == "4": # Exit
print("Gracias por usar el programa, buen día")
break
    if opcionElegida == "1": # Add a person
clave = input("Porfavor ingrese su clave: ")
nombre = input("Porfavor ingrese su nombre: ")
correoElectronico = input("Porfavor introduzca su correo electrónico: ")
        TuplaPersona = Personas(clave,nombre,correoElectronico) # Build the temporary tuple
print(SEPARADOR)
        ListaPersonas.append(TuplaPersona) # Store the temporary tuple in the list
    if opcionElegida == "2": # Search by key
if ListaPersonas:
claveBuscado = input("Ingrese la clave a buscar: ")
for busqueda in ListaPersonas:
if(busqueda.clave) == claveBuscado:
print("\nHemos encontrado la clave:", claveBuscado)
print("El nombre es:", busqueda.nombre, "y su correo:", busqueda.correoElectronico)
else:
print("No se encuentra ningun registro")
    if opcionElegida == "3": # Print the full listing
if ListaPersonas:
print("\nListado completo de personas:")
print("|{:<10}|{:<15}|{:<25}|".format("Clave","Nombre","Correo electrónico"))
            for entrada in ListaPersonas: # "for" loop: one printed row per record
print("|{:<10}|{:<15}|{:<25}|".format(entrada.clave,entrada.nombre,entrada.correoElectronico))
else:
print("No se encuentra ningún registro")
| 47.925926
| 111
| 0.651468
|
SEPARADOR = ("*" * 20)
from collections import namedtuple
Personas = namedtuple("Personas",["clave","nombre","correoElectronico"])
ListaPersonas=[]
while True:
print("\n-- Bienvenido(a) al Menu")
print("1) Agregar una persona")
print("2) Búsqueda específica")
print("3) Ver listado completo")
print("4) Salir")
opcionElegida = input("> ")
if opcionElegida == "4":
print("Gracias por usar el programa, buen día")
break
if opcionElegida == "1":
clave = input("Porfavor ingrese su clave: ")
nombre = input("Porfavor ingrese su nombre: ")
correoElectronico = input("Porfavor introduzca su correo electrónico: ")
TuplaPersona = Personas(clave,nombre,correoElectronico)
print(SEPARADOR)
ListaPersonas.append(TuplaPersona)
if opcionElegida == "2":
if ListaPersonas:
claveBuscado = input("Ingrese la clave a buscar: ")
for busqueda in ListaPersonas:
if(busqueda.clave) == claveBuscado:
print("\nHemos encontrado la clave:", claveBuscado)
print("El nombre es:", busqueda.nombre, "y su correo:", busqueda.correoElectronico)
else:
print("No se encuentra ningun registro")
if opcionElegida == "3":
if ListaPersonas:
print("\nListado completo de personas:")
print("|{:<10}|{:<15}|{:<25}|".format("Clave","Nombre","Correo electrónico"))
for entrada in ListaPersonas:
print("|{:<10}|{:<15}|{:<25}|".format(entrada.clave,entrada.nombre,entrada.correoElectronico))
else:
print("No se encuentra ningún registro")
| true
| true
|
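One design note on the search in option 2 above: it scans the whole list on every lookup. Since each record already carries a clave field, a dictionary keyed by it gives constant-time lookups; a minimal sketch using the same namedtuple (the sample records are invented for illustration):

from collections import namedtuple

Personas = namedtuple("Personas", ["clave", "nombre", "correoElectronico"])

por_clave = {}  # index: clave -> record
for p in (Personas("A1", "Ana", "ana@example.com"),
          Personas("B2", "Luis", "luis@example.com")):
    por_clave[p.clave] = p

hit = por_clave.get("B2")
if hit:
    print(hit.nombre, hit.correoElectronico)  # -> Luis luis@example.com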
f717423cc3d548d9864cbc9e2e9fcc26f024bacd
| 1,833
|
py
|
Python
|
backend/mlarchive/bin/check_spam_legacy.py
|
dkg/mailarch
|
562757c09e212c202c35231d7e7c588cd4d3fb65
|
[
"BSD-3-Clause"
] | 6
|
2022-03-09T23:10:28.000Z
|
2022-03-21T05:32:40.000Z
|
backend/mlarchive/bin/check_spam_legacy.py
|
dkg/mailarch
|
562757c09e212c202c35231d7e7c588cd4d3fb65
|
[
"BSD-3-Clause"
] | 5
|
2022-03-11T09:39:47.000Z
|
2022-03-30T16:48:09.000Z
|
backend/mlarchive/bin/check_spam_legacy.py
|
dkg/mailarch
|
562757c09e212c202c35231d7e7c588cd4d3fb65
|
[
"BSD-3-Clause"
] | 4
|
2022-03-04T15:36:19.000Z
|
2022-03-28T23:45:44.000Z
|
#!../../../env/bin/python
"""
Script to scan through archive of mbox files and produce a spam report.
"""
# Standalone boilerplate --------------------------------------------------------------
from django_setup import do_setup
do_setup()
# -------------------------------------------------------------------------------------
import argparse
import email
import logging
import os
import shutil
import subprocess
import sys
from django.conf import settings
from mlarchive.bin.scan_utils import get_messages
progname = sys.argv[0]
from django.utils.log import getLogger
import logging.config
logging.config.dictConfig(settings.LOGGING)
logger = getLogger('mlarchive.custom')
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
os.chmod(path,0o2777)
def main():
parser = argparse.ArgumentParser(description='Scan archive for spam.')
parser.add_argument('path')
parser.add_argument('-v','--verbose', help='verbose output',action='store_true')
args = parser.parse_args()
if not os.path.isdir(args.path):
parser.error('{} must be a directory'.format(args.path))
fullnames = [ os.path.join(args.path,n) for n in os.listdir(args.path) ]
elists = list(filter(os.path.isdir,fullnames))
for elist in elists:
total = 0
spam = 0
for msg in get_messages(elist):
total += 1
# scan
p = subprocess.Popen(['spamc','-c'], stdin=subprocess.PIPE)
            p.communicate(input=msg.as_string().encode())  # communicate() needs bytes on Python 3
if p.returncode != 0:
# the message is spam
spam += 1
if args.verbose:
print("%s: spam" % elist)
# print stats
print("{}, {}:{}".format(os.path.basename(elist),total,spam))
if __name__ == "__main__":
main()
| 27.772727
| 87
| 0.585379
|
from django_setup import do_setup
do_setup()
import argparse
import email
import logging
import os
import shutil
import subprocess
import sys
from django.conf import settings
from mlarchive.bin.scan_utils import get_messages
progname = sys.argv[0]
from django.utils.log import getLogger
import logging.config
logging.config.dictConfig(settings.LOGGING)
logger = getLogger('mlarchive.custom')
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
os.chmod(path,0o2777)
def main():
parser = argparse.ArgumentParser(description='Scan archive for spam.')
parser.add_argument('path')
parser.add_argument('-v','--verbose', help='verbose output',action='store_true')
args = parser.parse_args()
if not os.path.isdir(args.path):
parser.error('{} must be a directory'.format(args.path))
fullnames = [ os.path.join(args.path,n) for n in os.listdir(args.path) ]
elists = list(filter(os.path.isdir,fullnames))
for elist in elists:
total = 0
spam = 0
for msg in get_messages(elist):
total += 1
p = subprocess.Popen(['spamc','-c'], stdin=subprocess.PIPE)
            p.communicate(input=msg.as_string().encode())
if p.returncode != 0:
spam += 1
if args.verbose:
print("%s: spam" % elist)
print("{}, {}:{}".format(os.path.basename(elist),total,spam))
if __name__ == "__main__":
main()
| true
| true
|
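spamc -c reports its verdict through the exit status (0 for ham, non-zero for spam), which is why the loop above inspects only returncode. A self-contained sketch of the same check using subprocess.run, assuming a local spamd/spamc installation (the message body is invented):

import subprocess

raw = b"Subject: test\n\nhello\n"
result = subprocess.run(["spamc", "-c"], input=raw,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("spam" if result.returncode != 0 else "ham")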
f717427fe7f805450f84f4ef7ba5df39674ef6cb
| 348
|
py
|
Python
|
ja/code_snippets/api-embeds-enable.py
|
quotecenter/documentation-1
|
f365703264761aa2b19d5d1d8ec55a3a6082ef4d
|
[
"BSD-3-Clause"
] | null | null | null |
ja/code_snippets/api-embeds-enable.py
|
quotecenter/documentation-1
|
f365703264761aa2b19d5d1d8ec55a3a6082ef4d
|
[
"BSD-3-Clause"
] | null | null | null |
ja/code_snippets/api-embeds-enable.py
|
quotecenter/documentation-1
|
f365703264761aa2b19d5d1d8ec55a3a6082ef4d
|
[
"BSD-3-Clause"
] | null | null | null |
from datadog import initialize, api
# Initialize request parameters, including the API/APP keys
options = {
'api_key': '<YOUR_API_KEY>',
'app_key': '<YOUR_APP_KEY>'
}
initialize(**options)
# Set Embed ID (token)
embed_id = "5f585b01c81b12ecdf5f40df0382738d0919170639985d3df5e2fc4232865b0c"
# Call Embed API function
api.Embed.enable(embed_id)
| 21.75
| 77
| 0.761494
|
from datadog import initialize, api
options = {
'api_key': '<YOUR_API_KEY>',
'app_key': '<YOUR_APP_KEY>'
}
initialize(**options)
embed_id = "5f585b01c81b12ecdf5f40df0382738d0919170639985d3df5e2fc4232865b0c"
api.Embed.enable(embed_id)
| true
| true
|
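A small variant of the snippet above that avoids hard-coding credentials by reading them from the environment; the DD_API_KEY/DD_APP_KEY variable names are an assumption here, not something the snippet defines:

import os
from datadog import initialize, api

initialize(api_key=os.environ["DD_API_KEY"], app_key=os.environ["DD_APP_KEY"])
api.Embed.enable("5f585b01c81b12ecdf5f40df0382738d0919170639985d3df5e2fc4232865b0c")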
f71742b2238dd40bc0984373a331e975450b3324
| 2,827
|
py
|
Python
|
tests/TestAptChefProvisionerPlugin.py
|
dhellmann/aminator
|
96efa7d5690bfae2c20b21f0b417b2784f6cb085
|
[
"Apache-2.0"
] | null | null | null |
tests/TestAptChefProvisionerPlugin.py
|
dhellmann/aminator
|
96efa7d5690bfae2c20b21f0b417b2784f6cb085
|
[
"Apache-2.0"
] | null | null | null |
tests/TestAptChefProvisionerPlugin.py
|
dhellmann/aminator
|
96efa7d5690bfae2c20b21f0b417b2784f6cb085
|
[
"Apache-2.0"
] | 1
|
2020-01-06T16:18:22.000Z
|
2020-01-06T16:18:22.000Z
|
# -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import logging
import json
from aminator.config import Config
from aminator.plugins.provisioner.apt_chef import AptChefProvisionerPlugin
log = logging.getLogger(__name__)
console = logging.StreamHandler()
# add the handler to the root logger
logging.getLogger('').addHandler(console)
class TestAptChefProvisionerPlugin(object):
def setup_method(self, method):
self.chef_provisioner = AptChefProvisionerPlugin()
self.chef_provisioner._config = Config()
self.chef_provisioner._config.context = Config()
self.chef_provisioner._config.context.chef = Config()
self.chef_provisioner._config.context.package = Config()
self.chef_provisioner._config.pkg_attributes = ['name', 'version', 'release', 'build_job', 'build_number']
self.chef_provisioner._config.context.chef.dir = "./tests"
self.chef_provisioner._config.context.chef.json = "test_chef_node.json"
def test_parse_json(self):
# given a JSON doc, what's the name, version, release string, etc
# this is more a direct test of the ChefJSON mapping
with open(self.chef_provisioner._get_chef_json_full_path()) as chef_json_file:
my_json = json.load(chef_json_file)
assert "helloworld" == my_json['name']
assert "APP-helloworld" == my_json['build_job']
assert "1.0" == my_json['version']
assert "277" == my_json['release']
assert "33a9d1cac7686c8a46c1f330add2e8d36850fd15" == my_json['change']
assert isinstance(my_json['run_list'], list)
assert "recipe[helloworld]" == my_json['run_list'][0]
def test_metadata(self):
self.chef_provisioner._store_package_metadata()
assert "helloworld" == self.chef_provisioner._config.context.package.attributes['name']
assert "1.0" == self.chef_provisioner._config.context.package.attributes['version']
assert "277" == self.chef_provisioner._config.context.package.attributes['release']
assert "APP-helloworld" == self.chef_provisioner._config.context.package.attributes['build_job']
assert "277" == self.chef_provisioner._config.context.package.attributes['build_number']
| 41.573529
| 114
| 0.710294
|
import logging
import json
from aminator.config import Config
from aminator.plugins.provisioner.apt_chef import AptChefProvisionerPlugin
log = logging.getLogger(__name__)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
class TestAptChefProvisionerPlugin(object):
def setup_method(self, method):
self.chef_provisioner = AptChefProvisionerPlugin()
self.chef_provisioner._config = Config()
self.chef_provisioner._config.context = Config()
self.chef_provisioner._config.context.chef = Config()
self.chef_provisioner._config.context.package = Config()
self.chef_provisioner._config.pkg_attributes = ['name', 'version', 'release', 'build_job', 'build_number']
self.chef_provisioner._config.context.chef.dir = "./tests"
self.chef_provisioner._config.context.chef.json = "test_chef_node.json"
def test_parse_json(self):
# this is more a direct test of the ChefJSON mapping
with open(self.chef_provisioner._get_chef_json_full_path()) as chef_json_file:
my_json = json.load(chef_json_file)
assert "helloworld" == my_json['name']
assert "APP-helloworld" == my_json['build_job']
assert "1.0" == my_json['version']
assert "277" == my_json['release']
assert "33a9d1cac7686c8a46c1f330add2e8d36850fd15" == my_json['change']
assert isinstance(my_json['run_list'], list)
assert "recipe[helloworld]" == my_json['run_list'][0]
def test_metadata(self):
self.chef_provisioner._store_package_metadata()
assert "helloworld" == self.chef_provisioner._config.context.package.attributes['name']
assert "1.0" == self.chef_provisioner._config.context.package.attributes['version']
assert "277" == self.chef_provisioner._config.context.package.attributes['release']
assert "APP-helloworld" == self.chef_provisioner._config.context.package.attributes['build_job']
assert "277" == self.chef_provisioner._config.context.package.attributes['build_number']
| true
| true
|
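The assertions in test_parse_json above pin down what tests/test_chef_node.json must contain. A fixture consistent with those assertions would look roughly like this (reconstructed from the asserts, not copied from the repository):

import json

chef_node = {
    "name": "helloworld",
    "build_job": "APP-helloworld",
    "version": "1.0",
    "release": "277",
    "change": "33a9d1cac7686c8a46c1f330add2e8d36850fd15",
    "run_list": ["recipe[helloworld]"],
}
print(json.dumps(chef_node, indent=2))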
f71742c671520b22e56777c333e9e3fd20648561
| 2,331
|
py
|
Python
|
tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 32
|
2019-09-05T05:16:56.000Z
|
2022-03-22T09:50:38.000Z
|
tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 35
|
2019-09-07T18:58:54.000Z
|
2022-03-24T19:29:36.000Z
|
tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 18
|
2019-09-09T11:07:21.000Z
|
2022-03-25T08:49:59.000Z
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center Get Site Count data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD9BdB9034Df99Dba(object):
"""Get Site Count request schema definition."""
def __init__(self):
super(JSONSchemaValidatorD9BdB9034Df99Dba, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"response": {
"type": [
"string",
"null"
]
},
"version": {
"type": [
"string",
"null"
]
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| 32.830986
| 78
| 0.639211
|
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD9BdB9034Df99Dba(object):
def __init__(self):
super(JSONSchemaValidatorD9BdB9034Df99Dba, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"response": {
"type": [
"string",
"null"
]
},
"version": {
"type": [
"string",
"null"
]
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| true
| true
|
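Exercising the validator above directly is straightforward: it compiles its schema once in __init__, and validate() raises MalformedRequest on any mismatch. A sketch, assuming the class above is in scope:

from dnacentersdk.exceptions import MalformedRequest

validator = JSONSchemaValidatorD9BdB9034Df99Dba()
validator.validate({"response": "42", "version": "1.0"})  # passes silently
try:
    validator.validate({"response": 42})  # "response" must be a string or null
except MalformedRequest as exc:
    print(exc)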
f71744883794a583ccf9436508cd8c98a384800a
| 1,579
|
py
|
Python
|
artascope/test/lib/test_user_status_manager.py
|
magus0219/icloud-photo-downloader
|
6334530d971cf61089d031de99a38f204c201837
|
[
"MIT"
] | 3
|
2020-09-24T16:19:28.000Z
|
2022-02-09T21:10:11.000Z
|
artascope/test/lib/test_user_status_manager.py
|
magus0219/icloud-photo-downloader
|
6334530d971cf61089d031de99a38f204c201837
|
[
"MIT"
] | null | null | null |
artascope/test/lib/test_user_status_manager.py
|
magus0219/icloud-photo-downloader
|
6334530d971cf61089d031de99a38f204c201837
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[magus0219@gmail.com] on 2020/3/30
import datetime
from artascope.src.lib.user_status_manager import usm
class TestUserStatusManager:
def test_add_user(self):
usm.add_user(username="username")
us = usm.get_user(username="username")
assert (
us.username == "username"
and type(us.created_ts) == int
and us.created_ts < datetime.datetime.now().timestamp()
)
def test_add_existed_user(self):
usm.add_user(username="username")
usm.add_user(username="username")
us_list = usm.get_all_user()
assert len(us_list) == 1
def test_get_user(self):
assert usm.get_user("not_exist") is None
usm.add_user(username="username")
assert usm.get_user("username").username == "username"
def test_exist_user(self):
assert usm.exist_user("not_exist") is False
usm.add_user(username="username")
assert usm.exist_user("username") is True
def test_get_all_user(self):
assert usm.get_all_user() is None
usm.add_user(username="username1")
usm.add_user(username="username2")
us_list = usm.get_all_user()
assert us_list[0].username == "username1"
assert us_list[1].username == "username2"
assert len(us_list) == 2
def test_add_dup_user(self):
usm.add_user(username="username")
usm.add_user(username="username")
us_list = usm.get_all_user()
assert len(us_list) == 1
| 28.709091
| 67
| 0.636479
|
import datetime
from artascope.src.lib.user_status_manager import usm
class TestUserStatusManager:
def test_add_user(self):
usm.add_user(username="username")
us = usm.get_user(username="username")
assert (
us.username == "username"
and type(us.created_ts) == int
and us.created_ts < datetime.datetime.now().timestamp()
)
def test_add_existed_user(self):
usm.add_user(username="username")
usm.add_user(username="username")
us_list = usm.get_all_user()
assert len(us_list) == 1
def test_get_user(self):
assert usm.get_user("not_exist") is None
usm.add_user(username="username")
assert usm.get_user("username").username == "username"
def test_exist_user(self):
assert usm.exist_user("not_exist") is False
usm.add_user(username="username")
assert usm.exist_user("username") is True
def test_get_all_user(self):
assert usm.get_all_user() is None
usm.add_user(username="username1")
usm.add_user(username="username2")
us_list = usm.get_all_user()
assert us_list[0].username == "username1"
assert us_list[1].username == "username2"
assert len(us_list) == 2
def test_add_dup_user(self):
usm.add_user(username="username")
usm.add_user(username="username")
us_list = usm.get_all_user()
assert len(us_list) == 1
| true
| true
|
f7174560bad50e0fdbb28b5776553cba721ab30f
| 2,919
|
py
|
Python
|
swagger_client/models/all_of_permission_set_administration_rights.py
|
ike709/tgs4-api-pyclient
|
97918cfe614cc4ef06ef2485efff163417a8cd44
|
[
"MIT"
] | null | null | null |
swagger_client/models/all_of_permission_set_administration_rights.py
|
ike709/tgs4-api-pyclient
|
97918cfe614cc4ef06ef2485efff163417a8cd44
|
[
"MIT"
] | null | null | null |
swagger_client/models/all_of_permission_set_administration_rights.py
|
ike709/tgs4-api-pyclient
|
97918cfe614cc4ef06ef2485efff163417a8cd44
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
TGS API
A production scale tool for BYOND server management # noqa: E501
OpenAPI spec version: 9.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.administration_rights import AdministrationRights # noqa: F401,E501
class AllOfPermissionSetAdministrationRights(AdministrationRights):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
if hasattr(AdministrationRights, "swagger_types"):
swagger_types.update(AdministrationRights.swagger_types)
attribute_map = {
}
if hasattr(AdministrationRights, "attribute_map"):
attribute_map.update(AdministrationRights.attribute_map)
def __init__(self, *args, **kwargs): # noqa: E501
"""AllOfPermissionSetAdministrationRights - a model defined in Swagger""" # noqa: E501
self.discriminator = None
AdministrationRights.__init__(self, *args, **kwargs)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AllOfPermissionSetAdministrationRights, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AllOfPermissionSetAdministrationRights):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.076923
| 95
| 0.59815
|
import pprint
import re
import six
from swagger_client.models.administration_rights import AdministrationRights
class AllOfPermissionSetAdministrationRights(AdministrationRights):
swagger_types = {
}
if hasattr(AdministrationRights, "swagger_types"):
swagger_types.update(AdministrationRights.swagger_types)
attribute_map = {
}
if hasattr(AdministrationRights, "attribute_map"):
attribute_map.update(AdministrationRights.attribute_map)
def __init__(self, *args, **kwargs):
self.discriminator = None
AdministrationRights.__init__(self, *args, **kwargs)
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AllOfPermissionSetAdministrationRights, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AllOfPermissionSetAdministrationRights):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f71747aaed07f08c55da22df67f2dc5bc6ae9a92
| 742
|
py
|
Python
|
ocr/paint.py
|
BumagniyPacket/ocr
|
f2651f3a23cf835a689b35a658ef3443086fd72a
|
[
"Apache-2.0"
] | null | null | null |
ocr/paint.py
|
BumagniyPacket/ocr
|
f2651f3a23cf835a689b35a658ef3443086fd72a
|
[
"Apache-2.0"
] | null | null | null |
ocr/paint.py
|
BumagniyPacket/ocr
|
f2651f3a23cf835a689b35a658ef3443086fd72a
|
[
"Apache-2.0"
] | 1
|
2019-02-07T19:56:33.000Z
|
2019-02-07T19:56:33.000Z
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
def show_image(image):
plt.imshow(-image, cmap='Greys')
plt.show()
def show_two(image1, image2):
plt.subplot(121)
plt.imshow(-image1, cmap='Greys')
plt.subplot(122)
plt.imshow(-image2, cmap='Greys')
plt.show()
def plot_hist(img):
plt.hist(img.ravel(), 256, range=(0., 1.), color='red')
plt.show()
def plot_2img_2hist(image1, image2):
plt.subplot(221)
plt.imshow(-image1, cmap='Greys')
plt.subplot(223)
plt.hist(image1.ravel(), 256, range=(0., 1.), color='red')
plt.subplot(222)
plt.imshow(-image2, cmap='Greys')
plt.subplot(224)
plt.hist(image2.ravel(), 256, range=(0., 1.), color='red')
plt.show()
| 18.55
| 62
| 0.610512
|
import matplotlib.pyplot as plt
def show_image(image):
plt.imshow(-image, cmap='Greys')
plt.show()
def show_two(image1, image2):
plt.subplot(121)
plt.imshow(-image1, cmap='Greys')
plt.subplot(122)
plt.imshow(-image2, cmap='Greys')
plt.show()
def plot_hist(img):
plt.hist(img.ravel(), 256, range=(0., 1.), color='red')
plt.show()
def plot_2img_2hist(image1, image2):
plt.subplot(221)
plt.imshow(-image1, cmap='Greys')
plt.subplot(223)
plt.hist(image1.ravel(), 256, range=(0., 1.), color='red')
plt.subplot(222)
plt.imshow(-image2, cmap='Greys')
plt.subplot(224)
plt.hist(image2.ravel(), 256, range=(0., 1.), color='red')
plt.show()
| true
| true
|
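The helpers above expect grayscale arrays with values in [0, 1] (the histogram range is hard-coded to (0., 1.)) and negate images so ink plots dark. A quick demo with synthetic data, assuming the functions above are in scope:

import numpy as np

img_a = np.random.rand(64, 64)        # synthetic grayscale image in [0, 1]
img_b = (img_a > 0.5).astype(float)   # a crude binarization of it

show_two(img_a, img_b)
plot_2img_2hist(img_a, img_b)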
f7174853e5691cb8f3c8388d4ff3c6a48d541046
| 21,576
|
py
|
Python
|
gpMgmt/bin/gppylib/commands/base.py
|
abhisheknishant138/gpdb
|
1805743d505837026aa137cabb8a7072d745a129
|
[
"PostgreSQL",
"Apache-2.0"
] | 4
|
2017-11-28T08:12:58.000Z
|
2020-10-28T04:15:52.000Z
|
gpMgmt/bin/gppylib/commands/base.py
|
abhisheknishant138/gpdb
|
1805743d505837026aa137cabb8a7072d745a129
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/commands/base.py
|
abhisheknishant138/gpdb
|
1805743d505837026aa137cabb8a7072d745a129
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
base.py
common base for the commands execution framework. Units of work are defined as Operations
as found in other modules like unix.py. These units of work are then packaged up and executed
within a GpCommand. A GpCommand is just a common infrastructure for executing an Operation.
The general idea is that the application developer breaks the problem down into a set of
GpCommands that need to be executed. This class also provides a queue and set of workers
for executing this set of commands.
"""
from queue import Queue, Empty
from threading import Thread
import os
import signal
import subprocess
import sys
import time
from gppylib import gplog
from gppylib import gpsubprocess
from pg import DB
logger = gplog.get_default_logger()
GPHOME = os.environ.get('GPHOME')
# Maximum retries if sshd rejects the connection due to too many
# unauthenticated connections.
SSH_MAX_RETRY = 10
# Delay before retrying ssh connection, in seconds
SSH_RETRY_DELAY = .5
class WorkerPool(object):
"""TODO:"""
halt_command = 'halt command'
def __init__(self, numWorkers=16, items=None, daemonize=False, logger=gplog.get_default_logger()):
if numWorkers <= 0:
raise Exception("WorkerPool(): numWorkers should be greater than 0.")
self.workers = []
self.should_stop = False
self.work_queue = Queue()
self.completed_queue = Queue()
self._assigned = 0
self.daemonize = daemonize
self.logger = logger
if items is not None:
for item in items:
self.addCommand(item)
for i in range(0, numWorkers):
w = Worker("worker%d" % i, self)
self.workers.append(w)
w.start()
self.numWorkers = numWorkers
###
def getNumWorkers(self):
return self.numWorkers
def getNextWorkItem(self):
return self.work_queue.get(block=True)
def addFinishedWorkItem(self, command):
self.completed_queue.put(command)
self.work_queue.task_done()
def markTaskDone(self):
self.work_queue.task_done()
def addCommand(self, cmd):
self.logger.debug("Adding cmd to work_queue: %s" % cmd.cmdStr)
self.work_queue.put(cmd)
self._assigned += 1
def _join_work_queue_with_timeout(self, timeout):
"""
Queue.join() unfortunately doesn't take a timeout (see
https://bugs.python.org/issue9634). Fake it here, with a solution
inspired by notes on that bug report.
XXX This solution uses undocumented Queue internals (though they are not
underscore-prefixed...).
"""
done_condition = self.work_queue.all_tasks_done
done_condition.acquire()
try:
while self.work_queue.unfinished_tasks:
if (timeout <= 0):
# Timed out.
return False
start_time = time.time()
done_condition.wait(timeout)
timeout -= (time.time() - start_time)
finally:
done_condition.release()
return True
def join(self, timeout=None):
"""
Waits (up to an optional timeout) for the worker queue to be fully
completed, and returns True if the pool is now done with its work.
A None timeout indicates that join() should wait forever; the return
value is always True in this case. Zero and negative timeouts indicate
that join() will query the queue status and return immediately, whether
the queue is done or not.
"""
if timeout is None:
self.work_queue.join()
return True
return self._join_work_queue_with_timeout(timeout)
def joinWorkers(self):
for w in self.workers:
w.join()
def _pop_completed(self):
"""
Pops an item off the completed queue and decrements the assigned count.
If the queue is empty, throws Queue.Empty.
"""
item = self.completed_queue.get(False)
self._assigned -= 1
return item
def getCompletedItems(self):
completed_list = []
try:
while True:
item = self._pop_completed() # will throw Empty
if item is not None:
completed_list.append(item)
except Empty:
return completed_list
def check_results(self):
""" goes through all items in the completed_queue and throws an exception at the
first one that didn't execute successfully
throws ExecutionError
"""
try:
while True:
item = self._pop_completed() # will throw Empty
if not item.get_results().wasSuccessful():
raise ExecutionError("Error Executing Command: ", item)
except Empty:
return
def empty_completed_items(self):
while not self.completed_queue.empty():
self._pop_completed()
def isDone(self):
# TODO: not sure that qsize() is safe
return (self.assigned == self.completed_queue.qsize())
@property
def assigned(self):
"""
A read-only count of the number of commands that have been added to the
pool. This count is only decremented when items are removed from the
completed queue via getCompletedItems(), empty_completed_items(), or
check_results().
"""
return self._assigned
@property
def completed(self):
"""
A read-only count of the items in the completed queue. Will be reset to
zero after a call to empty_completed_items() or getCompletedItems().
"""
return self.completed_queue.qsize()
def haltWork(self):
self.logger.debug("WorkerPool haltWork()")
self.should_stop = True
for w in self.workers:
w.haltWork()
self.work_queue.put(self.halt_command)
def join_and_indicate_progress(pool, outfile=sys.stdout, interval=1):
"""
Waits for a WorkerPool to complete its work, flushing dots to stdout every
second. If any dots are printed (i.e. the work takes longer than the
printing interval), a newline is also printed upon completion.
The file to print to and the interval between printings can be overridden.
"""
printed = False
while not pool.join(interval):
outfile.write('.')
outfile.flush()
printed = True
if printed:
outfile.write('\n')
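# Example (sketch): the intended call pattern for WorkerPool plus the progress
# helper above -- each queued item is a Command subclass instance.
#
#     pool = WorkerPool(numWorkers=4)
#     for cmd in commands:
#         pool.addCommand(cmd)
#     join_and_indicate_progress(pool)  # blocks, printing a dot per interval
#     pool.check_results()              # raises ExecutionError on any failure
#     pool.haltWork()
#     pool.joinWorkers()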
class OperationWorkerPool(WorkerPool):
""" TODO: This is a hack! In reality, the WorkerPool should work with Operations, and
Command should be a subclass of Operation. Till then, we'll spoof the necessary Command
functionality within Operation. """
def __init__(self, numWorkers=16, operations=None):
if operations is not None:
for operation in operations:
self._spoof_operation(operation)
super(OperationWorkerPool, self).__init__(numWorkers, operations)
def check_results(self):
raise NotImplementedError("OperationWorkerPool has no means of verifying success.")
def _spoof_operation(self, operation):
operation.cmdStr = str(operation)
class Worker(Thread):
"""TODO:"""
pool = None
cmd = None
name = None
logger = None
def __init__(self, name, pool):
self.name = name
self.pool = pool
self.logger = logger
Thread.__init__(self)
self.daemon = pool.daemonize
def run(self):
while True:
try:
try:
self.cmd = self.pool.getNextWorkItem()
except TypeError:
# misleading exception raised during interpreter shutdown
return
# we must have got a command to run here
if self.cmd is None:
self.logger.debug("[%s] got a None cmd" % self.name)
self.pool.markTaskDone()
elif self.cmd is self.pool.halt_command:
self.logger.debug("[%s] got a halt cmd" % self.name)
self.pool.markTaskDone()
self.cmd = None
return
elif self.pool.should_stop:
self.logger.debug("[%s] got cmd and pool is stopped: %s" % (self.name, self.cmd))
self.pool.markTaskDone()
self.cmd = None
else:
self.logger.debug("[%s] got cmd: %s" % (self.name, self.cmd.cmdStr))
self.cmd.run()
self.logger.debug("[%s] finished cmd: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd = None
except Exception as e:
self.logger.exception(e)
if self.cmd:
self.logger.debug("[%s] finished cmd with exception: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd = None
def haltWork(self):
self.logger.debug("[%s] haltWork" % self.name)
# this was originally coded as
#
# if self.cmd is not None:
# self.cmd.interrupt()
# self.cmd.cancel()
#
# but as observed in MPP-13808, the worker thread's run() loop may set self.cmd to None
# past the point where the calling thread checks self.cmd for None, leading to a curious
# "'NoneType' object has no attribute 'cancel' exception" which may prevent the worker pool's
# haltWorkers() from actually halting all the workers.
#
c = self.cmd
if c is not None and isinstance(c, Command):
c.interrupt()
c.cancel()
"""
TODO: consider just having a single interface that needs to be implemented for
describing work to allow the Workers to use it. This would allow the user
to better provide logic necessary. i.e. even though the user wants to
execute a unix command... how the results are interpreted is highly
application specific. So we should have a separate level of abstraction
for executing UnixCommands and DatabaseCommands from this one.
other things to think about:
-- how to support cancel
-- how to support progress
-- undo?
-- blocking vs. unblocking
"""
# --------------------------------NEW WORLD-----------------------------------
class CommandResult():
""" Used as a way to package up the results from a GpCommand
"""
# rc,stdout,stderr,completed,halt
def __init__(self, rc, stdout, stderr, completed, halt, pickled=False):
self.rc = rc
if pickled:
self.stdout = stdout
else:
self.stdout = stdout.decode()
self.stderr = stderr.decode()
self.completed = completed
self.halt = halt
def printResult(self):
res = "cmd had rc=%s completed=%s halted=%s\n stdout='%s'\n " \
"stderr='%s'" % (str(self.rc), str(self.completed), str(self.halt), self.stdout, self.stderr)
return res
def wasSuccessful(self):
if self.halt:
return False
if not self.completed:
return False
if self.rc != 0:
return False
return True
def __str__(self):
return self.printResult()
def split_stdout(self, how=':'):
"""
TODO: AK: This doesn't belong here if it pertains only to pg_controldata.
MPP-16318: Skip over discrepancies in the pg_controldata stdout, as it's
not this code's responsibility to judge the pg_controldata stdout. This is
especially true for 'immediate' shutdown, in which case, we won't even
care for WARNINGs or other pg_controldata discrepancies.
"""
for line in self.stdout.split('\n'):
ret = line.split(how, 1)
if len(ret) == 2:
yield ret
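# Illustrative sketch (not part of the original module) of how split_stdout()
# is typically consumed; the pg_controldata-style output below is fabricated.
def _example_split_stdout():
    out = b'Database cluster state: in production\nLatest checkpoint location: 0/C000060\n'
    result = CommandResult(0, out, b'', True, False)
    for key, value in result.split_stdout():
        print(key.strip(), '->', value.strip())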
class ExecutionError(Exception):
def __init__(self, summary, cmd):
self.summary = summary
self.cmd = cmd
def __str__(self):
# TODO: improve dumping of self.cmd
return "ExecutionError: '%s' occurred. Details: '%s' %s" % \
(self.summary, self.cmd.cmdStr, self.cmd.get_results().printResult())
# specify types of execution contexts.
LOCAL = 1
REMOTE = 2
gExecutionContextFactory = None
#
# @param factory needs to have a createExecutionContext(self, execution_context_id, remoteHost, stdin) function
#
def setExecutionContextFactory(factory):
global gExecutionContextFactory
gExecutionContextFactory = factory
def createExecutionContext(execution_context_id, remoteHost, stdin, gphome=None):
if gExecutionContextFactory is not None:
return gExecutionContextFactory.createExecutionContext(execution_context_id, remoteHost, stdin)
elif execution_context_id == LOCAL:
return LocalExecutionContext(stdin)
elif execution_context_id == REMOTE:
if remoteHost is None:
raise Exception("Programmer Error. Specified REMOTE execution context but didn't provide a remoteHost")
return RemoteExecutionContext(remoteHost, stdin, gphome)
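# Illustrative sketch (not part of the original module): any object with a
# createExecutionContext(execution_context_id, remoteHost, stdin) method can be
# installed as the factory; this hypothetical one records what was requested.
class _RecordingContextFactory:
    def __init__(self):
        self.requests = []
    def createExecutionContext(self, execution_context_id, remoteHost, stdin):
        self.requests.append((execution_context_id, remoteHost))
        return LocalExecutionContext(stdin)
# setExecutionContextFactory(_RecordingContextFactory()) would then route every
# Command through the factory above, regardless of LOCAL/REMOTE.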
class ExecutionContext():
""" An ExecutionContext defines where and how to execute the Command and how to
gather up information that are the results of the command.
"""
def __init__(self):
pass
def execute(self, cmd):
pass
def interrupt(self):
pass
def cancel(self):
pass
class LocalExecutionContext(ExecutionContext):
proc = None
halt = False
completed = False
def __init__(self, stdin):
ExecutionContext.__init__(self)
self.stdin = stdin
def execute(self, cmd, wait=True, pickled=False):
        # prepend env. variables from the Command's propagate_env_map
        # e.g. given {'FOO': 1, 'BAR': 2}, we'll produce "BAR=2 && FOO=1 && <original cmdStr>"
# also propagate env from command instance specific map
keys = sorted(list(cmd.propagate_env_map.keys()), reverse=True)
for k in keys:
cmd.cmdStr = "%s=%s && %s" % (k, cmd.propagate_env_map[k], cmd.cmdStr)
# executable='/bin/bash' is to ensure the shell is bash. bash isn't the
# actual command executed, but the shell that command string runs under.
self.proc = gpsubprocess.Popen(cmd.cmdStr, env=None, shell=True,
executable='/bin/bash',
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE, close_fds=True)
cmd.pid = self.proc.pid
if wait:
(rc, stdout_value, stderr_value) = self.proc.communicate2(input=self.stdin)
self.completed = True
cmd.set_results(CommandResult(
rc, stdout_value, stderr_value, self.completed, self.halt, pickled=pickled))
def cancel(self):
if self.proc:
try:
os.kill(self.proc.pid, signal.SIGTERM)
except OSError:
pass
def interrupt(self):
self.halt = True
if self.proc:
self.proc.cancel()
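# Illustrative sketch (not part of the original module): a propagate_env_map
# set on a Command instance turns its cmdStr into "BAR=2 && FOO=1 && <cmdStr>"
# when executed, as described in the comments above.
def _example_env_propagation():
    cmd = Command(name='show-env', cmdStr='env | grep -E "FOO|BAR"')
    cmd.propagate_env_map = {'FOO': 1, 'BAR': 2}
    cmd.run()
    print(cmd.get_stdout())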
class RemoteExecutionContext(LocalExecutionContext):
trail = set()
"""
Leaves a trail of hosts to which we've ssh'ed, during the life of a particular interpreter.
"""
def __init__(self, targetHost, stdin, gphome=None):
LocalExecutionContext.__init__(self, stdin)
self.targetHost = targetHost
if gphome:
self.gphome = gphome
else:
self.gphome = GPHOME
def execute(self, cmd, pickled=False):
        # prepend env. variables from the Command's propagate_env_map
        # e.g. given {'FOO': 1, 'BAR': 2}, we'll produce "BAR=2 && FOO=1 && <original cmdStr>"
self.__class__.trail.add(self.targetHost)
# also propagate env from command instance specific map
keys = sorted(list(cmd.propagate_env_map.keys()), reverse=True)
for k in keys:
cmd.cmdStr = "%s=%s && %s" % (k, cmd.propagate_env_map[k], cmd.cmdStr)
# Escape \ and " for remote execution
cmd.cmdStr = cmd.cmdStr.replace('\\','\\\\').replace('"', '\\"')
cmd.cmdStr = "ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=60 " \
"{targethost} \"{gphome} {cmdstr}\"".format(targethost=self.targetHost,
gphome=". %s/greenplum_path.sh;" % self.gphome,
cmdstr=cmd.cmdStr)
LocalExecutionContext.execute(self, cmd, pickled=pickled)
        if cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host'):
            self.__retry(cmd, pickled=pickled)

    def __retry(self, cmd, count=0, pickled=False):
        # note: `pickled` must be threaded through explicitly; the original
        # body referenced an undefined `pickled` name, raising NameError.
        if count == SSH_MAX_RETRY:
            return
        time.sleep(SSH_RETRY_DELAY)
        LocalExecutionContext.execute(self, cmd, pickled=pickled)
        if cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host'):
            self.__retry(cmd, count + 1, pickled=pickled)
class Command(object):
""" TODO:
"""
name = None
cmdStr = None
results = None
exec_context = None
    propagate_env_map = {}  # env vars to prepend at execution; a class-level default shared by instances unless reassigned
def __init__(self, name, cmdStr, ctxt=LOCAL, remoteHost=None, stdin=None, gphome=None, pickled=False):
self.name = name
self.cmdStr = cmdStr
self.exec_context = createExecutionContext(ctxt, remoteHost, stdin=stdin,
gphome=gphome)
self.remoteHost = remoteHost
self.logger = gplog.get_default_logger()
self.pickled = pickled
def __str__(self):
if self.results:
return "%s cmdStr='%s' had result: %s" % (self.name, self.cmdStr, self.results)
else:
return "%s cmdStr='%s'" % (self.name, self.cmdStr)
# Start a process that will execute the command but don't wait for
# it to complete. Return the Popen object instead.
def runNoWait(self):
self.exec_context.execute(self, wait=False, pickled=self.pickled)
return self.exec_context.proc
def run(self, validateAfter=False):
self.logger.debug("Running Command: %s" % self.cmdStr)
self.exec_context.execute(self, pickled=self.pickled)
if validateAfter:
self.validate()
def set_results(self, results):
self.results = results
def get_results(self):
return self.results
def get_stdout(self, strip=True):
if self.results is None:
raise Exception("command not run yet")
return self.results.stdout if not strip else self.results.stdout.strip()
def get_stdout_lines(self):
return self.results.stdout.splitlines()
def get_stderr_lines(self):
return self.results.stderr.splitlines()
def get_return_code(self):
if self.results is None:
raise Exception("command not run yet")
return self.results.rc
def get_stderr(self):
if self.results is None:
raise Exception("command not run yet")
return self.results.stderr
def cancel(self):
if self.exec_context and isinstance(self.exec_context, ExecutionContext):
self.exec_context.cancel()
def interrupt(self):
if self.exec_context and isinstance(self.exec_context, ExecutionContext):
self.exec_context.interrupt()
def was_successful(self):
if self.results is None:
return False
else:
return self.results.wasSuccessful()
def validate(self, expected_rc=0):
"""Plain vanilla validation which expects a 0 return code."""
if self.results.rc != expected_rc:
self.logger.debug(self.results)
raise ExecutionError("non-zero rc: %d" % self.results.rc, self)
class SQLCommand(Command):
"""Base class for commands that execute SQL statements. Classes
that inherit from SQLCOmmand should set cancel_conn to the pygresql
connection they wish to cancel and check self.cancel_flag."""
def __init__(self, name):
Command.__init__(self, name, cmdStr=None)
self.cancel_flag = False
self.cancel_conn = None
def run(self, validateAfter=False):
raise ExecutionError("programmer error. implementors of SQLCommand must implement run()", self)
def interrupt(self):
# No execution context for SQLCommands
pass
def cancel(self):
# assignment is an atomic operation in python
self.cancel_flag = True
# if self.conn is not set we cannot cancel.
if self.cancel_conn:
DB(self.cancel_conn).cancel()
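# Illustrative subclass sketch (not part of the original module; assumes a
# pygresql connection object passed in as 'conn'): concrete SQLCommands
# implement run() themselves and point cancel_conn at their connection.
class _ExampleSQLCommand(SQLCommand):
    def __init__(self, name, conn):
        SQLCommand.__init__(self, name)
        self.conn = conn
        self.cancel_conn = conn  # lets cancel() reach the live connection
    def run(self, validateAfter=False):
        if not self.cancel_flag:
            self.conn.query('SELECT 1')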
def run_remote_commands(name, commands):
"""
"""
cmds = {}
pool = WorkerPool()
for host, cmdStr in list(commands.items()):
cmd = Command(name=name, cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
pool.addCommand(cmd)
cmds[host] = cmd
pool.join()
pool.check_results()
return cmds
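# Usage sketch for run_remote_commands() (illustrative; host names are made up):
#   results = run_remote_commands('collect-uptime', {'sdw1': 'uptime',
#                                                    'sdw2': 'uptime'})
#   for host, cmd in results.items():
#       print(host, cmd.get_stdout())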
| 33.451163
| 116
| 0.612023
|
f71748e77f76da11ab56b956f1c6463063b71ea1
| 1,609
|
py
|
Python
|
main.py
|
manulaiko/ulauncher-openInBrowser
|
2536813431253f4711a950b50669d1bb6f842de4
|
[
"MIT"
] | 1
|
2020-06-10T10:34:05.000Z
|
2020-06-10T10:34:05.000Z
|
main.py
|
manulaiko/ulauncher-openInBrowser
|
2536813431253f4711a950b50669d1bb6f842de4
|
[
"MIT"
] | 6
|
2018-01-26T10:04:02.000Z
|
2020-09-07T17:09:07.000Z
|
main.py
|
manulaiko/ulauncher-openInBrowser
|
2536813431253f4711a950b50669d1bb6f842de4
|
[
"MIT"
] | 5
|
2018-03-24T09:35:38.000Z
|
2020-11-02T03:42:09.000Z
|
from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
import webbrowser
import re
class OpenInBrowser(Extension):
def __init__(self):
super(OpenInBrowser, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(ItemEnterEvent, ItemEnterEventListener())
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
data = event.get_argument()
items = [
ExtensionResultItem(
icon='images/icon.png',
                name=data,
                description='Open "%s" in the browser' % data,
on_enter=ExtensionCustomAction(data, keep_app_open=True)
)
]
return RenderResultListAction(items)
class ItemEnterEventListener(EventListener):
def on_event(self, event, extension):
data = event.get_data()
        if not re.match(r'^https?://', data):
            data = 'https://' + data
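        # at this point data always carries a scheme, e.g. "example.com" has
        # become "https://example.com"; http(s) inputs pass through unchanged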
webbrowser.open_new_tab(data)
return RenderResultListAction([])
if __name__ == '__main__':
OpenInBrowser().run()
| 34.234043
| 85
| 0.712244
|
f7174a07455b8d2615386130e8a5a7f7b941d1ab
| 1,222
|
py
|
Python
|
setup.py
|
Shravan-1908/pyscreenrec
|
286c1a24c95918353388007e3c7fcd23d404ba8f
|
[
"MIT"
] | 15
|
2021-02-11T16:29:28.000Z
|
2021-09-11T14:17:19.000Z
|
setup.py
|
Shravan-1908/pyscreenrec
|
286c1a24c95918353388007e3c7fcd23d404ba8f
|
[
"MIT"
] | 2
|
2021-02-26T17:38:32.000Z
|
2021-05-19T17:58:34.000Z
|
setup.py
|
Shravan-1908/pyscreenrec
|
286c1a24c95918353388007e3c7fcd23d404ba8f
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
VERSION = "0.4"  # setuptools expects the version to be a string, not a float
with open("README.md", encoding="utf-8") as f:
README = f.read()
setup(
name = "pyscreenrec",
version = VERSION,
description = "A small and cross-platform python library for recording screen.",
long_description_content_type = "text/markdown",
long_description = README,
url="https://github.com/Shravan-1908/pyscreenrec",
author = "Shravan Asati",
author_email = "dev.shravan@protonmail.com",
packages = find_packages(),
install_requires = ["pyscreeze", "opencv-python", "natsort"],
license = 'MIT',
keywords = ["python", "screen recording", "screen", "recording", "screenshots"],
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Topic :: Software Development :: Libraries"
]
)
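# Assumed build/install workflow for this setup.py (not part of the original file):
#   python -m pip install .             # install from the source tree
#   python setup.py sdist bdist_wheel   # build distributable archives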
| 37.030303
| 84
| 0.626023
|
f7174a5077e5e6d8533019e51bf2c5b67b88f10b
| 984
|
py
|
Python
|
python-dtf/tests/integration/prop/test_prop_dump.py
|
jakev/dtf
|
a761ace77cea051bfb88d56df65ae6b83f664480
|
[
"Apache-2.0"
] | 58
|
2015-01-13T16:24:31.000Z
|
2016-11-21T16:00:58.000Z
|
python-dtf/tests/integration/prop/test_prop_dump.py
|
jakev/dtf
|
a761ace77cea051bfb88d56df65ae6b83f664480
|
[
"Apache-2.0"
] | 51
|
2015-03-11T20:42:21.000Z
|
2017-01-18T02:49:10.000Z
|
python-dtf/tests/integration/prop/test_prop_dump.py
|
jakev/dtf
|
a761ace77cea051bfb88d56df65ae6b83f664480
|
[
"Apache-2.0"
] | 15
|
2016-02-01T00:37:21.000Z
|
2016-12-09T07:03:36.000Z
|
# Android Device Testing Framework ("dtf")
# Copyright 2013-2016 Jake Valletta (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for the "prop dump" utility"""
from __future__ import absolute_import
import dtf.testutils as testutils
def test_no_args():
"""Running dump with no args"""
testutils.deploy_config(testutils.get_default_config())
rtn = testutils.dtf("prop dump")
testutils.undeploy()
assert(rtn.return_code == 0)
| 29.818182
| 74
| 0.748984
|
f7174c3b69586ad7a0dd822a81cded6f137b0e87
| 1,204
|
py
|
Python
|
tests/integration/test_tmp_policy/test.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 15,577
|
2019-09-23T11:57:53.000Z
|
2022-03-31T18:21:48.000Z
|
tests/integration/test_tmp_policy/test.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 16,476
|
2019-09-23T11:47:00.000Z
|
2022-03-31T23:06:01.000Z
|
tests/integration/test_tmp_policy/test.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 3,633
|
2019-09-23T12:18:28.000Z
|
2022-03-31T15:55:48.000Z
|
# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node',
main_configs=["configs/config.d/storage_configuration.xml"],
tmpfs=['/disk1:size=100M', '/disk2:size=100M'])
@pytest.fixture(scope='module')
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_different_versions(start_cluster):
query = 'SELECT count(ignore(*)) FROM (SELECT * FROM system.numbers LIMIT 1e7) GROUP BY number'
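    # 1 << 20 bytes = 1 MiB: deliberately tiny spill thresholds so the
    # aggregation (and any sort) is forced to write temporary files to the
    # tmpfs disks configured above.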
settings = {
'max_bytes_before_external_group_by': 1 << 20,
'max_bytes_before_external_sort': 1 << 20,
}
assert node.contains_in_log('Setting up /disk1/ to store temporary data in it')
assert node.contains_in_log('Setting up /disk2/ to store temporary data in it')
node.query(query, settings=settings)
assert node.contains_in_log('Writing part of aggregation data into temporary file /disk1/')
assert node.contains_in_log('Writing part of aggregation data into temporary file /disk2/')
| 32.540541
| 99
| 0.69186
|
f7174ca5b3715337bd66e103f422e337ca016408
| 492
|
py
|
Python
|
Home/migrations/0006_auto_20201005_2114.py
|
n3trob3/nimrodage
|
578eb14e2e8f7dc7ae58913b6131fd60c1596c0b
|
[
"Apache-2.0"
] | null | null | null |
Home/migrations/0006_auto_20201005_2114.py
|
n3trob3/nimrodage
|
578eb14e2e8f7dc7ae58913b6131fd60c1596c0b
|
[
"Apache-2.0"
] | null | null | null |
Home/migrations/0006_auto_20201005_2114.py
|
n3trob3/nimrodage
|
578eb14e2e8f7dc7ae58913b6131fd60c1596c0b
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-10-05 20:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Home', '0005_auto_20201005_2107'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='meeting',
field=models.CharField(blank=True, choices=[('G', 'Google/Internet search'), ('R', 'Referral'), ('O', 'Other')], max_length=1),
),
]
| 25.894737
| 140
| 0.571138
|