Dataset columns:
hexsha: string (length 40)
size: int64 (10 to 805k)
ext: string (6 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4-176)
max_stars_repo_name: string (length 7-114)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (length 1-10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 4-176)
max_issues_repo_name: string (length 7-114)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (length 1-10)
max_issues_count: int64 (1 to 48.5k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 4-176)
max_forks_repo_name: string (length 7-114)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (length 1-10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 10 to 805k)
avg_line_length: float64 (5.53 to 11k)
max_line_length: int64 (10 to 129k)
alphanum_fraction: float64 (0.13 to 0.93)
content_no_comment: string (length 0 to 449k)
is_comment_constant_removed: bool (2 classes)
is_sharp_comment_removed: bool (1 class)
f715b08addca895d652922233042ac6fcca2b312 | 2,145 | py | Python | src/data/datasets/BAIR/BAIR.py | msc5/junior-iw | d356e015fcd3a3be638097a1acc02d5dea4751aa | ["MIT"] | null | null | null | src/data/datasets/BAIR/BAIR.py | msc5/junior-iw | d356e015fcd3a3be638097a1acc02d5dea4751aa | ["MIT"] | null | null | null | src/data/datasets/BAIR/BAIR.py | msc5/junior-iw | d356e015fcd3a3be638097a1acc02d5dea4751aa | ["MIT"] | null | null | null |
import os
import io
import numpy as np
from PIL import Image
import torch
from torchvision.transforms import ToTensor
class BAIR (object):
"""Data Handler that loads robot pushing data."""
def __init__(self, data_root, train=True, seq_len=20, image_size=64):
self.root_dir = data_root
if train:
self.data_dir = '%s/processed_data/train' % self.root_dir
self.ordered = False
else:
self.data_dir = '%s/processed_data/test' % self.root_dir
self.ordered = True
self.dirs = []
for d1 in os.listdir(self.data_dir):
for d2 in os.listdir('%s/%s' % (self.data_dir, d1)):
self.dirs.append('%s/%s/%s' % (self.data_dir, d1, d2))
self.seq_len = seq_len
self.image_size = image_size
self.seed_is_set = False # multi threaded loading
self.d = 0
self.totensor = ToTensor()
def set_seed(self, seed):
if not self.seed_is_set:
self.seed_is_set = True
np.random.seed(seed)
def __len__(self):
return len(self.dirs)
def get_seq(self):
if self.ordered:
d = self.dirs[self.d]
if self.d == len(self.dirs) - 1:
self.d = 0
else:
self.d += 1
else:
d = self.dirs[np.random.randint(len(self.dirs))]
image_seq = []
for i in range(self.seq_len):
fname = '%s/%d.png' % (d, i)
# im = imread(fname).reshape(1, 64, 64, 3)
# im = np.array(Image.open(fname)).reshape((1, 3, 64, 64))
im = self.totensor(Image.open(fname)).reshape(1, 3, 64, 64)
image_seq.append(im)
image_seq = torch.cat(image_seq, axis=0)
return image_seq
def __getitem__(self, index):
self.set_seed(index)
return self.get_seq()
if __name__ == "__main__":
from torch.utils.data import DataLoader
train_dataset = BAIR('src/data/datasets/BAIR/raw', train=True)
train_dataloader = DataLoader(train_dataset, batch_size=4)
print(len(train_dataset), len(train_dataloader))
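The `__main__` block above sketches how the loader is meant to be driven. As a minimal usage example (assuming the processed BAIR frames actually exist under the path used above): each `__getitem__` call returns a tensor of shape `(seq_len, 3, 64, 64)`, so a default-collated batch has shape `(batch_size, seq_len, 3, 64, 64)`.

```python
# Usage sketch only; the data path is the one assumed by the __main__ block above.
from torch.utils.data import DataLoader

dataset = BAIR('src/data/datasets/BAIR/raw', train=True, seq_len=20, image_size=64)
loader = DataLoader(dataset, batch_size=4, shuffle=True)

batch = next(iter(loader))
print(batch.shape)  # torch.Size([4, 20, 3, 64, 64]), float values in [0, 1]
```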
| 31.086957 | 73 | 0.577622 |
import os
import io
import numpy as np
from PIL import Image
import torch
from torchvision.transforms import ToTensor
class BAIR (object):
def __init__(self, data_root, train=True, seq_len=20, image_size=64):
self.root_dir = data_root
if train:
self.data_dir = '%s/processed_data/train' % self.root_dir
self.ordered = False
else:
self.data_dir = '%s/processed_data/test' % self.root_dir
self.ordered = True
self.dirs = []
for d1 in os.listdir(self.data_dir):
for d2 in os.listdir('%s/%s' % (self.data_dir, d1)):
self.dirs.append('%s/%s/%s' % (self.data_dir, d1, d2))
self.seq_len = seq_len
self.image_size = image_size
self.seed_is_set = False
self.d = 0
self.totensor = ToTensor()
def set_seed(self, seed):
if not self.seed_is_set:
self.seed_is_set = True
np.random.seed(seed)
def __len__(self):
return len(self.dirs)
def get_seq(self):
if self.ordered:
d = self.dirs[self.d]
if self.d == len(self.dirs) - 1:
self.d = 0
else:
self.d += 1
else:
d = self.dirs[np.random.randint(len(self.dirs))]
image_seq = []
for i in range(self.seq_len):
fname = '%s/%d.png' % (d, i)
im = self.totensor(Image.open(fname)).reshape(1, 3, 64, 64)
image_seq.append(im)
image_seq = torch.cat(image_seq, axis=0)
return image_seq
def __getitem__(self, index):
self.set_seed(index)
return self.get_seq()
if __name__ == "__main__":
from torch.utils.data import DataLoader
train_dataset = BAIR('src/data/datasets/BAIR/raw', train=True)
train_dataloader = DataLoader(train_dataset, batch_size=4)
print(len(train_dataset), len(train_dataloader))
| true | true |
f715b1d7e1b1de8a5b80b9a37cf2b264b49119e2 | 620 | py | Python | third_party/seasocks/scripts/gen_embedded.py | hansonl02/frc-robot-code | 4b120c917a7709df9f010c9089a87c320bab3a16 | ["MIT"] | 61 | 2017-01-22T04:38:32.000Z | 2022-03-07T00:04:37.000Z | third_party/seasocks/scripts/gen_embedded.py | hansonl02/frc-robot-code | 4b120c917a7709df9f010c9089a87c320bab3a16 | ["MIT"] | 3 | 2018-06-28T05:34:57.000Z | 2019-01-16T15:46:22.000Z | third_party/seasocks/scripts/gen_embedded.py | hansonl02/frc-robot-code | 4b120c917a7709df9f010c9089a87c320bab3a16 | ["MIT"] | 17 | 2017-05-12T15:32:03.000Z | 2021-12-09T12:49:38.000Z |
#!/usr/bin/env python
import os, os.path, sys
print """
#include "internal/Embedded.h"
#include <string>
#include <unordered_map>
namespace {
std::unordered_map<std::string, EmbeddedContent> embedded = {
"""
for f in sys.argv[1:]:
bytes = open(f, 'rb').read()
print '{"/%s", {' % os.path.basename(f)
print '"' + "".join(['\\x%02x' % ord(x) for x in bytes]) + '"'
print ',%d }},' % len(bytes)
print """
};
} // namespace
const EmbeddedContent* findEmbeddedContent(const std::string& name) {
auto found = embedded.find(name);
if (found == embedded.end()) {
return NULL;
}
return &found->second;
}
"""
| 17.222222 | 69 | 0.620968 |
import os, os.path, sys
print """
#include "internal/Embedded.h"
#include <string>
#include <unordered_map>
namespace {
std::unordered_map<std::string, EmbeddedContent> embedded = {
"""
for f in sys.argv[1:]:
bytes = open(f, 'rb').read()
print '{"/%s", {' % os.path.basename(f)
print '"' + "".join(['\\x%02x' % ord(x) for x in bytes]) + '"'
print ',%d }},' % len(bytes)
print """
};
} // namespace
const EmbeddedContent* findEmbeddedContent(const std::string& name) {
auto found = embedded.find(name);
if (found == embedded.end()) {
return NULL;
}
return &found->second;
}
"""
| false | true |
f715b222c54a26cf324ab888f732b9102ea604a6 | 20,214 | py | Python | tools/nni_cmd/config_schema.py | skyser2003/nni | b946888fadacdb761e4c3a79bd869284af1da3b3 | ["MIT"] | 1 | 2021-03-27T10:42:42.000Z | 2021-03-27T10:42:42.000Z | tools/nni_cmd/config_schema.py | lswzjuer/nni | e9cba778257804a2a1a6002687835233a779d7af | ["MIT"] | null | null | null | tools/nni_cmd/config_schema.py | lswzjuer/nni | e9cba778257804a2a1a6002687835233a779d7af | ["MIT"] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from schema import Schema, And, Optional, Regex, Or
from .constants import SCHEMA_TYPE_ERROR, SCHEMA_RANGE_ERROR, SCHEMA_PATH_ERROR
def setType(key, valueType):
'''check key type'''
return And(valueType, error=SCHEMA_TYPE_ERROR % (key, valueType.__name__))
def setChoice(key, *args):
'''check choice'''
return And(lambda n: n in args, error=SCHEMA_RANGE_ERROR % (key, str(args)))
def setNumberRange(key, keyType, start, end):
'''check number range'''
return And(
And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)),
And(lambda n: start <= n <= end, error=SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))),
)
def setPathCheck(key):
'''check if path exist'''
return And(os.path.exists, error=SCHEMA_PATH_ERROR % key)
common_schema = {
'authorName': setType('authorName', str),
'experimentName': setType('experimentName', str),
Optional('description'): setType('description', str),
'trialConcurrency': setNumberRange('trialConcurrency', int, 1, 99999),
Optional('maxExecDuration'): And(Regex(r'^[1-9][0-9]*[s|m|h|d]$', error='ERROR: maxExecDuration format is [digit]{s,m,h,d}')),
Optional('maxTrialNum'): setNumberRange('maxTrialNum', int, 1, 99999),
'trainingServicePlatform': setChoice('trainingServicePlatform', 'remote', 'local', 'pai', 'kubeflow', 'frameworkcontroller'),
Optional('searchSpacePath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'searchSpacePath'),
Optional('multiPhase'): setType('multiPhase', bool),
Optional('multiThread'): setType('multiThread', bool),
Optional('nniManagerIp'): setType('nniManagerIp', str),
Optional('logDir'): And(os.path.isdir, error=SCHEMA_PATH_ERROR % 'logDir'),
Optional('debug'): setType('debug', bool),
Optional('versionCheck'): setType('versionCheck', bool),
Optional('logLevel'): setChoice('logLevel', 'trace', 'debug', 'info', 'warning', 'error', 'fatal'),
Optional('logCollection'): setChoice('logCollection', 'http', 'none'),
'useAnnotation': setType('useAnnotation', bool),
Optional('tuner'): dict,
Optional('advisor'): dict,
Optional('assessor'): dict,
Optional('localConfig'): {
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
}
}
tuner_schema_dict = {
('Anneal', 'SMAC'): {
'builtinTunerName': setChoice('builtinTunerName', 'Anneal', 'SMAC'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
('Evolution'): {
'builtinTunerName': setChoice('builtinTunerName', 'Evolution'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('population_size'): setNumberRange('population_size', int, 0, 99999),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
('BatchTuner', 'GridSearch', 'Random'): {
'builtinTunerName': setChoice('builtinTunerName', 'BatchTuner', 'GridSearch', 'Random'),
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'TPE': {
'builtinTunerName': 'TPE',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('parallel_optimize'): setType('parallel_optimize', bool),
Optional('constant_liar_type'): setChoice('constant_liar_type', 'min', 'max', 'mean')
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'NetworkMorphism': {
'builtinTunerName': 'NetworkMorphism',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('task'): setChoice('task', 'cv', 'nlp', 'common'),
Optional('input_width'): setType('input_width', int),
Optional('input_channel'): setType('input_channel', int),
Optional('n_output_node'): setType('n_output_node', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'MetisTuner': {
'builtinTunerName': 'MetisTuner',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('no_resampling'): setType('no_resampling', bool),
Optional('no_candidates'): setType('no_candidates', bool),
Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
Optional('cold_start_num'): setType('cold_start_num', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'GPTuner': {
'builtinTunerName': 'GPTuner',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('utility'): setChoice('utility', 'ei', 'ucb', 'poi'),
Optional('kappa'): setType('kappa', float),
Optional('xi'): setType('xi', float),
Optional('nu'): setType('nu', float),
Optional('alpha'): setType('alpha', float),
Optional('cold_start_num'): setType('cold_start_num', int),
Optional('selection_num_warm_up'): setType('selection_num_warm_up', int),
Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'PPOTuner': {
'builtinTunerName': 'PPOTuner',
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('trials_per_update'): setNumberRange('trials_per_update', int, 0, 99999),
Optional('epochs_per_update'): setNumberRange('epochs_per_update', int, 0, 99999),
Optional('minibatch_size'): setNumberRange('minibatch_size', int, 0, 99999),
Optional('ent_coef'): setType('ent_coef', float),
Optional('lr'): setType('lr', float),
Optional('vf_coef'): setType('vf_coef', float),
Optional('max_grad_norm'): setType('max_grad_norm', float),
Optional('gamma'): setType('gamma', float),
Optional('lam'): setType('lam', float),
Optional('cliprange'): setType('cliprange', float),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'customized': {
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
}
}
advisor_schema_dict = {
'Hyperband':{
'builtinAdvisorName': Or('Hyperband'),
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('R'): setType('R', int),
Optional('eta'): setType('eta', int)
},
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'BOHB':{
'builtinAdvisorName': Or('BOHB'),
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('min_budget'): setNumberRange('min_budget', int, 0, 9999),
Optional('max_budget'): setNumberRange('max_budget', int, 0, 9999),
Optional('eta'):setNumberRange('eta', int, 0, 9999),
Optional('min_points_in_model'): setNumberRange('min_points_in_model', int, 0, 9999),
Optional('top_n_percent'): setNumberRange('top_n_percent', int, 1, 99),
Optional('num_samples'): setNumberRange('num_samples', int, 1, 9999),
Optional('random_fraction'): setNumberRange('random_fraction', float, 0, 9999),
Optional('bandwidth_factor'): setNumberRange('bandwidth_factor', float, 0, 9999),
Optional('min_bandwidth'): setNumberRange('min_bandwidth', float, 0, 9999),
},
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'customized':{
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
}
}
assessor_schema_dict = {
'Medianstop': {
'builtinAssessorName': 'Medianstop',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
},
},
'Curvefitting': {
'builtinAssessorName': 'Curvefitting',
Optional('classArgs'): {
'epoch_num': setNumberRange('epoch_num', int, 0, 9999),
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
Optional('threshold'): setNumberRange('threshold', float, 0, 9999),
Optional('gap'): setNumberRange('gap', int, 1, 9999),
},
},
'customized': {
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
}
}
common_trial_schema = {
'trial':{
'command': setType('command', str),
'codeDir': setPathCheck('codeDir'),
Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode')
}
}
pai_trial_schema = {
'trial':{
'command': setType('command', str),
'codeDir': setPathCheck('codeDir'),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('authFile'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'authFile'),
Optional('shmMB'): setType('shmMB', int),
Optional('dataDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: dataDir format error, dataDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('outputDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: outputDir format error, outputDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('virtualCluster'): setType('virtualCluster', str),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('portList'): [{
"label": setType('label', str),
"beginAt": setType('beginAt', int),
"portNumber": setType('portNumber', int)
}]
}
}
pai_config_schema = {
'paiConfig': Or({
'userName': setType('userName', str),
'passWord': setType('passWord', str),
'host': setType('host', str)
}, {
'userName': setType('userName', str),
'token': setType('token', str),
'host': setType('host', str)
})
}
kubeflow_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('ps'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
},
Optional('master'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
},
Optional('worker'):{
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
}
}
}
kubeflow_config_schema = {
'kubeflowConfig':Or({
'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
'apiVersion': setType('apiVersion', str),
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
'nfs': {
'server': setType('server', str),
'path': setType('path', str)
}
}, {
'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
'apiVersion': setType('apiVersion', str),
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
'keyVault': {
'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
},
'azureStorage': {
'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
},
Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
})
}
frameworkcontroller_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
'taskRoles': [{
'name': setType('name', str),
'taskNum': setType('taskNum', int),
'frameworkAttemptCompletionPolicy': {
'minFailedTaskCount': setType('minFailedTaskCount', int),
'minSucceededTaskCount': setType('minSucceededTaskCount', int),
},
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
}]
}
}
frameworkcontroller_config_schema = {
'frameworkcontrollerConfig':Or({
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
Optional('serviceAccountName'): setType('serviceAccountName', str),
'nfs': {
'server': setType('server', str),
'path': setType('path', str)
}
}, {
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
Optional('serviceAccountName'): setType('serviceAccountName', str),
'keyVault': {
'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
},
'azureStorage': {
'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
},
Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
})
}
machine_list_schema = {
Optional('machineList'):[Or({
'ip': setType('ip', str),
Optional('port'): setNumberRange('port', int, 1, 65535),
'username': setType('username', str),
'passwd': setType('passwd', str),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
}, {
'ip': setType('ip', str),
Optional('port'): setNumberRange('port', int, 1, 65535),
'username': setType('username', str),
'sshKeyPath': setPathCheck('sshKeyPath'),
Optional('passphrase'): setType('passphrase', str),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
})]
}
LOCAL_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema})
REMOTE_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema, **machine_list_schema})
PAI_CONFIG_SCHEMA = Schema({**common_schema, **pai_trial_schema, **pai_config_schema})
KUBEFLOW_CONFIG_SCHEMA = Schema({**common_schema, **kubeflow_trial_schema, **kubeflow_config_schema})
FRAMEWORKCONTROLLER_CONFIG_SCHEMA = Schema({**common_schema, **frameworkcontroller_trial_schema, **frameworkcontroller_config_schema})
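The module above only declares validation schemas; nothing is checked until a parsed experiment config is handed to the matching schema object. A minimal sketch, using a hypothetical local-mode config dict (not one shipped with NNI), of how validation and its error reporting work:

```python
# Sketch: validating a hypothetical local-mode experiment config.
from schema import SchemaError

experiment_config = {
    'authorName': 'alice',
    'experimentName': 'example',
    'trialConcurrency': 1,
    'trainingServicePlatform': 'local',
    'useAnnotation': False,
    'trial': {
        'command': 'python3 trial.py',
        'codeDir': '.',  # setPathCheck requires an existing path
    },
}

try:
    LOCAL_CONFIG_SCHEMA.validate(experiment_config)
    print('config is valid')
except SchemaError as err:
    # The message comes from the error= strings set in setType/setChoice/etc.
    print('config is invalid:', err)
```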
| 50.283582 | 137 | 0.597754 |
import os
from schema import Schema, And, Optional, Regex, Or
from .constants import SCHEMA_TYPE_ERROR, SCHEMA_RANGE_ERROR, SCHEMA_PATH_ERROR
def setType(key, valueType):
return And(valueType, error=SCHEMA_TYPE_ERROR % (key, valueType.__name__))
def setChoice(key, *args):
return And(lambda n: n in args, error=SCHEMA_RANGE_ERROR % (key, str(args)))
def setNumberRange(key, keyType, start, end):
return And(
And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)),
And(lambda n: start <= n <= end, error=SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))),
)
def setPathCheck(key):
return And(os.path.exists, error=SCHEMA_PATH_ERROR % key)
common_schema = {
'authorName': setType('authorName', str),
'experimentName': setType('experimentName', str),
Optional('description'): setType('description', str),
'trialConcurrency': setNumberRange('trialConcurrency', int, 1, 99999),
Optional('maxExecDuration'): And(Regex(r'^[1-9][0-9]*[s|m|h|d]$', error='ERROR: maxExecDuration format is [digit]{s,m,h,d}')),
Optional('maxTrialNum'): setNumberRange('maxTrialNum', int, 1, 99999),
'trainingServicePlatform': setChoice('trainingServicePlatform', 'remote', 'local', 'pai', 'kubeflow', 'frameworkcontroller'),
Optional('searchSpacePath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'searchSpacePath'),
Optional('multiPhase'): setType('multiPhase', bool),
Optional('multiThread'): setType('multiThread', bool),
Optional('nniManagerIp'): setType('nniManagerIp', str),
Optional('logDir'): And(os.path.isdir, error=SCHEMA_PATH_ERROR % 'logDir'),
Optional('debug'): setType('debug', bool),
Optional('versionCheck'): setType('versionCheck', bool),
Optional('logLevel'): setChoice('logLevel', 'trace', 'debug', 'info', 'warning', 'error', 'fatal'),
Optional('logCollection'): setChoice('logCollection', 'http', 'none'),
'useAnnotation': setType('useAnnotation', bool),
Optional('tuner'): dict,
Optional('advisor'): dict,
Optional('assessor'): dict,
Optional('localConfig'): {
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
}
}
tuner_schema_dict = {
('Anneal', 'SMAC'): {
'builtinTunerName': setChoice('builtinTunerName', 'Anneal', 'SMAC'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
('Evolution'): {
'builtinTunerName': setChoice('builtinTunerName', 'Evolution'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('population_size'): setNumberRange('population_size', int, 0, 99999),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
('BatchTuner', 'GridSearch', 'Random'): {
'builtinTunerName': setChoice('builtinTunerName', 'BatchTuner', 'GridSearch', 'Random'),
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'TPE': {
'builtinTunerName': 'TPE',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('parallel_optimize'): setType('parallel_optimize', bool),
Optional('constant_liar_type'): setChoice('constant_liar_type', 'min', 'max', 'mean')
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'NetworkMorphism': {
'builtinTunerName': 'NetworkMorphism',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('task'): setChoice('task', 'cv', 'nlp', 'common'),
Optional('input_width'): setType('input_width', int),
Optional('input_channel'): setType('input_channel', int),
Optional('n_output_node'): setType('n_output_node', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'MetisTuner': {
'builtinTunerName': 'MetisTuner',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('no_resampling'): setType('no_resampling', bool),
Optional('no_candidates'): setType('no_candidates', bool),
Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
Optional('cold_start_num'): setType('cold_start_num', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'GPTuner': {
'builtinTunerName': 'GPTuner',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('utility'): setChoice('utility', 'ei', 'ucb', 'poi'),
Optional('kappa'): setType('kappa', float),
Optional('xi'): setType('xi', float),
Optional('nu'): setType('nu', float),
Optional('alpha'): setType('alpha', float),
Optional('cold_start_num'): setType('cold_start_num', int),
Optional('selection_num_warm_up'): setType('selection_num_warm_up', int),
Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'PPOTuner': {
'builtinTunerName': 'PPOTuner',
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('trials_per_update'): setNumberRange('trials_per_update', int, 0, 99999),
Optional('epochs_per_update'): setNumberRange('epochs_per_update', int, 0, 99999),
Optional('minibatch_size'): setNumberRange('minibatch_size', int, 0, 99999),
Optional('ent_coef'): setType('ent_coef', float),
Optional('lr'): setType('lr', float),
Optional('vf_coef'): setType('vf_coef', float),
Optional('max_grad_norm'): setType('max_grad_norm', float),
Optional('gamma'): setType('gamma', float),
Optional('lam'): setType('lam', float),
Optional('cliprange'): setType('cliprange', float),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'customized': {
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
}
}
advisor_schema_dict = {
'Hyperband':{
'builtinAdvisorName': Or('Hyperband'),
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('R'): setType('R', int),
Optional('eta'): setType('eta', int)
},
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'BOHB':{
'builtinAdvisorName': Or('BOHB'),
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('min_budget'): setNumberRange('min_budget', int, 0, 9999),
Optional('max_budget'): setNumberRange('max_budget', int, 0, 9999),
Optional('eta'):setNumberRange('eta', int, 0, 9999),
Optional('min_points_in_model'): setNumberRange('min_points_in_model', int, 0, 9999),
Optional('top_n_percent'): setNumberRange('top_n_percent', int, 1, 99),
Optional('num_samples'): setNumberRange('num_samples', int, 1, 9999),
Optional('random_fraction'): setNumberRange('random_fraction', float, 0, 9999),
Optional('bandwidth_factor'): setNumberRange('bandwidth_factor', float, 0, 9999),
Optional('min_bandwidth'): setNumberRange('min_bandwidth', float, 0, 9999),
},
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'customized':{
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
}
}
assessor_schema_dict = {
'Medianstop': {
'builtinAssessorName': 'Medianstop',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
},
},
'Curvefitting': {
'builtinAssessorName': 'Curvefitting',
Optional('classArgs'): {
'epoch_num': setNumberRange('epoch_num', int, 0, 9999),
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
Optional('threshold'): setNumberRange('threshold', float, 0, 9999),
Optional('gap'): setNumberRange('gap', int, 1, 9999),
},
},
'customized': {
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
}
}
common_trial_schema = {
'trial':{
'command': setType('command', str),
'codeDir': setPathCheck('codeDir'),
Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode')
}
}
pai_trial_schema = {
'trial':{
'command': setType('command', str),
'codeDir': setPathCheck('codeDir'),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('authFile'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'authFile'),
Optional('shmMB'): setType('shmMB', int),
Optional('dataDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: dataDir format error, dataDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('outputDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: outputDir format error, outputDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('virtualCluster'): setType('virtualCluster', str),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('portList'): [{
"label": setType('label', str),
"beginAt": setType('beginAt', int),
"portNumber": setType('portNumber', int)
}]
}
}
pai_config_schema = {
'paiConfig': Or({
'userName': setType('userName', str),
'passWord': setType('passWord', str),
'host': setType('host', str)
}, {
'userName': setType('userName', str),
'token': setType('token', str),
'host': setType('host', str)
})
}
kubeflow_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('ps'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
},
Optional('master'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
},
Optional('worker'):{
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
}
}
}
kubeflow_config_schema = {
'kubeflowConfig':Or({
'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
'apiVersion': setType('apiVersion', str),
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
'nfs': {
'server': setType('server', str),
'path': setType('path', str)
}
}, {
'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
'apiVersion': setType('apiVersion', str),
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
'keyVault': {
'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
},
'azureStorage': {
'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
},
Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
})
}
frameworkcontroller_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
'taskRoles': [{
'name': setType('name', str),
'taskNum': setType('taskNum', int),
'frameworkAttemptCompletionPolicy': {
'minFailedTaskCount': setType('minFailedTaskCount', int),
'minSucceededTaskCount': setType('minSucceededTaskCount', int),
},
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
}]
}
}
frameworkcontroller_config_schema = {
'frameworkcontrollerConfig':Or({
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
Optional('serviceAccountName'): setType('serviceAccountName', str),
'nfs': {
'server': setType('server', str),
'path': setType('path', str)
}
}, {
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
Optional('serviceAccountName'): setType('serviceAccountName', str),
'keyVault': {
'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
},
'azureStorage': {
'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
},
Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
})
}
machine_list_schema = {
Optional('machineList'):[Or({
'ip': setType('ip', str),
Optional('port'): setNumberRange('port', int, 1, 65535),
'username': setType('username', str),
'passwd': setType('passwd', str),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
}, {
'ip': setType('ip', str),
Optional('port'): setNumberRange('port', int, 1, 65535),
'username': setType('username', str),
'sshKeyPath': setPathCheck('sshKeyPath'),
Optional('passphrase'): setType('passphrase', str),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
})]
}
LOCAL_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema})
REMOTE_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema, **machine_list_schema})
PAI_CONFIG_SCHEMA = Schema({**common_schema, **pai_trial_schema, **pai_config_schema})
KUBEFLOW_CONFIG_SCHEMA = Schema({**common_schema, **kubeflow_trial_schema, **kubeflow_config_schema})
FRAMEWORKCONTROLLER_CONFIG_SCHEMA = Schema({**common_schema, **frameworkcontroller_trial_schema, **frameworkcontroller_config_schema})
| true | true |
f715b265bb35aeb0434d5be280c4ded3ce5cd7ce | 4,313 | py | Python | src/camps/migrations/0001_initial.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | ["BSD-3-Clause"] | null | null | null | src/camps/migrations/0001_initial.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | ["BSD-3-Clause"] | null | null | null | src/camps/migrations/0001_initial.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models, migrations
import uuid
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Camp',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('name', models.CharField(max_length=255, help_text='Name of the camp, ie. Bornhack.', verbose_name='Name')),
('start', models.DateTimeField(help_text='When the camp starts.', unique=True, verbose_name='Start date')),
('end', models.DateTimeField(help_text='When the camp ends.', unique=True, verbose_name='End date')),
],
options={
'verbose_name_plural': 'Camps',
'verbose_name': 'Camp',
},
),
migrations.CreateModel(
name='Day',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('date', models.DateField(help_text='What date?', verbose_name='Date')),
('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='Which camp does this day belong to.', verbose_name='Camp')),
],
options={
'verbose_name_plural': 'Days',
'verbose_name': 'Day',
},
),
migrations.CreateModel(
name='Expense',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('description', models.CharField(max_length=255, help_text='What this expense covers.', verbose_name='Description')),
('amount', models.DecimalField(max_digits=7, help_text='The amount of the expense.', verbose_name='Amount', decimal_places=2)),
('currency', models.CharField(max_length=3, choices=[('btc', 'BTC'), ('dkk', 'DKK'), ('eur', 'EUR'), ('sek', 'SEK')], help_text='What currency the amount is in.', verbose_name='Currency')),
('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='The camp to which this expense relates to.', verbose_name='Camp')),
('covered_by', models.ForeignKey(on_delete=models.PROTECT, to=settings.AUTH_USER_MODEL, blank=True, help_text='Which user, if any, covered this expense.', verbose_name='Covered by', null=True)),
],
options={
'verbose_name_plural': 'Expenses',
'verbose_name': 'Expense',
},
),
migrations.CreateModel(
name='Signup',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('cost', models.DecimalField(default=1500.0, decimal_places=2, help_text='What the user should/is willing to pay for this signup.', verbose_name='Cost', max_digits=7)),
('paid', models.BooleanField(help_text='Whether the user has paid.', verbose_name='Paid?', default=False)),
('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='The camp that has been signed up for.', verbose_name='Camp')),
('user', models.ForeignKey(on_delete=models.PROTECT, to=settings.AUTH_USER_MODEL, help_text='The user that has signed up.', verbose_name='User')),
],
options={
'verbose_name_plural': 'Signups',
'verbose_name': 'Signup',
},
),
]
| 54.594937 | 210 | 0.592395 |
from django.db import models, migrations
import uuid
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Camp',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('name', models.CharField(max_length=255, help_text='Name of the camp, ie. Bornhack.', verbose_name='Name')),
('start', models.DateTimeField(help_text='When the camp starts.', unique=True, verbose_name='Start date')),
('end', models.DateTimeField(help_text='When the camp ends.', unique=True, verbose_name='End date')),
],
options={
'verbose_name_plural': 'Camps',
'verbose_name': 'Camp',
},
),
migrations.CreateModel(
name='Day',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('date', models.DateField(help_text='What date?', verbose_name='Date')),
('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='Which camp does this day belong to.', verbose_name='Camp')),
],
options={
'verbose_name_plural': 'Days',
'verbose_name': 'Day',
},
),
migrations.CreateModel(
name='Expense',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('description', models.CharField(max_length=255, help_text='What this expense covers.', verbose_name='Description')),
('amount', models.DecimalField(max_digits=7, help_text='The amount of the expense.', verbose_name='Amount', decimal_places=2)),
('currency', models.CharField(max_length=3, choices=[('btc', 'BTC'), ('dkk', 'DKK'), ('eur', 'EUR'), ('sek', 'SEK')], help_text='What currency the amount is in.', verbose_name='Currency')),
('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='The camp to which this expense relates to.', verbose_name='Camp')),
('covered_by', models.ForeignKey(on_delete=models.PROTECT, to=settings.AUTH_USER_MODEL, blank=True, help_text='Which user, if any, covered this expense.', verbose_name='Covered by', null=True)),
],
options={
'verbose_name_plural': 'Expenses',
'verbose_name': 'Expense',
},
),
migrations.CreateModel(
name='Signup',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('cost', models.DecimalField(default=1500.0, decimal_places=2, help_text='What the user should/is willing to pay for this signup.', verbose_name='Cost', max_digits=7)),
('paid', models.BooleanField(help_text='Whether the user has paid.', verbose_name='Paid?', default=False)),
('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='The camp that has been signed up for.', verbose_name='Camp')),
('user', models.ForeignKey(on_delete=models.PROTECT, to=settings.AUTH_USER_MODEL, help_text='The user that has signed up.', verbose_name='User')),
],
options={
'verbose_name_plural': 'Signups',
'verbose_name': 'Signup',
},
),
]
| true | true |
f715b282dadb18b6d8c46e9e216062f47e9fa8c4 | 2,215 | py | Python | tests/models/symbol/ddc_log_data_returned_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | ["BSD-3-Clause-Clear"] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | tests/models/symbol/ddc_log_data_returned_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | ["BSD-3-Clause-Clear"] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | tests/models/symbol/ddc_log_data_returned_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | ["BSD-3-Clause-Clear"] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z |
#!/usr/bin/env python
# coding: utf-8
"""
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
from netapp.santricity.models.symbol.ddc_log_data_returned import DdcLogDataReturned
class DdcLogDataReturnedTest(unittest.TestCase):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
# Try instantiating the model
def test_ddc_log_data_returned(self):
ddc_log_data_returned_obj = DdcLogDataReturned()
self.assertNotEqual(ddc_log_data_returned_obj, None)
| 58.289474 | 845 | 0.776975 |
import unittest
from netapp.santricity.models.symbol.ddc_log_data_returned import DdcLogDataReturned
class DdcLogDataReturnedTest(unittest.TestCase):
def test_ddc_log_data_returned(self):
ddc_log_data_returned_obj = DdcLogDataReturned()
self.assertNotEqual(ddc_log_data_returned_obj, None)
| true | true |
f715b31b59adb44a6d805b23169c7a059551b417 | 4,970 | py | Python | ravager/bot/helpers/abort_upload_handler.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | ["MIT"] | null | null | null | ravager/bot/helpers/abort_upload_handler.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | ["MIT"] | 1 | 2022-03-15T06:55:48.000Z | 2022-03-15T15:38:20.000Z | ravager/bot/helpers/abort_upload_handler.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | ["MIT"] | 2 | 2022-02-09T21:30:57.000Z | 2022-03-15T06:19:57.000Z |
from ravager.services.google.helpers import uploader
from ravager.database.helpers.structs import OpsDataStruct
from ravager.database.tasks import Tasks
from ravager.celery_tasks.tasks import app
from ravager.services.aria.download import Download
from telegram.ext import CallbackQueryHandler
import logging
logger = logging.getLogger(__file__)
class AbortAndUpload:
def __init__(self):
pass
def callback_handler(self, update, context):
callback_data = update.callback_query.data
callback_data = callback_data.split("|")
method = callback_data[0]
action = callback_data[1]
src_msg_id = callback_data[2]
task = OpsDataStruct()
task.source_msg_id = src_msg_id
task = Tasks(task=task).get_task()
if method == "upload" and action == "no":
update.callback_query.edit_message_text(text="Uploading cancelled")
if method == "upload" and action == "yes":
upload_msg = update.callback_query.edit_message_text(text="Starting upload")
uploader.upload_file(task, upload_msg)
if method == "abort" and action == "yes":
src_msg_id = callback_data[2]
abort_msg = update.callback_query.edit_message_text(text="Trying to abort transfer")
abort_msg_id = abort_msg.message_id
abort_task = self.abort_task(update, context, task, abort_msg_id)
if method == "abort" and action == "no":
update.callback_query.edit_message_text(text="Transfer allowed to process as per request")
@staticmethod
def abort_task(update, context, task, abort_msg_id):
msg_sent = False
try:
# update celery task id in db for uploads cause manual upload when completed will use old task id
download = Download()
celery_task_id = task.task_id
user_id = task.user_id
gid = task.gid
source_msg_id = task.source_msg_id
revoke_task = app.control.revoke(celery_task_id, terminate=True, signal="SIGKILL")
aria_stop_download = download.remove(gid)
logger.info(aria_stop_download)
if aria_stop_download:
# context.bot.delete_message(chat_id=user_id,message_id=latest_message_id)
context.bot.send_message(chat_id=user_id, text="Task aborted successfully",
reply_to_message_id=source_msg_id)
task.status = "aborted"
Tasks(task=task).set_task()
context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
msg_sent = True
return task
else:
context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
context.bot.send_message(chat_id=user_id, text="Failed to abort task",
reply_to_message_id=source_msg_id)
msg_sent = True
logger.error("Failed to abort task", task)
return
except Exception as e:
logger.error(e)
if str(e) == "GID {} is not found".format(gid):
context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
context.bot.send_message(chat_id=user_id,
text="Task probably aborted,check if ongoing transfer msg updates",
reply_to_message_id=source_msg_id)
msg_sent = True
logger.error("Task probably aborted", task)
return
if str(e) == "No such download for GID#{}".format(gid):
context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
context.bot.send_message(chat_id=user_id,
text="Task probably aborted,check if ongoing transfer msg updates",
reply_to_message_id=source_msg_id)
msg_sent = True
logger.error("Task probably aborted", task)
return
context.bot.send_message(chat_id=user_id, text="Failed to abort task", reply_to_message_id=source_msg_id)
msg_sent = True
logger.error("Failed to abort task", task)
return
finally:
if not msg_sent:
context.bot.send_message(chat_id=user_id, text="Failed to abort task",
reply_to_message_id=source_msg_id)
logger.error("Failed to abort task", task)
return
def upload_callback_handler(self):
abort_callback = CallbackQueryHandler(self.callback_handler, pattern="upload")
return abort_callback
def abort_callback_handler(self):
abort_callback = CallbackQueryHandler(self.callback_handler, pattern="abort")
return abort_callback
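The class above only builds the two `CallbackQueryHandler`s; they still need to be registered with the bot. A minimal wiring sketch using python-telegram-bot's pre-v20 `Updater` API, with a placeholder token:

```python
# Sketch: registering the handlers; "BOT_TOKEN" is a placeholder, not a project value.
from telegram.ext import Updater

updater = Updater(token="BOT_TOKEN", use_context=True)
abort_and_upload = AbortAndUpload()

# Callback data beginning with "upload" or "abort" is routed to callback_handler
# through the patterns configured above.
updater.dispatcher.add_handler(abort_and_upload.upload_callback_handler())
updater.dispatcher.add_handler(abort_and_upload.abort_callback_handler())

updater.start_polling()
updater.idle()
```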
| 46.886792 | 117 | 0.61328 |
from ravager.services.google.helpers import uploader
from ravager.database.helpers.structs import OpsDataStruct
from ravager.database.tasks import Tasks
from ravager.celery_tasks.tasks import app
from ravager.services.aria.download import Download
from telegram.ext import CallbackQueryHandler
import logging
logger = logging.getLogger(__file__)
class AbortAndUpload:
def __init__(self):
pass
def callback_handler(self, update, context):
callback_data = update.callback_query.data
callback_data = callback_data.split("|")
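# Illustrative note (added, not from the original source): the callback data
# is expected to be a pipe-delimited string set by the bot's inline keyboard
# buttons, e.g. "upload|yes|12345" or "abort|no|12345" (the message id here is
# made up), which unpacks below into method, action and source message id.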
method = callback_data[0]
action = callback_data[1]
src_msg_id = callback_data[2]
task = OpsDataStruct()
task.source_msg_id = src_msg_id
task = Tasks(task=task).get_task()
if method == "upload" and action == "no":
update.callback_query.edit_message_text(text="Uploading cancelled")
if method == "upload" and action == "yes":
upload_msg = update.callback_query.edit_message_text(text="Starting upload")
uploader.upload_file(task, upload_msg)
if method == "abort" and action == "yes":
src_msg_id = callback_data[2]
abort_msg = update.callback_query.edit_message_text(text="Trying to abort transfer")
abort_msg_id = abort_msg.message_id
abort_task = self.abort_task(update, context, task, abort_msg_id)
if method == "abort" and action == "no":
update.callback_query.edit_message_text(text="Transfer allowed to process as per request")
@staticmethod
def abort_task(update, context, task, abort_msg_id):
msg_sent = False
try:
download = Download()
celery_task_id = task.task_id
user_id = task.user_id
gid = task.gid
source_msg_id = task.source_msg_id
revoke_task = app.control.revoke(celery_task_id, terminate=True, signal="SIGKILL")
aria_stop_download = download.remove(gid)
logger.info(aria_stop_download)
if aria_stop_download:
context.bot.send_message(chat_id=user_id, text="Task aborted successfully",
reply_to_message_id=source_msg_id)
task.status = "aborted"
Tasks(task=task).set_task()
context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
msg_sent = True
return task
else:
context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
context.bot.send_message(chat_id=user_id, text="Failed to abort task",
reply_to_message_id=source_msg_id)
msg_sent = True
logger.error("Failed to abort task", task)
return
except Exception as e:
logger.error(e)
if str(e) == "GID {} is not found".format(gid):
context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
context.bot.send_message(chat_id=user_id,
text="Task probably aborted,check if ongoing transfer msg updates",
reply_to_message_id=source_msg_id)
msg_sent = True
logger.error("Task probably aborted", task)
return
if str(e) == "No such download for GID#{}".format(gid):
context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
context.bot.send_message(chat_id=user_id,
text="Task probably aborted,check if ongoing transfer msg updates",
reply_to_message_id=source_msg_id)
msg_sent = True
logger.error("Task probably aborted", task)
return
context.bot.send_message(chat_id=user_id, text="Failed to abort task", reply_to_message_id=source_msg_id)
msg_sent = True
logger.error("Failed to abort task", task)
return
finally:
if not msg_sent:
context.bot.send_message(chat_id=user_id, text="Failed to abort task",
reply_to_message_id=source_msg_id)
logger.error("Failed to abort task", task)
return
def upload_callback_handler(self):
abort_callback = CallbackQueryHandler(self.callback_handler, pattern="upload")
return abort_callback
def abort_callback_handler(self):
abort_callback = CallbackQueryHandler(self.callback_handler, pattern="abort")
return abort_callback
| true | true |
f715b360e88e246929f30fa6b56a22448fc5ee17 | 228,126 | py | Python
| cinder/volume/manager.py | sapcc/cinder | 9444ae7d2c7cfe2c277ff661ec9ef27a4f013f91 | ["Apache-2.0"] | null | null | null
| cinder/volume/manager.py | sapcc/cinder | 9444ae7d2c7cfe2c277ff661ec9ef27a4f013f91 | ["Apache-2.0"] | 28 | 2017-08-17T14:46:05.000Z | 2022-03-29T12:42:12.000Z
| cinder/volume/manager.py | sapcc/cinder | 9444ae7d2c7cfe2c277ff661ec9ef27a4f013f91 | ["Apache-2.0"] | 3 | 2017-04-27T16:11:40.000Z | 2020-02-12T21:27:00.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from castellan import key_manager
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder.keymgr import migration as key_migration
from cinder import manager
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_migration
from cinder.volume import volume_types
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
VA_LIST = objects.VolumeAttachmentList
volume_manager_opts = [
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help="FC Zoning mode configured, only 'fabric' is "
"supported now."),
cfg.IntOpt('reinit_driver_count',
default=3,
help='Maximum number of times to reinitialize the driver '
'if volume initialization fails. The retry interval '
'grows exponentially: 1s, 2s, 4s, etc.'),
cfg.IntOpt('init_host_max_objects_retrieval',
default=0,
help='Max number of volumes and snapshots to be retrieved '
'per batch during volume manager host initialization. '
'Query results will be obtained in batches from the '
'database and not in one shot to avoid extreme memory '
'usage. Set 0 to turn off this functionality.'),
cfg.IntOpt('backend_stats_polling_interval',
default=60,
min=3,
help='Time in seconds between requests for usage statistics '
'from the backend. Be aware that generating usage '
'statistics is expensive for some backends, so setting '
'this value too low may adversely affect performance.'),
]
volume_backend_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
cfg.IntOpt('backend_native_threads_pool_size',
default=20,
min=20,
help='Size of the native threads pool for the backend. '
'Increase for backends that heavily rely on this, like '
'the RBD driver.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
CONF.register_opts(volume_backend_opts, group=config.SHARED_CONF_GROUP)
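# Sketch of the resulting configuration layout (the section name is an
# assumption): volume_manager_opts are global, while volume_backend_opts are
# read from the shared per-backend group, e.g.
#
#   [lvm-1]
#   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#   backend_native_threads_pool_size = 20
#   suppress_requests_ssl_warnings = False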
# MAPPING is used for driver renames to keep backwards compatibility. When a
# driver is renamed, add a mapping here from the old name (the dict key) to the
# new name (the dict value) for at least a cycle to allow time for deployments
# to transition.
MAPPING = {
'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver':
'cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver',
'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver':
'cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver',
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_iscsi.'
'FJDXISCSIDriver',
'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver':
'cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver',
}
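# Illustrative use of MAPPING (see __init__ below): a deployment that still
# configures
#   volume_driver = cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver
# gets redirected to
#   cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver
# after a deprecation warning is logged.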
class VolumeManager(manager.CleanableManager,
manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
FAILBACK_SENTINEL = 'default'
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment', 'group'}
def _get_service(self, host=None, binary=constants.VOLUME_BINARY):
host = host or self.host
ctxt = context.get_admin_context()
svc_host = volume_utils.extract_host(host, 'backend')
return objects.Service.get_by_args(ctxt, svc_host, binary)
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
# NOTE(dulek): service_name=None means we're running in unit tests.
service_name = service_name or 'backend_defaults'
self.configuration = config.Configuration(volume_backend_opts,
config_group=service_name)
self._set_tpool_size(
self.configuration.backend_native_threads_pool_size)
self.stats = {}
self.service_uuid = None
if not volume_driver:
# Get from configuration, which will get the default
# if it's not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning("Driver path %s is deprecated, update your "
"configuration to the new path.", volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
# the driver on init, in case there was a restart or something
curr_active_backend_id = None
try:
service = self._get_service()
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info("Service not found for updating "
"active_backend_id, assuming default "
"for driver init.")
else:
curr_active_backend_id = service.active_backend_id
self.service_uuid = service.uuid
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning("Suppressing requests library SSL Warnings")
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.key_manager = key_manager.API(CONF)
# A driver can feed additional RPC endpoints into this list
driver_additional_endpoints = []
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
cluster_name=self.cluster,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id,
additional_endpoints=driver_additional_endpoints)
self.additional_endpoints.extend(driver_additional_endpoints)
if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
msg = _('Active-Active configuration is not currently supported '
'by driver %s.') % volume_driver
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Invalid JSON: %s",
self.driver.configuration.extra_capabilities)
# Check if a per-backend AZ has been specified
backend_zone = self.driver.configuration.safe_get(
'backend_availability_zone')
if backend_zone:
self.availability_zone = backend_zone
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info('Image-volume cache enabled for host %(host)s.',
{'host': self.host})
else:
LOG.info('Image-volume cache disabled for host %(host)s.',
{'host': self.host})
self.image_volume_cache = None
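# Illustrative backend section enabling the image-volume cache branch above
# (the numeric values are assumptions, not defaults from this file):
#   image_volume_cache_enabled = True
#   image_volume_cache_max_size_gb = 200
#   image_volume_cache_max_count = 50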
def _count_allocated_capacity(self, ctxt, volume):
pool = volume_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception('Fetch volume pool name failed.',
resource=volume)
return
if pool:
new_host = volume_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info("Determined volume DB was empty at startup.")
return True
else:
LOG.info("Determined volume DB was not empty at startup.")
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in
# what we allow and add a list of allowed keys. Things that make sense
# are provider_*, replication_status etc
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
# NOTE(JDG): Make sure returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
if snapshot_updates:
for snap in snapshots:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info('Including all resources from host %(host)s in cluster '
'%(cluster)s.',
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_gs = objects.GroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'%(num_gs)s generic groups and %(num_cache)s image '
'volume caches from host %(host)s have been included in '
'cluster %(cluster)s.',
{'num_vols': num_vols, 'num_cgs': num_cgs, 'num_gs': num_gs,
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
"""Perform any required initialization."""
if not self.driver.supported:
utils.log_unsupported_driver_warning(self.driver)
if not self.configuration.enable_unsupported_driver:
LOG.error("Unsupported drivers are disabled."
" You can re-enable by adding "
"enable_unsupported_driver=True to the "
"driver section in cinder.conf",
resource={'type': 'driver',
'id': self.__class__.__name__})
return
self._init_host(added_to_cluster, **kwargs)
if not self.driver.initialized:
reinit_count = 0
while reinit_count < CONF.reinit_driver_count:
time.sleep(2 ** reinit_count)
self._init_host(added_to_cluster, **kwargs)
if self.driver.initialized:
return
reinit_count += 1
def _init_host(self, added_to_cluster=None, **kwargs):
ctxt = context.get_admin_context()
# If we have just added this host to a cluster we have to include all
# our resources in that cluster.
if added_to_cluster:
self._include_resources_in_cluster(ctxt)
LOG.info("Starting volume driver %(driver_name)s (%(version)s)",
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception("Failed to initialize driver.",
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
# Zero stats
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
# Batch retrieval volumes and snapshots
num_vols, num_snaps, max_objs_num, req_range = None, None, None, [0]
req_limit = CONF.init_host_max_objects_retrieval
use_batch_objects_retrieval = req_limit > 0
if use_batch_objects_retrieval:
# Get total number of volumes
num_vols, __, __ = self._get_my_volumes_summary(ctxt)
# Get total number of snapshots
num_snaps, __ = self._get_my_snapshots_summary(ctxt)
# Calculate highest number of the objects (volumes or snapshots)
max_objs_num = max(num_vols, num_snaps)
# Make batch request loop counter
req_range = range(0, max_objs_num, req_limit)
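# Worked example with hypothetical numbers: if init_host_max_objects_retrieval
# is 500 and there are 1200 volumes and 300 snapshots, then max_objs_num is
# 1200 and req_range becomes range(0, 1200, 500), so the loop below pulls
# batches at offsets 0, 500 and 1000.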
volumes_to_migrate = volume_migration.VolumeMigrationList()
for req_offset in req_range:
# Retrieve 'req_limit' number of objects starting from
# 'req_offset' position
volumes, snapshots = None, None
if use_batch_objects_retrieval:
if req_offset < num_vols:
volumes = self._get_my_volumes(ctxt,
limit=req_limit,
offset=req_offset)
else:
volumes = objects.VolumeList()
if req_offset < num_snaps:
snapshots = self._get_my_snapshots(ctxt,
limit=req_limit,
offset=req_offset)
else:
snapshots = objects.SnapshotList()
# or retrieve all volumes and snapshots per single request
else:
volumes = self._get_my_volumes(ctxt)
snapshots = self._get_my_snapshots(ctxt)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception("Failed to re-export volume, "
"setting to ERROR.",
resource=volume)
volume.conditional_update({'status': 'error'},
{'status': 'in-use'})
# All other cleanups are processed by parent class -
# CleanableManager
except Exception:
LOG.exception("Error during re-export on driver init.",
resource=volume)
return
if len(volumes):
volumes_to_migrate.append(volumes, ctxt)
del volumes
del snapshots
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
# Keep the image tmp file clean when init host.
backend_name = volume_utils.extract_host(self.service_topic_queue)
image_utils.cleanup_temporary_file(backend_name)
# Migrate any ConfKeyManager keys based on fixed_key to the currently
# configured key manager.
self._add_to_threadpool(key_migration.migrate_fixed_key,
volumes=volumes_to_migrate)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info("Driver initialization completed successfully.",
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
# Make sure to call CleanableManager to do the cleanup
super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
**kwargs)
def init_host_with_rpc(self):
LOG.info("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)",
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
# Make sure the driver is initialized first
utils.log_unsupported_driver_warning(self.driver)
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
LOG.error("Cannot complete RPC initialization because "
"driver isn't initialized properly.",
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
return
stats = self.driver.get_volume_stats(refresh=True)
try:
service = self._get_service()
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error("Service not found for updating replication_status.")
if service.replication_status != fields.ReplicationStatus.FAILED_OVER:
if stats and stats.get('replication_enabled', False):
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
if replication_status != service.replication_status:
service.replication_status = replication_status
service.save()
# Update the cluster replication status if necessary
cluster = service.cluster
if (cluster and
cluster.replication_status != service.replication_status):
cluster.replication_status = service.replication_status
cluster.save()
LOG.info("Driver post RPC initialization completed successfully.",
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def _do_cleanup(self, ctxt, vo_resource):
if isinstance(vo_resource, objects.Volume):
if vo_resource.status == 'downloading':
self.driver.clear_download(ctxt, vo_resource)
elif vo_resource.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, vo_resource.id)
elif vo_resource.status == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
vo_resource, cascade=True)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, vo_resource, cascade=True)
# We signal that we take care of cleaning the worker ourselves
# (with set_workers decorator in delete_volume method) so
# do_cleanup method doesn't need to remove it.
return True
# For Volume creating and downloading and for Snapshot downloading
# statuses we have to set status to error
if vo_resource.status in ('creating', 'downloading'):
vo_resource.status = 'error'
vo_resource.save()
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def _set_resource_host(self, resource):
"""Set the host field on the DB to our own when we are clustered."""
if (resource.is_clustered and
not volume_utils.hosts_are_equivalent(resource.host,
self.host)):
pool = volume_utils.extract_host(resource.host, 'pool')
resource.host = volume_utils.append_host(self.host, pool)
resource.save()
@objects.Volume.set_workers
def create_volume(self, context, volume, request_spec=None,
filter_properties=None, allow_reschedule=True):
"""Creates the volume."""
# Log about unsupported drivers
utils.log_unsupported_driver_warning(self.driver)
# Make sure the host in the DB matches our own when clustered
self._set_resource_host(volume)
# Update our allocated capacity counter early to minimize race
# conditions with the scheduler.
self._update_allocated_capacity(volume)
# We lose the host value if we reschedule, so keep it here
original_host = volume.host
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = objects.RequestSpec()
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
else:
locked_action = None
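# Illustrative lock names built above (ids are made up):
#   "<snapshot-id>-delete_snapshot" when creating from a snapshot
#   "<source-volume-id>-delete_volume" when creating from another volume
# so the source cannot be deleted while the flow below holds the lock.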
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
with coordination.COORDINATOR.get_lock(locked_action):
_run_flow()
finally:
try:
flow_engine.storage.fetch('refreshed')
except tfe.NotFound:
# If there's no vol_ref, then flow is reverted. Lets check out
# if rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if rescheduled:
# NOTE(geguileo): Volume was rescheduled so we need to update
# volume stats because the volume wasn't created here.
# Volume.host is None now, so we pass the original host value.
self._update_allocated_capacity(volume, decrement=True,
host=original_host)
# Shared targets is only relevant for iSCSI connections.
# We default to True to be on the safe side.
volume.shared_targets = (
self.driver.capabilities.get('storage_protocol') == 'iSCSI' and
self.driver.capabilities.get('shared_targets', True))
# TODO(geguileo): service_uuid won't be enough on Active/Active
# deployments. There can be 2 services handling volumes from the same
# backend.
volume.service_uuid = self.service_uuid
volume.save()
LOG.info("Created volume successfully.", resource=volume)
return volume.id
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = volume_utils.extract_host(
resource.service_topic_queue)
backend = volume_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
{'resource': resource.obj_name, 'id': resource.id,
'backend': backend})
raise exception.Invalid(msg)
@coordination.synchronized('{volume.id}-{f_name}')
@objects.Volume.set_workers
def delete_volume(self, context, volume, unmanage_only=False,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
3. Delete a temp volume for backup
If deleting the temp volume for backup, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
self._check_is_our_resource(volume)
if unmanage_only and volume.encryption_key_id is not None:
raise exception.Invalid(
reason=_("Unmanaging encrypted volumes is not "
"supported."))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# To backup a snapshot or a 'in-use' volume, create a temp volume
# from the snapshot or in-use volume, and back it up.
# Get admin_metadata (needs admin context) to detect temporary volume.
is_temp_vol = False
with volume.obj_as_admin():
if volume.admin_metadata.get('temporary', 'False') == 'True':
is_temp_vol = True
LOG.info("Trying to delete temp volume: %s", volume.id)
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
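# Illustrative: a migration destination volume carries a migration_status such
# as 'target:<source-volume-id>', which is what the startswith() check above
# detects; None, 'error' and 'success' are treated as not migrating.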
notification = "delete.start"
if unmanage_only:
notification = "unmanage.start"
if not is_temp_vol:
self._notify_about_volume_usage(context, volume, notification)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != fields.SnapshotStatus.DELETING:
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error("Unable to delete busy volume.",
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
new_status = 'error_deleting'
if unmanage_only is True:
new_status = 'error_unmanaging'
self._clear_db(context, is_migrating_dest, volume,
new_status)
# If deleting source/destination volume in a migration or a temp
# volume for backup, we should skip quotas.
skip_quota = is_migrating or is_temp_vol
if not skip_quota:
# Get reservations
try:
reservations = None
if volume.status != 'error_managing_deleting':
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception("Failed to update usages deleting volume.",
resource=volume)
volume.destroy()
# If deleting source/destination volume in a migration or a temp
# volume for backup, we should skip quotas.
if not skip_quota:
notification = "delete.end"
if unmanage_only:
notification = "unmanage.end"
self._notify_about_volume_usage(context, volume, notification)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self._update_allocated_capacity(volume, decrement=True)
self.publish_service_capabilities(context)
msg = "Deleted volume successfully."
if unmanage_only:
msg = "Unmanaged volume successfully."
LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted).", resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def _revert_to_snapshot_generic(self, ctxt, volume, snapshot):
"""Generic way to revert volume to a snapshot.
the framework will use the generic way to implement the revert
to snapshot feature:
1. create a temporary volume from snapshot
2. mount two volumes to host
3. copy data from temporary volume to original volume
4. detach and destroy temporary volume
"""
temp_vol = None
try:
v_options = {'display_name': '[revert] temporary volume created '
'from snapshot %s' % snapshot.id}
ctxt = context.get_internal_tenant_context() or ctxt
temp_vol = self.driver._create_temp_volume_from_snapshot(
ctxt, volume, snapshot, volume_options=v_options)
self._copy_volume_data(ctxt, temp_vol, volume)
self.driver.delete_volume(temp_vol)
temp_vol.destroy()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(
"Failed to use snapshot %(snapshot)s to create "
"a temporary volume and copy data to volume "
" %(volume)s.",
{'snapshot': snapshot.id,
'volume': volume.id})
if temp_vol and temp_vol.status == 'available':
self.driver.delete_volume(temp_vol)
temp_vol.destroy()
def _revert_to_snapshot(self, context, volume, snapshot):
"""Use driver or generic method to rollback volume."""
try:
self.driver.revert_to_snapshot(context, volume, snapshot)
except (NotImplementedError, AttributeError):
LOG.info("Driver's 'revert_to_snapshot' is not found. "
"Try to use copy-snapshot-to-volume method.")
self._revert_to_snapshot_generic(context, volume, snapshot)
def _create_backup_snapshot(self, context, volume):
kwargs = {
'volume_id': volume.id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': volume.size,
'display_name': '[revert] volume %s backup snapshot' % volume.id,
'display_description': 'This is only used for backup when '
'reverting. If the reverting process '
'failed, you can restore your data by '
'creating a new volume with this snapshot.',
'volume_type_id': volume.volume_type_id,
'encryption_key_id': volume.encryption_key_id,
'metadata': {}
}
snapshot = objects.Snapshot(context=context, **kwargs)
snapshot.create()
self.create_snapshot(context, snapshot)
return snapshot
def revert_to_snapshot(self, context, volume, snapshot):
"""Revert a volume to a snapshot.
The process of reverting to snapshot consists of several steps:
1. create a snapshot for backup (in case of data loss)
2.1. use driver's specific logic to revert volume
2.2. try the generic way to revert volume if driver's method is missing
3. delete the backup snapshot
"""
backup_snapshot = None
try:
LOG.info("Start to perform revert to snapshot process.")
self._notify_about_volume_usage(context, volume,
"revert.start")
self._notify_about_snapshot_usage(context, snapshot,
"revert.start")
# Create a snapshot which can be used to restore the volume
# data by hand if revert process failed.
if self.driver.snapshot_revert_use_temp_snapshot():
backup_snapshot = self._create_backup_snapshot(context,
volume)
self._revert_to_snapshot(context, volume, snapshot)
except Exception as error:
with excutils.save_and_reraise_exception():
self._notify_about_volume_usage(context, volume,
"revert.end")
self._notify_about_snapshot_usage(context, snapshot,
"revert.end")
msg = ('Volume %(v_id)s revert to '
'snapshot %(s_id)s failed with %(error)s.')
msg_args = {'v_id': volume.id,
's_id': snapshot.id,
'error': six.text_type(error)}
v_res = volume.update_single_status_where(
'error',
'reverting')
if not v_res:
msg_args = {"id": volume.id,
"status": 'error'}
msg += ("Failed to reset volume %(id)s "
"status to %(status)s.") % msg_args
s_res = snapshot.update_single_status_where(
fields.SnapshotStatus.AVAILABLE,
fields.SnapshotStatus.RESTORING)
if not s_res:
msg_args = {"id": snapshot.id,
"status":
fields.SnapshotStatus.AVAILABLE}
msg += ("Failed to reset snapshot %(id)s "
"status to %(status)s." % msg_args)
LOG.exception(msg, msg_args)
v_res = volume.update_single_status_where(
'available', 'reverting')
if not v_res:
msg_args = {"id": volume.id,
"status": 'available'}
msg = _("Revert finished, but failed to reset "
"volume %(id)s status to %(status)s, "
"please manually reset it.") % msg_args
raise exception.BadResetResourceStatus(reason=msg)
s_res = snapshot.update_single_status_where(
fields.SnapshotStatus.AVAILABLE,
fields.SnapshotStatus.RESTORING)
if not s_res:
msg_args = {"id": snapshot.id,
"status":
fields.SnapshotStatus.AVAILABLE}
msg = _("Revert finished, but failed to reset "
"snapshot %(id)s status to %(status)s, "
"please manually reset it.") % msg_args
raise exception.BadResetResourceStatus(reason=msg)
if backup_snapshot:
self.delete_snapshot(context,
backup_snapshot, handle_quota=False)
msg = ('Volume %(v_id)s reverted to snapshot %(snap_id)s '
'successfully.')
msg_args = {'v_id': volume.id, 'snap_id': snapshot.id}
LOG.info(msg, msg_args)
self._notify_about_volume_usage(context, volume, "revert.end")
self._notify_about_snapshot_usage(context, snapshot, "revert.end")
@objects.Snapshot.set_workers
def create_snapshot(self, context, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception as create_error:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
self.message_api.create(
context,
action=message_field.Action.SNAPSHOT_CREATE,
resource_type=message_field.Resource.VOLUME_SNAPSHOT,
resource_uuid=snapshot['id'],
exception=create_error,
detail=message_field.Detail.SNAPSHOT_CREATE_ERROR)
vol_ref = self.db.volume_get(context, snapshot.volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, snapshot.volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata",
{'volume_id': snapshot.volume_id},
resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
self.message_api.create(
context,
action=message_field.Action.SNAPSHOT_CREATE,
resource_type=message_field.Resource.VOLUME_SNAPSHOT,
resource_uuid=snapshot['id'],
exception=ex,
detail=message_field.Detail.SNAPSHOT_UPDATE_METADATA_FAILED
)
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
# Resync with the volume's DB value. This addresses the case where
# the snapshot creation was in flight just prior to when the volume's
# fixed_key encryption key ID was migrated to Barbican.
snapshot.encryption_key_id = vol_ref.encryption_key_id
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info("Create snapshot completed successfully",
resource=snapshot)
return snapshot.id
@coordination.synchronized('{snapshot.id}-{f_name}')
def delete_snapshot(self, context, snapshot,
unmanage_only=False, handle_quota=True):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy as busy_error:
LOG.error("Delete snapshot failed, due to snapshot busy.",
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
self.message_api.create(
context,
action=message_field.Action.SNAPSHOT_DELETE,
resource_type=message_field.Resource.VOLUME_SNAPSHOT,
resource_uuid=snapshot['id'],
exception=busy_error)
return
except Exception as delete_error:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
self.message_api.create(
context,
action=message_field.Action.SNAPSHOT_DELETE,
resource_type=message_field.Resource.VOLUME_SNAPSHOT,
resource_uuid=snapshot['id'],
exception=delete_error,
detail=message_field.Detail.SNAPSHOT_DELETE_ERROR)
# Get reservations
reservations = None
try:
if handle_quota:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception("Update snapshot usages failed.",
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
msg = "Delete snapshot completed successfully."
if unmanage_only:
msg = "Unmanage snapshot completed successfully."
LOG.info(msg, resource=snapshot)
@coordination.synchronized('{volume_id}')
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode, volume=None):
"""Updates db to show volume is attached."""
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look
# up the volume by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
# Get admin_metadata. This needs admin context.
with volume.obj_as_admin():
volume_metadata = volume.admin_metadata
# check the volume status before attaching
if volume.status == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
host_name_sanitized = volume_utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = (
VA_LIST.get_all_by_instance_uuid(
context, instance_uuid))
else:
attachments = (
VA_LIST.get_all_by_host(
context, host_name_sanitized))
if attachments:
# check if volume<->instance mapping is already tracked in DB
for attachment in attachments:
if attachment['volume_id'] == volume_id:
volume.status = 'in-use'
volume.save()
return attachment
if (volume.status == 'in-use' and not volume.multiattach
and not volume.migration_status):
raise exception.InvalidVolume(
reason=_("volume is already attached and multiple attachments "
"are not enabled"))
self._notify_about_volume_usage(context, volume,
"attach.start")
attachment = volume.begin_attach(mode)
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
raise exception.InvalidUUID(uuid=instance_uuid)
try:
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume.id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception as excep:
with excutils.save_and_reraise_exception():
self.message_api.create(
context,
message_field.Action.ATTACH_VOLUME,
resource_uuid=volume_id,
exception=excep)
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
volume = attachment.finish_attach(
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info("Attach volume completed successfully.",
resource=volume)
return attachment
@coordination.synchronized('{volume_id}-{f_name}')
def detach_volume(self, context, volume_id, attachment_id=None,
volume=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
if attachment_id:
try:
attachment = objects.VolumeAttachment.get_by_id(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info("Volume detach called, but volume not attached.",
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
volume.finish_detach(attachment_id)
return
else:
# We can try and degrade gracefully here by trying to detach
# a volume without the attachment_id here if the volume only has
# one attachment. This is for backwards compatibility.
attachments = volume.volume_attachment
if len(attachments) > 1:
# There are more than 1 attachments for this volume
# we have to have an attachment id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
# so set the status to available and move on.
LOG.info("Volume detach called, but volume not attached.",
resource=volume)
volume.status = 'available'
volume.attach_status = fields.VolumeAttachStatus.DETACHED
volume.save()
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'), {
'attach_status':
fields.VolumeAttachStatus.ERROR_DETACHING})
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
# this was necessary to convert in-use volumes from
# int ID's to UUID's. Don't need this any longer
# We're going to remove the export here
# (delete the iscsi target)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception("Detach volume failed, due to "
"uninitialized driver.",
resource=volume)
except Exception as ex:
LOG.exception("Detach volume failed, due to "
"remove-export failure.",
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
volume.finish_detach(attachment.id)
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info("Detach volume completed successfully.", resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
cache_entry = self.image_volume_cache.get_entry(ctx,
volume_ref,
image_id,
image_meta)
if cache_entry:
LOG.debug('Cache entry already exists with image ID '
'%(image_id)s',
{'image_id': image_id})
return
image_volume = None
try:
if not self.image_volume_cache.ensure_space(ctx, volume_ref):
LOG.warning('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on %(service)s.',
{'image': image_id,
'service': volume_ref.service_topic_queue})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.',
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning('Failed to create new image-volume cache entry.'
' Error: %(exception)s', {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
# NOTE(yikun): Skip 'snapshot_id', 'source_volid' keys to avoid
# creating tmp img vol from wrong snapshot or wrong source vol.
skip = {'snapshot_id', 'source_volid'}
skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = (
fields.VolumeAttachStatus.DETACHED)
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
LOG.exception('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s, '
'failed (Exception: %(except)s)',
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume, allow_reschedule=False)
image_volume.refresh()
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.',
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume)
except exception.CinderException:
LOG.exception('Could not delete the image volume %(id)s.',
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
# The image_owner metadata should be set before uri is added to
# the image so glance cinder store can check its owner.
image_volume_meta = {'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception('Failed to register image volume location '
'%(uri)s.', {'uri': uri})
if not image_registered:
LOG.warning('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.',
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception('Could not delete failed image volume '
'%(id)s.', {'id': image_volume.id})
return False
image_volume_meta['glance_image_id'] = image_meta['id']
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = objects.Volume.get_by_id(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error("Upload volume to image encountered an error "
"(image-id: %(image_id)s).",
{'image_id': image_meta['id']},
resource=volume)
self.message_api.create(
context,
message_field.Action.COPY_VOLUME_TO_IMAGE,
resource_uuid=volume_id,
exception=error,
detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info("Copy volume to image completed successfully.",
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning("Deleting image in unexpected status: "
"%(image_status)s.",
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning("Image delete encountered an error.",
exc_info=True, resource={'type': 'image',
'id': image_id})
def _parse_connection_options(self, context, volume, conn_info):
# Add qos_specs to connection info
typeid = volume.volume_type_id
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
            # Only pass qos_specs that are designated to be consumed by the
            # front-end, or by both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
# NOTE(mnaser): The following configures for per-GB QoS
if specs is not None:
volume_size = int(volume.size)
tune_opts = ('read_iops_sec', 'read_bytes_sec',
'write_iops_sec', 'write_bytes_sec',
'total_iops_sec', 'total_bytes_sec')
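                # Illustrative example: with read_iops_sec_per_gb=10,
                # read_iops_sec_per_gb_min=500 and read_iops_sec_max=20000 on
                # a 100 GB volume, the resulting read_iops_sec is
                # min(max(500, 10 * 100), 20000) = 1000.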
for option in tune_opts:
option_per_gb = '%s_per_gb' % option
option_per_gb_min = '%s_per_gb_min' % option
option_max = '%s_max' % option
if option_per_gb in specs:
minimum_value = int(specs.pop(option_per_gb_min, 0))
value = int(specs[option_per_gb]) * volume_size
per_gb_value = max(minimum_value, value)
max_value = int(specs.pop(option_max, per_gb_value))
specs[option] = min(per_gb_value, max_value)
specs.pop(option_per_gb)
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = volume.admin_metadata
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.encryption_key_id)
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
return conn_info
def initialize_connection(self, context, volume, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format:
.. code:: json
{
"ip": "<ip>",
"initiator": "<initiator>"
}
ip:
the ip address of the connecting machine
initiator:
the iscsi initiator name of the connecting machine. This can be
None if the connecting machine does not support iscsi connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format:
.. code:: json
{
"driver_volume_type": "<driver_volume_type>",
"data": "<data>"
}
driver_volume_type:
a string to identify the type of volume. This can be used by the
calling code to determine the strategy for connecting to the
volume. This could be 'iscsi', 'rbd', 'sheepdog', etc.
data:
this is the data that the calling code will use to connect to the
volume. Keep in mind that this will be serialized to json in
various places, so it should not contain any non-json data types.
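        A simplified, driver-dependent iSCSI example (values are purely
        illustrative):
        .. code:: json
            {
                "driver_volume_type": "iscsi",
                "data": {
                    "target_discovered": false,
                    "target_iqn": "iqn.2010-10.org.openstack:volume-<id>",
                    "target_portal": "10.0.0.1:3260",
                    "target_lun": 1,
                    "volume_id": "<id>"
                }
            }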
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
# TODO(jdg): Add deprecation warning
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException as ex:
msg = _("Create export of volume failed (%s)") % ex.msg
LOG.exception(msg, resource=volume)
raise exception.VolumeBackendAPIException(data=msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except Exception as ex:
LOG.exception("Model update failed.", resource=volume)
try:
self.driver.remove_export(context.elevated(), volume)
except Exception:
LOG.exception('Could not remove export after DB model failed.')
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except exception.ConnectorRejected:
with excutils.save_and_reraise_exception():
LOG.info("The connector was rejected by the volume driver.")
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(context, volume, conn_info)
LOG.info("Initialize volume connection completed successfully.",
resource=volume)
return conn_info
def initialize_connection_snapshot(self, ctxt, snapshot_id, connector):
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate snapshot connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=err_msg)
model_update = None
try:
LOG.debug("Snapshot %s: creating export.", snapshot.id)
model_update = self.driver.create_export_snapshot(
ctxt.elevated(), snapshot, connector)
if model_update:
snapshot.provider_location = model_update.get(
'provider_location', None)
snapshot.provider_auth = model_update.get(
'provider_auth', None)
snapshot.save()
except exception.CinderException as ex:
msg = _("Create export of snapshot failed (%s)") % ex.msg
LOG.exception(msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=msg)
try:
if model_update:
snapshot.update(model_update)
snapshot.save()
except exception.CinderException as ex:
LOG.exception("Model update failed.", resource=snapshot)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn = self.driver.initialize_connection_snapshot(snapshot,
connector)
except Exception as err:
try:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') %
{'err': six.text_type(err)})
LOG.error(err_msg)
LOG.debug("Cleaning up failed connect initialization.")
self.driver.remove_export_snapshot(ctxt.elevated(), snapshot)
except Exception as ex:
ex_msg = (_('Error encountered during cleanup '
'of a failed attach: %(ex)s') %
{'ex': six.text_type(ex)})
LOG.error(ex_msg)
raise exception.VolumeBackendAPIException(data=ex_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Initialize snapshot connection completed successfully.",
resource=snapshot)
return conn
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate volume connection completed successfully.",
resource=volume_ref)
def terminate_connection_snapshot(self, ctxt, snapshot_id,
connector, force=False):
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.terminate_connection_snapshot(snapshot, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate snapshot connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate snapshot connection completed successfully.",
resource=snapshot)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Remove volume export completed successfully.",
resource=volume_ref)
def remove_export_snapshot(self, ctxt, snapshot_id):
"""Removes an export for a snapshot."""
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.remove_export_snapshot(ctxt, snapshot)
except Exception:
msg = _("Remove snapshot export failed.")
LOG.exception(msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Remove snapshot export completed successfully.",
resource=snapshot)
def accept_transfer(self, context, volume_id, new_user, new_project,
no_snapshots=False):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
        # NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
        # for those that do, allow them to return updated model info.
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception("Update volume model for "
"transfer operation failed.",
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info("Transfer volume completed successfully.",
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
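        # Sanity-check that the attached device (block path or file handle)
        # is actually readable before handing it back to the caller.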
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False,
attach_encryptor=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to attach volume %(vol)s.",
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume, properties)
attach_info = self._connect_device(conn)
try:
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_attach_volume_encryptor(ctxt,
attach_info,
encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to attach volume encryptor"
" %(vol)s.", {'vol': volume['id']})
self._detach_volume(ctxt, attach_info, volume, properties,
force=True)
return attach_info
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False,
attach_encryptor=False):
connector = attach_info['connector']
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_detach_volume_encryptor(attach_info, encryption)
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'], force=force)
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error('Unable to terminate volume connection: '
'%(err)s.', {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('_copy_volume_data %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
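        # 'remote' may be None, 'src', 'dest' or 'both'; remote sides are
        # attached/detached via the volume RPC API on their own backend,
        # local sides directly through this manager.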
attach_encryptor = False
# If the encryption method or key is changed, we have to
# copy data through dm-crypt.
if volume_types.volume_types_encryption_changed(
ctxt,
src_vol.volume_type_id,
dest_vol.volume_type_id):
attach_encryptor = True
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
properties = utils.brick_get_connector_properties(use_multipath,
enforce_multipath)
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(
ctxt, dest_vol, properties,
remote=dest_remote,
attach_encryptor=attach_encryptor)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(
ctxt, src_vol, properties,
remote=src_remote,
attach_encryptor=attach_encryptor)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to attach source volume for copy.")
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote,
attach_encryptor=attach_encryptor,
force=True)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt,
dest_vol.service_topic_queue,
False)
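        # Only request a sparse copy (skipping runs of zeros) when the
        # destination backend explicitly reports 'sparse_copy_volume'.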
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
volume_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to copy volume %(src)s to %(dest)s.",
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=True,
remote=dest_remote,
attach_encryptor=attach_encryptor)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=True,
remote=src_remote,
attach_encryptor=attach_encryptor)
def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
tmp_skip = {'snapshot_id', 'source_volid'}
skip = {'host', 'cluster_name', 'availability_zone'}
skip.update(tmp_skip)
skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES)
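        # host/cluster_name/availability_zone are taken from the destination
        # backend below; snapshot_id/source_volid are restored afterwards so
        # the destination driver creates a plain new volume.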
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
if volume_types.volume_types_encryption_changed(
ctxt, volume.volume_type_id, new_type_id):
encryption_key_id = volume_utils.create_encryption_key(
ctxt, self.key_manager, new_type_id)
new_vol_values['encryption_key_id'] = encryption_key_id
dst_service = self._get_service(backend['host'])
new_volume = objects.Volume(
context=ctxt,
host=backend['host'],
availability_zone=dst_service.availability_zone,
cluster_name=backend.get('cluster_name'),
status='creating',
attach_status=fields.VolumeAttachStatus.DETACHED,
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, None, None,
allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
new_volume.refresh()
tries = 0
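        # Poll with a quadratically growing sleep (1s, 4s, 9s, ...) until the
        # new volume is available, errors out, or the deadline expires.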
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
new_volume.refresh()
        # Restore the values we skipped earlier (snapshot_id, source_volid);
        # they were omitted so the create call above took the plain
        # raw-volume path instead of creating from a snapshot or source.
tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
if tmp_skipped_values:
new_volume.update(tmp_skipped_values)
new_volume.save()
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
# A volume might have attachments created, but if it is reserved
# it means it's being migrated prior to the attachment completion.
if not attachments or volume.status == 'reserved':
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume, new_volume,
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(
"Failed to copy volume %(vol1)s to %(vol2)s", {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
        # If we're in the migrating phase, we need to clean up the
        # destination volume because the source volume is the one that
        # remains.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
                    # The temporary volume was never created on the backend;
                    # only its DB record exists.
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume.",
{'vol': new_volume.id})
else:
            # If we're in the completing phase don't delete the destination
            # because we may have already deleted the source! However, the
            # migration_status in the database should be cleared so the
            # volume can still be handled after the migration failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase.",
{'vol': new_volume.id})
LOG.warning("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted.",
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
# NOTE(jdg): Things get a little hairy in here and we do a lot of
# things based on volume previous-status and current-status. At some
# point this should all be reworked but for now we need to maintain
        # backward compatibility and NOT change the API, so we're going to
        # try to make this work as well as we can.
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
volume_attachments = []
        # NOTE(jdg): With the new attach flow we deleted the attachment, so
        # the original volume should now be listed as available. We still
        # need to do the magic swappy thing of name.id etc., but we're done
        # with the original attachment record.
# In the "old flow" at this point the orig_volume_status will be in-use
# and the current status will be retyping. This is sort of a
# misleading deal, because Nova has already called terminate
# connection
        # New Attach Flow: Nova has gone ahead and deleted the attachment;
        # this is the source/original volume, we've already migrated the
        # data, and we're basically done with it at this point. We don't
        # need to issue the detach to toggle the status.
if orig_volume_status == 'in-use' and volume.status != 'available':
for attachment in volume.volume_attachment:
# Save the attachments the volume currently have
volume_attachments.append(attachment)
try:
self.detach_volume(ctxt, volume.id, attachment.id)
except Exception as ex:
LOG.error("Detach migration source volume "
"%(volume.id)s from attachment "
"%(attachment.id)s failed: %(err)s",
{'err': ex,
'volume.id': volume.id,
'attachment.id': attachment.id},
resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
        # Note this needs to go through rpc to the host of the new volume;
        # the current host and driver object is for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
        # NOTE(jdg): With the new attachment APIs nova will delete the
# attachment for the source volume for us before calling the
# migration-completion, now we just need to do the swapping on the
# volume record, but don't jack with the attachments other than
# updating volume_id
# In the old flow at this point the volumes are in attaching and
# deleting status (dest/new is deleting, but we've done our magic
# swappy thing so it's a bit confusing, but it does unwind properly
# when you step through it)
        # In the new flow we simplified this and we don't need it; instead of
        # doing a bunch of swapping we just do attachment-create/delete on the
        # nova side, and then here we just do the ID swaps that are necessary
        # to maintain the old behavior.
# Restore the attachments for old flow use-case
if orig_volume_status == 'in-use' and volume.status in ['available',
'reserved',
'attaching']:
for attachment in volume_attachments:
LOG.debug('Re-attaching: %s', attachment)
                # This is just a db state toggle; the volume is actually
                # already attached and in-use. The new attachment flow won't
                # allow this.
rpcapi.attach_volume(ctxt, volume,
attachment.instance_uuid,
attachment.attached_host,
attachment.mountpoint,
attachment.attach_mode or 'rw')
            # At this point we have done almost all of our swapping and
            # state-changes. The target volume is now marked back to
            # "in-use", the destination/worker volume is now in deleting
            # state, and the next steps will finish the deletion.
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s',
{'vol': volume.id, 'err': ex})
# For the new flow this is really the key part. We just use the
# attachments to the worker/destination volumes that we created and
# used for the libvirt migration and we'll just swap their volume_id
        # entries to correspond with the volume.id swap we did.
for attachment in VA_LIST.get_all_by_volume_id(ctxt, updated_new.id):
attachment.volume_id = volume.id
attachment.save()
# Phewww.. that was easy! Once we get to a point where the old attach
# flow can go away we really should rewrite all of this.
LOG.info("Complete-Migrate volume completed successfully.",
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
dst_service = self._get_service(host['host'])
updates = {
'host': host['host'],
'cluster_name': host.get('cluster_name'),
'migration_status': 'success',
'availability_zone': dst_service.availability_zone,
'previous_status': volume.status,
}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host, new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info("Migrate volume completed successfully.",
resource=volume)
def _report_driver_status(self, context):
        # It's possible during a live db migration that the self.service_uuid
        # value isn't set (we didn't restart services), so we refresh it here
        # as part of this periodic task.
if not self.service_uuid:
# We hack this with a try/except for unit tests temporarily
try:
service = self._get_service()
self.service_uuid = service.uuid
except exception.ServiceNotFound:
LOG.warning("Attempt to update service_uuid "
"resulted in a Service NotFound "
"exception, service_uuid field on "
"volumes will be NULL.")
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning("Update driver status failed: %(config_group)s "
"is uninitialized.",
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if "pools" in volume_stats:
for pool in volume_stats["pools"]:
pool.update(self.extra_capabilities)
else:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# NOTE(xyang): If driver reports replication_status to be
# 'error' in volume_stats, get model updates from driver
# and update db
if volume_stats.get('replication_status') == (
fields.ReplicationStatus.ERROR):
filters = self._get_cluster_or_host_filters()
groups = objects.GroupList.get_all_replicated(
context, filters=filters)
group_model_updates, volume_model_updates = (
self.driver.get_replication_error_status(context,
groups))
for grp_update in group_model_updates:
try:
grp_obj = objects.Group.get_by_id(
context, grp_update['group_id'])
grp_obj.update(grp_update)
grp_obj.save()
except exception.GroupNotFound:
# Group may be deleted already. Log a warning
# and continue.
LOG.warning("Group %(grp)s not found while "
"updating driver status.",
{'grp': grp_update['group_id']},
resource={
'type': 'group',
'id': grp_update['group_id']})
for vol_update in volume_model_updates:
try:
vol_obj = objects.Volume.get_by_id(
context, vol_update['volume_id'])
vol_obj.update(vol_update)
vol_obj.save()
except exception.VolumeNotFound:
# Volume may be deleted already. Log a warning
# and continue.
LOG.warning("Volume %(vol)s not found while "
"updating driver status.",
{'vol': vol_update['volume_id']},
resource={
'type': 'volume',
'id': vol_update['volume_id']})
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools:
if isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
else:
                raise exception.ProgrammingError(
                    reason='Pool stats reported by the driver are not in a '
                           'list')
# For drivers that are not reporting their stats by pool we will use
# the data from the special fixed pool created by
# _count_allocated_capacity.
elif self.stats.get('pools'):
vol_stats.update(next(iter(self.stats['pools'].values())))
# This is a special subcase of the above no pool case that happens when
# we don't have any volumes yet.
else:
vol_stats.update(self.stats)
vol_stats.pop('pools', None)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
@periodic_task.periodic_task(spacing=CONF.backend_stats_polling_interval)
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
volume_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
volume_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume, new_size, reservations):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except exception.TargetUpdateFailed:
# We just want to log this but continue on with quota commit
LOG.warning('Volume extended but failed to update target.')
except Exception:
LOG.exception("Extend volume failed.",
resource=volume)
self.message_api.create(
context,
message_field.Action.EXTEND_VOLUME,
resource_uuid=volume.id,
detail=message_field.Detail.DRIVER_FAILED_EXTEND)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
attachments = volume.volume_attachment
if not attachments:
orig_volume_status = 'available'
else:
orig_volume_status = 'in-use'
volume.update({'size': int(new_size), 'status': orig_volume_status})
volume.save()
if orig_volume_status == 'in-use':
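            # Let Nova know about the new size so the instances using this
            # volume can see the extended capacity.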
nova_api = compute.API()
instance_uuids = [attachment.instance_uuid
for attachment in attachments]
nova_api.extend_volume(context, instance_uuids, volume.id)
pool = volume_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info("Extend volume completed successfully.",
resource=volume)
def _is_our_backend(self, host, cluster_name):
return ((not cluster_name and
volume_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
volume_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
def retype(self, context, volume, new_type_id, host,
migration_policy='never', reservations=None,
old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
if old_reservations:
QUOTAS.rollback(context, old_reservations)
if new_reservations:
QUOTAS.rollback(context, new_reservations)
previous_status = (
volume.previous_status or volume.status)
status_update = {'status': previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything.
        # Use the admin context to be able to access volume extra_specs
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context.elevated(), volume.volume_type_id, new_type_id)
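        # 'diff' is expected to map 'extra_specs'/'qos_specs'/'encryption' to
        # {key: (old_value, new_value)} entries; 'all_equal' is True when the
        # two types are effectively identical.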
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
        # NOTE(jdg): Check whether the destination host or cluster (depending
        # on whether the volume is in a clustered backend or not) is the same
        # as the current one. If it's not, don't call the driver.retype
        # method; otherwise drivers that implement retype may report success,
        # which is invalid in the case of a migrate.
# We assume that those that support pools do this internally
# so we strip off the pools designation
if (not retyped and
not diff.get('encryption') and
self._is_our_backend(host['host'], host.get('cluster_name'))):
try:
new_type = volume_types.get_volume_type(context.elevated(),
new_type_id)
with volume.obj_as_admin():
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info("Volume %s: retyped successfully.", volume.id)
except Exception:
retyped = False
LOG.exception("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism.", volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
            if (rep_status is not None and rep_status not in
                    [fields.ReplicationStatus.DISABLED,
                     fields.ReplicationStatus.NOT_CAPABLE]):
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'cluster_name': host.get('cluster_name'),
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self._set_replication_status(diff, model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info("Retype volume completed successfully.",
resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
"""Update replication_status in model_update if it has changed."""
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
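        # The diff entry is an (old_value, new_value) tuple; index 1 is the
        # new type's replication_enabled setting.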
if replication_diff:
is_replicated = volume_utils.is_boolean_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
def manage_existing(self, ctxt, volume, ref=None):
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info("Manage existing volume completed successfully.",
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = volume_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def _get_cluster_or_host_filters(self):
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
return filters
def _get_my_volumes_summary(self, ctxt):
filters = self._get_cluster_or_host_filters()
return objects.VolumeList.get_volume_summary(ctxt, False, filters)
def _get_my_snapshots_summary(self, ctxt):
filters = self._get_cluster_or_host_filters()
return objects.SnapshotList.get_snapshot_summary(ctxt, False, filters)
def _get_my_resources(self, ctxt, ovo_class_list, limit=None, offset=None):
filters = self._get_cluster_or_host_filters()
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters,
limit=limit,
offset=offset)
def _get_my_volumes(self, ctxt, limit=None, offset=None):
return self._get_my_resources(ctxt, objects.VolumeList,
limit, offset)
def _get_my_snapshots(self, ctxt, limit=None, offset=None):
return self._get_my_resources(ctxt, objects.SnapshotList,
limit, offset)
def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable volumes failed, due "
"to uninitialized driver.")
cinder_volumes = self._get_my_volumes(ctxt)
try:
driver_entries = self.driver.get_manageable_volumes(
cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableVolumeList.
from_primitives(ctxt, driver_entries))
except AttributeError:
LOG.debug('Driver does not support listing manageable volumes.')
return []
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable volumes failed, due "
"to driver error.")
return driver_entries
def create_group(self, context, group):
"""Creates the group."""
context = context.elevated()
# Make sure the host in the DB matches our own when clustered
self._set_resource_host(group)
status = fields.GroupStatus.AVAILABLE
model_update = None
self._notify_about_group_usage(context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info("Group %s: creating", group.name)
try:
model_update = self.driver.create_group(context, group)
except NotImplementedError:
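                # Fall back: non-CG group types use the generic
                # implementation, while the default cgsnapshot type is
                # translated to the legacy consistency group driver API.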
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update = self._create_group_generic(context, group)
else:
cg, __ = self._convert_group_to_cg(group, [])
model_update = self.driver.create_consistencygroup(
context, cg)
if model_update:
if (model_update['status'] ==
fields.GroupStatus.ERROR):
msg = (_('Create group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error("Group %s: create failed",
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info("Group %s: created successfully", group.name)
self._notify_about_group_usage(context, group, "create.end")
LOG.info("Create group completed successfully.",
resource={'type': 'group',
'id': group.id})
return group
def create_group_from_src(self, context, group,
group_snapshot=None, source_group=None):
"""Creates the group from source.
The source can be a group snapshot or a source group.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = objects.VolumeList.get_all_by_generic_group(context,
group.id)
if group_snapshot:
try:
# Check if group_snapshot still exists
group_snapshot.refresh()
except exception.GroupSnapshotNotFound:
LOG.error("Create group from snapshot-%(snap)s failed: "
"SnapshotNotFound.",
{'snap': group_snapshot.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("snapshot-%s") % group_snapshot.id
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_GROUP_SRC_SNAP_STATUS):
msg = (_("Cannot create group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
raise exception.InvalidGroup(reason=msg)
if source_group:
try:
source_group.refresh()
except exception.GroupNotFound:
LOG.error("Create group "
"from source group-%(group)s failed: "
"GroupNotFound.",
{'group': source_group.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("group-%s") % source_group.id
source_vols = objects.VolumeList.get_all_by_generic_group(
context, source_group.id)
for source_vol in source_vols:
if (source_vol.status not in
VALID_CREATE_GROUP_SRC_GROUP_STATUS):
msg = (_("Cannot create group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol.id,
'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
raise exception.InvalidGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if group_snapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_group and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_group_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.create_group_from_src(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group, sorted_source_vols))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update, volumes_model_update = (
self._create_group_from_src_generic(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group,
sorted_source_vols))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
cgsnapshot, sorted_snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, sorted_snapshots, context))
source_cg, sorted_source_vols = (
self._convert_group_to_cg(source_group,
sorted_source_vols))
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, cg, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
self._remove_consistencygroup_id_from_volumes(volumes)
self._remove_consistencygroup_id_from_volumes(
sorted_source_vols)
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error("Create group "
"from source %(source)s failed.",
{'source': source_name},
resource={'type': 'group',
'id': group.id})
# Update volume status to 'error' as well.
self._remove_consistencygroup_id_from_volumes(volumes)
for vol in volumes:
vol.status = 'error'
vol.save()
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_group_usage(
context, group, "create.end")
LOG.info("Create group "
"from source-%(source)s completed successfully.",
{'source': source_name},
resource={'type': 'group',
'id': group.id})
return group
def _create_group_from_src_generic(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
model_update = {'status': 'available'}
volumes_model_update = []
for vol in volumes:
if snapshots:
for snapshot in snapshots:
if vol.snapshot_id == snapshot.id:
vol_model_update = {'id': vol.id}
try:
driver_update = (
self.driver.create_volume_from_snapshot(
vol, snapshot))
if driver_update:
driver_update.pop('id', None)
vol_model_update.update(driver_update)
if 'status' not in vol_model_update:
vol_model_update['status'] = 'available'
except Exception:
vol_model_update['status'] = 'error'
model_update['status'] = 'error'
volumes_model_update.append(vol_model_update)
break
elif source_vols:
for source_vol in source_vols:
if vol.source_volid == source_vol.id:
vol_model_update = {'id': vol.id}
try:
driver_update = self.driver.create_cloned_volume(
vol, source_vol)
if driver_update:
driver_update.pop('id', None)
vol_model_update.update(driver_update)
if 'status' not in vol_model_update:
vol_model_update['status'] = 'available'
except Exception:
vol_model_update['status'] = 'error'
model_update['status'] = 'error'
volumes_model_update.append(vol_model_update)
break
return model_update, volumes_model_update
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error("Source snapshot cannot be found for target "
"volume %(volume_id)s.",
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error("Source volumes cannot be found for target "
"volume %(volume_id)s.",
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
source_volid = vol.get('source_volid')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
if source_volid:
source_vol = objects.Volume.get_by_id(context, source_volid)
if source_vol.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context, source_volid, vol['id'])
if source_vol.multiattach:
update['multiattach'] = True
except exception.SnapshotNotFound:
LOG.error("Source snapshot %(snapshot_id)s cannot be found.",
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = fields.GroupStatus.ERROR
group.save()
raise
except exception.VolumeNotFound:
LOG.error("The source volume %(volume_id)s "
"cannot be found.",
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = fields.GroupStatus.ERROR
group.save()
raise
except exception.CinderException as ex:
LOG.error("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata.",
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = fields.GroupStatus.ERROR
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol, decrement=False, host=None):
# Update allocated capacity in volume stats
host = host or vol['host']
pool = volume_utils.extract_host(host, 'pool')
if pool is None:
            # Legacy volume; put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(host,
'pool',
True)
vol_size = -vol['size'] if decrement else vol['size']
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += vol_size
except KeyError:
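            # First volume accounted for this pool; start a fresh counter.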
self.stats['pools'][pool] = dict(
allocated_capacity_gb=max(vol_size, 0))
def delete_group(self, context, group):
"""Deletes group and the volumes in the group."""
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
for vol_obj in volumes:
if vol_obj.attach_status == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=vol_obj.id)
self._check_is_our_resource(vol_obj)
self._notify_about_group_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.delete_group(context, group, volumes))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update, volumes_model_update = (
self._delete_group_generic(context, group, volumes))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, cg,
volumes))
self._remove_consistencygroup_id_from_volumes(volumes)
if volumes_model_update:
for update in volumes_model_update:
# If we failed to delete a volume, make sure the
# status for the group is set to error as well
if (update['status'] in ['error_deleting', 'error']
and model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = update['status']
self.db.volumes_update(context, volumes_model_update)
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
self._remove_consistencygroup_id_from_volumes(volumes)
for vol_obj in volumes:
vol_obj.status = 'error'
vol_obj.save()
# Get reservations for group
try:
reserve_opts = {'groups': -1}
grpreservations = GROUP_QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
grpreservations = None
LOG.exception("Delete group "
"failed to update usages.",
resource={'type': 'group',
'id': group.id})
for vol in volumes:
# Get reservations for volume
try:
reserve_opts = {'volumes': -1,
'gigabytes': -vol.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
vol.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception("Delete group "
"failed to update usages.",
resource={'type': 'group',
'id': group.id})
vol.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= vol.size
if grpreservations:
GROUP_QUOTAS.commit(context, grpreservations,
project_id=project_id)
group.destroy()
self._notify_about_group_usage(
context, group, "delete.end")
self.publish_service_capabilities(context)
LOG.info("Delete group "
"completed successfully.",
resource={'type': 'group',
'id': group.id})
def _convert_group_to_cg(self, group, volumes):
if not group:
return None, None
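        # Build a legacy ConsistencyGroup view of the generic group so that
        # drivers which only implement the consistencygroup API can be used;
        # the volumes are temporarily linked to this CG object.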
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
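        # Undo the temporary CG linkage set up by _convert_group_to_cg.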
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
ctxt):
if not group_snapshot:
return None, None
cgsnap = cgsnapshot.CGSnapshot()
cgsnap.from_group_snapshot(group_snapshot)
# Populate consistencygroup object
grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
cg, __ = self._convert_group_to_cg(grp, [])
cgsnap.consistencygroup = cg
for snap in snapshots:
snap.cgsnapshot_id = snap.group_snapshot_id
snap.cgsnapshot = cgsnap
return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
def _create_group_generic(self, context, group):
"""Creates a group."""
# A group entry is already created in db. Just returns a status here.
model_update = {'status': fields.GroupStatus.AVAILABLE,
'created_at': timeutils.utcnow()}
return model_update
def _delete_group_generic(self, context, group, volumes):
"""Deletes a group and volumes in the group."""
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
def _collect_volumes_for_group(self, context, group, volumes, add=True):
if add:
valid_status = VALID_ADD_VOL_TO_GROUP_STATUS
else:
valid_status = VALID_REMOVE_VOL_FROM_GROUP_STATUS
volumes_ref = []
if not volumes:
return volumes_ref
for add_vol in volumes.split(','):
try:
add_vol_ref = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error("Update group "
"failed to %(op)s volume-%(volume_id)s: "
"VolumeNotFound.",
{'volume_id': add_vol,
'op': 'add' if add else 'remove'},
resource={'type': 'group',
'id': group.id})
raise
if add_vol_ref.status not in valid_status:
msg = (_("Can not %(op)s volume %(volume_id)s to "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref.id,
'group_id': group.id,
'status': add_vol_ref.status,
'valid': valid_status,
'op': 'add' if add else 'remove'})
raise exception.InvalidVolume(reason=msg)
if add:
self._check_is_our_resource(add_vol_ref)
volumes_ref.append(add_vol_ref)
return volumes_ref
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates group.
Update group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = self._collect_volumes_for_group(context,
group,
add_volumes,
add=True)
remove_volumes_ref = self._collect_volumes_for_group(context,
group,
remove_volumes,
add=False)
self._notify_about_group_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
try:
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_group(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update, add_volumes_update, remove_volumes_update = (
self._update_group_generic(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
else:
cg, remove_volumes_ref = self._convert_group_to_cg(
group, remove_volumes_ref)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, cg,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
volumes_to_update = []
if add_volumes_update:
volumes_to_update.extend(add_volumes_update)
if remove_volumes_update:
volumes_to_update.extend(remove_volumes_update)
self.db.volumes_update(context, volumes_to_update)
if model_update:
if model_update['status'] in (
[fields.GroupStatus.ERROR]):
msg = (_('Error occurred when updating group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except Exception as e:
with excutils.save_and_reraise_exception():
if isinstance(e, exception.VolumeDriverException):
LOG.error("Error occurred in the volume driver when "
"updating group %(group_id)s.",
{'group_id': group.id})
else:
LOG.error("Failed to update group %(group_id)s.",
{'group_id': group.id})
group.status = fields.GroupStatus.ERROR
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
for rem_vol in remove_volumes_ref:
if isinstance(e, exception.VolumeDriverException):
rem_vol.consistencygroup_id = None
rem_vol.consistencygroup = None
rem_vol.status = 'error'
rem_vol.save()
for add_vol in add_volumes_ref:
add_vol.group_id = group.id
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.group_id = None
rem_vol.save()
group.status = fields.GroupStatus.AVAILABLE
group.save()
self._notify_about_group_usage(
context, group, "update.end")
LOG.info("Update group completed successfully.",
resource={'type': 'group',
'id': group.id})
def create_group_snapshot(self, context, group_snapshot):
"""Creates the group_snapshot."""
caller_context = context
context = context.elevated()
LOG.info("GroupSnapshot %s: creating.", group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.create_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group_snapshot.group_type_id):
model_update, snapshots_model_update = (
self._create_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap_id = snap_model.pop('id')
snap_obj = objects.Snapshot.get_by_id(context, snap_id)
snap_obj.update(snap_model)
snap_obj.save()
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
[fields.GroupSnapshotStatus.ERROR_DELETING,
fields.GroupSnapshotStatus.ERROR]):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == fields.GroupSnapshotStatus.ERROR:
msg = (_('Error occurred when creating group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = fields.GroupSnapshotStatus.ERROR
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot.volume_id
snapshot_id = snapshot.id
vol_obj = objects.Volume.get_by_id(context, volume_id)
if vol_obj.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
                    # If the volume was not created from an image, no Glance
                    # metadata will be available for it in the volume glance
                    # metadata table.
pass
except exception.CinderException as ex:
LOG.error("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata.",
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
group_snapshot.status = fields.GroupSnapshotStatus.AVAILABLE
group_snapshot.save()
LOG.info("group_snapshot %s: created successfully",
group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.end")
return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Creates a group_snapshot."""
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
driver_update = self.driver.create_snapshot(snapshot)
if driver_update:
driver_update.pop('id', None)
snapshot_model_update.update(driver_update)
if 'status' not in snapshot_model_update:
snapshot_model_update['status'] = (
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = (
fields.SnapshotStatus.ERROR)
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Deletes a group_snapshot."""
model_update = {'status': group_snapshot.status}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.delete_snapshot(snapshot)
snapshot_model_update['status'] = (
fields.SnapshotStatus.DELETED)
except exception.SnapshotIsBusy:
snapshot_model_update['status'] = (
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = (
fields.SnapshotStatus.ERROR)
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def delete_group_snapshot(self, context, group_snapshot):
"""Deletes group_snapshot."""
caller_context = context
context = context.elevated()
project_id = group_snapshot.project_id
LOG.info("group_snapshot %s: deleting", group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.delete_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group_snapshot.group_type_id):
model_update, snapshots_model_update = (
self._delete_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap_model.pop('id')
snap.update(snap_model)
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = fields.GroupSnapshotStatus.ERROR
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
self._remove_cgsnapshot_id_from_snapshots(snapshots)
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
reserve_opts = {'snapshots': -1}
if not CONF.no_snapshot_gb_quota:
reserve_opts['gigabytes'] = -snapshot.volume_size
volume_ref = objects.Volume.get_by_id(context,
snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception("Failed to update usages deleting snapshot")
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot.id)
snapshot.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
group_snapshot.destroy()
LOG.info("group_snapshot %s: deleted successfully",
group_snapshot.id)
self._notify_about_group_snapshot_usage(context, group_snapshot,
"delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
# Replication V2.1 and a/a method
def failover(self, context, secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
        Instructs a replication capable/configured backend to fail over
        to one of its secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to fail over
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
        in order for it to determine the replicated volumes on the host,
        if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
updates = {}
repl_status = fields.ReplicationStatus
service = self._get_service()
# TODO(geguileo): We should optimize these updates by doing them
# directly on the DB with just 3 queries, one to change the volumes
# another to change all the snapshots, and another to get replicated
# volumes.
# Change non replicated volumes and their snapshots to error if we are
# failing over, leave them as they are for failback
volumes = self._get_my_volumes(context)
replicated_vols = []
for volume in volumes:
if volume.replication_status not in (repl_status.DISABLED,
repl_status.NOT_CAPABLE):
replicated_vols.append(volume)
elif secondary_backend_id != self.FAILBACK_SENTINEL:
volume.previous_status = volume.status
volume.status = 'error'
volume.replication_status = repl_status.NOT_CAPABLE
volume.save()
for snapshot in volume.snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
volume_update_list = None
group_update_list = None
try:
# For non clustered we can call v2.1 failover_host, but for
# clustered we call a/a failover method. We know a/a method
# exists because BaseVD class wouldn't have started if it didn't.
failover = getattr(self.driver,
'failover' if service.is_clustered
else 'failover_host')
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
# It includes volumes in replication groups and those not in them
# expected form of group_update_list:
# [{group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}},
# {group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}}]
filters = self._get_cluster_or_host_filters()
groups = objects.GroupList.get_all_replicated(context,
filters=filters)
active_backend_id, volume_update_list, group_update_list = (
failover(context,
replicated_vols,
secondary_id=secondary_backend_id,
groups=groups))
try:
update_data = {u['volume_id']: u['updates']
for u in volume_update_list}
except KeyError:
msg = "Update list, doesn't include volume_id"
raise exception.ProgrammingError(reason=msg)
try:
update_group_data = {g['group_id']: g['updates']
for g in group_update_list}
except KeyError:
msg = "Update list, doesn't include group_id"
raise exception.ProgrammingError(reason=msg)
except Exception as exc:
# NOTE(jdg): Drivers need to be aware if they fail during
# a failover sequence, we're expecting them to cleanup
# and make sure the driver state is such that the original
# backend is still set as primary as per driver memory
# We don't want to log the exception trace invalid replication
# target
if isinstance(exc, exception.InvalidReplicationTarget):
log_method = LOG.error
# Preserve the replication_status: Status should be failed over
# if we were failing back or if we were failing over from one
# secondary to another secondary. In both cases
# active_backend_id will be set.
if service.active_backend_id:
updates['replication_status'] = repl_status.FAILED_OVER
else:
updates['replication_status'] = repl_status.ENABLED
else:
log_method = LOG.exception
updates.update(disabled=True,
replication_status=repl_status.FAILOVER_ERROR)
log_method("Error encountered during failover on host: %(host)s "
"to %(backend_id)s: %(error)s",
{'host': self.host, 'backend_id': secondary_backend_id,
'error': exc})
# We dump the update list for manual recovery
LOG.error('Failed update_list is: %s', volume_update_list)
self.finish_failover(context, service, updates)
return
if secondary_backend_id == "default":
updates['replication_status'] = repl_status.ENABLED
updates['active_backend_id'] = ''
updates['disabled'] = service.frozen
updates['disabled_reason'] = 'frozen' if service.frozen else ''
else:
updates['replication_status'] = repl_status.FAILED_OVER
updates['active_backend_id'] = active_backend_id
updates['disabled'] = True
updates['disabled_reason'] = 'failed-over'
self.finish_failover(context, service, updates)
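        # Apply the per-volume updates returned by the driver, defaulting
        # the replication_status to the backend-level outcome when the
        # driver did not report one.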
for volume in replicated_vols:
update = update_data.get(volume.id, {})
if update.get('status', '') == 'error':
update['replication_status'] = repl_status.FAILOVER_ERROR
elif update.get('replication_status') in (None,
repl_status.FAILED_OVER):
update['replication_status'] = updates['replication_status']
if update['replication_status'] == repl_status.FAILOVER_ERROR:
update.setdefault('status', 'error')
# Set all volume snapshots to error
for snapshot in volume.snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
if 'status' in update:
update['previous_status'] = volume.status
volume.update(update)
volume.save()
for grp in groups:
update = update_group_data.get(grp.id, {})
if update.get('status', '') == 'error':
update['replication_status'] = repl_status.FAILOVER_ERROR
elif update.get('replication_status') in (None,
repl_status.FAILED_OVER):
update['replication_status'] = updates['replication_status']
if update['replication_status'] == repl_status.FAILOVER_ERROR:
update.setdefault('status', 'error')
grp.update(update)
grp.save()
LOG.info("Failed over to replication target successfully.")
# TODO(geguileo): In P - remove this
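    # failover_host is kept as an alias of failover, presumably for
    # backward compatibility with the older RPC method name (see TODO above).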
failover_host = failover
def finish_failover(self, context, service, updates):
"""Completion of the failover locally or via RPC."""
# If the service is clustered, broadcast the service changes to all
# volume services, including this one.
if service.is_clustered:
# We have to update the cluster with the same data, and we do it
# before broadcasting the failover_completed RPC call to prevent
            # races with services that may be starting.
for key, value in updates.items():
setattr(service.cluster, key, value)
service.cluster.save()
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.failover_completed(context, service, updates)
else:
service.update(updates)
service.save()
def failover_completed(self, context, updates):
"""Finalize failover of this backend.
When a service is clustered and replicated the failover has 2 stages,
one that does the failover of the volumes and another that finalizes
the failover of the services themselves.
This method takes care of the last part and is called from the service
doing the failover of the volumes after finished processing the
volumes.
"""
service = self._get_service()
service.update(updates)
try:
self.driver.failover_completed(context, service.active_backend_id)
except Exception:
msg = _('Driver reported error during replication failover '
'completion.')
LOG.exception(msg)
service.disabled = True
service.disabled_reason = msg
service.replication_status = (
fields.ReplicationStatus.ERROR)
service.save()
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.')
service = self._get_service()
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info("Set backend status to frozen successfully.")
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.')
return False
service = self._get_service()
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info("Thawed backend successfully.")
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
LOG.exception("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.",
{'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
sort_keys, sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable snapshots failed, due "
"to uninitialized driver.")
cinder_snapshots = self._get_my_snapshots(ctxt)
try:
driver_entries = self.driver.get_manageable_snapshots(
cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableSnapshotList.
from_primitives(ctxt, driver_entries))
except AttributeError:
LOG.debug('Driver does not support listing manageable snapshots.')
return []
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable snapshots failed, due "
"to driver error.")
return driver_entries
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
@utils.trace
def get_backup_device(self, ctxt, backup, want_objects=False,
async_call=False):
try:
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
except Exception as ex:
if async_call:
LOG.exception("Failed to get backup device. "
"Calling backup continue_backup to cleanup")
rpcapi = backup_rpcapi.BackupAPI()
rpcapi.continue_backup(ctxt, backup, backup_device=None)
return
else:
                with excutils.save_and_reraise_exception():
LOG.exception("Failed to get backup device.")
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
# TODO(sborkows): from_primitive method will be removed in O, so there
# is a need to clean here then.
backup_device = (
objects.BackupDeviceInfo.from_primitive(backup_device_dict, ctxt)
if want_objects else backup_device_dict)
if async_call:
# we have to use an rpc call back to the backup manager to
# continue the backup
LOG.info("Calling backup continue_backup for: {}".format(backup))
rpcapi = backup_rpcapi.BackupAPI()
rpcapi.continue_backup(ctxt, backup, backup_device)
else:
# The rpc api version doesn't support the async callback
# so we fallback to returning the value itself.
return backup_device
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
def _connection_create(self, ctxt, volume, attachment, connector):
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(ctxt.elevated(),
volume, connector)
except exception.CinderException as ex:
err_msg = (_("Create export for volume failed (%s).") % ex.msg)
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception("Model update failed.", resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except exception.ConnectorRejected:
with excutils.save_and_reraise_exception():
LOG.info("The connector was rejected by the volume driver.")
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(ctxt.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(ctxt, volume, conn_info)
# NOTE(jdg): Get rid of the nested dict (data key)
conn_data = conn_info.pop('data', {})
connection_info = conn_data.copy()
connection_info.update(conn_info)
values = {'volume_id': volume.id,
'attach_status': 'attaching',
'connector': jsonutils.dumps(connector)}
# TODO(mriedem): Use VolumeAttachment.save() here.
self.db.volume_attachment_update(ctxt, attachment.id, values)
connection_info['attachment_id'] = attachment.id
return connection_info
def attachment_update(self,
context,
vref,
connector,
attachment_id):
"""Update/Finalize an attachment.
This call updates a valid attachment record to associate with a volume
and provide the caller with the proper connection info. Note that
this call requires an `attachment_ref`. It's expected that prior to
this call that the volume and an attachment UUID has been reserved.
param: vref: Volume object to create attachment for
param: connector: Connector object to use for attachment creation
param: attachment_ref: ID of the attachment record to update
"""
mode = connector.get('mode', 'rw')
self._notify_about_volume_usage(context, vref, 'attach.start')
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
# Check to see if a mode parameter was set during attachment-create;
        # this seems kinda wonky, but it's how we're keeping backward
        # compatibility with the use of connector.mode for now. In other
        # words, we're
# making sure we still honor ro settings from the connector but
# we override that if a value was specified in attachment-create
if attachment_ref.attach_mode != 'null':
mode = attachment_ref.attach_mode
connector['mode'] = mode
connection_info = self._connection_create(context,
vref,
attachment_ref,
connector)
try:
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
vref,
attachment_ref.instance_uuid,
connector.get('host', ''),
connector.get('mountpoint', 'na'))
except Exception as err:
self.message_api.create(
context, message_field.Action.UPDATE_ATTACHMENT,
resource_uuid=vref.id,
exception=err)
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_ref.id,
{'attach_status':
fields.VolumeAttachStatus.ERROR_ATTACHING})
self.db.volume_attached(context.elevated(),
attachment_ref.id,
attachment_ref.instance_uuid,
connector.get('host', ''),
connector.get('mountpoint', 'na'),
mode,
False)
vref.refresh()
attachment_ref.refresh()
LOG.info("attachment_update completed successfully.",
resource=vref)
return connection_info
def _connection_terminate(self, context, volume,
attachment, force=False):
"""Remove a volume connection, but leave attachment.
Exits early if the attachment does not have a connector and returns
None to indicate shared connections are irrelevant.
"""
utils.require_driver_initialized(self.driver)
connector = attachment.connector
if not connector and not force:
# It's possible to attach a volume to a shelved offloaded server
# in nova, and a shelved offloaded server is not on a compute host,
# which means the attachment was made without a host connector,
# so if we don't have a connector we can't terminate a connection
# that was never actually made to the storage backend, so just
# log a message and exit.
LOG.debug('No connector for attachment %s; skipping storage '
'backend terminate_connection call.', attachment.id)
# None indicates we don't know and don't care.
return None
try:
shared_connections = self.driver.terminate_connection(volume,
connector,
force=force)
if not isinstance(shared_connections, bool):
shared_connections = False
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate volume connection completed successfully.",
resource=volume)
# NOTE(jdg): Return True/False if there are other outstanding
# attachments that share this connection. If True should signify
# caller to preserve the actual host connection (work should be
# done in the brick connector as it has the knowledge of what's
# going on here.
return shared_connections
def attachment_delete(self, context, attachment_id, vref):
"""Delete/Detach the specified attachment.
Notifies the backend device that we're detaching the specified
attachment instance.
param: vref: Volume object associated with the attachment
param: attachment: Attachment reference object to remove
NOTE if the attachment reference is None, we remove all existing
attachments for the specified volume object.
"""
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
if not attachment_ref:
for attachment in VA_LIST.get_all_by_volume_id(context, vref.id):
self._do_attachment_delete(context, vref, attachment)
else:
self._do_attachment_delete(context, vref, attachment_ref)
def _do_attachment_delete(self, context, vref, attachment):
utils.require_driver_initialized(self.driver)
self._notify_about_volume_usage(context, vref, "detach.start")
has_shared_connection = self._connection_terminate(context,
vref,
attachment)
try:
LOG.debug('Deleting attachment %(attachment_id)s.',
{'attachment_id': attachment.id},
resource=vref)
self.driver.detach_volume(context, vref, attachment)
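            # Only remove the export when the driver reported that no other
            # attachments share this connection; skip it when
            # _connection_terminate returned None (no connection was made).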
if has_shared_connection is not None and not has_shared_connection:
self.driver.remove_export(context.elevated(), vref)
except Exception:
# FIXME(jdg): Obviously our volume object is going to need some
# changes to deal with multi-attach and figuring out how to
# represent a single failed attach out of multiple attachments
# TODO(jdg): object method here
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': fields.VolumeAttachStatus.ERROR_DETACHING})
else:
self.db.volume_detached(context.elevated(), vref.id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(),
vref.id,
'attached_mode')
self._notify_about_volume_usage(context, vref, "detach.end")
# Replication group API (Tiramisu)
def enable_replication(self, ctxt, group):
"""Enable replication."""
group.refresh()
if group.replication_status != fields.ReplicationStatus.ENABLING:
msg = _("Replication status in group %s is not "
"enabling. Cannot enable replication.") % group.id
LOG.error(msg)
raise exception.InvalidGroup(reason=msg)
volumes = group.volumes
for vol in volumes:
vol.refresh()
if vol.replication_status != fields.ReplicationStatus.ENABLING:
msg = _("Replication status in volume %s is not "
"enabling. Cannot enable replication.") % vol.id
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self._notify_about_group_usage(
ctxt, group, "enable_replication.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.enable_replication(ctxt, group, volumes))
if volumes_model_update:
for update in volumes_model_update:
vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
vol_obj.update(update)
vol_obj.save()
# If we failed to enable a volume, make sure the status
# for the group is set to error as well
if (update.get('replication_status') ==
fields.ReplicationStatus.ERROR and
model_update.get('replication_status') !=
fields.ReplicationStatus.ERROR):
model_update['replication_status'] = update.get(
'replication_status')
if model_update:
if (model_update.get('replication_status') ==
fields.ReplicationStatus.ERROR):
msg = _('Enable replication failed.')
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except exception.CinderException as ex:
group.status = fields.GroupStatus.ERROR
group.replication_status = fields.ReplicationStatus.ERROR
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.replication_status = fields.ReplicationStatus.ERROR
vol.save()
err_msg = _("Enable replication group failed: "
"%s.") % six.text_type(ex)
raise exception.ReplicationGroupError(reason=err_msg,
group_id=group.id)
for vol in volumes:
vol.replication_status = fields.ReplicationStatus.ENABLED
vol.save()
group.replication_status = fields.ReplicationStatus.ENABLED
group.save()
self._notify_about_group_usage(
ctxt, group, "enable_replication.end", volumes)
LOG.info("Enable replication completed successfully.",
resource={'type': 'group',
'id': group.id})
# Replication group API (Tiramisu)
def disable_replication(self, ctxt, group):
"""Disable replication."""
group.refresh()
if group.replication_status != fields.ReplicationStatus.DISABLING:
msg = _("Replication status in group %s is not "
"disabling. Cannot disable replication.") % group.id
LOG.error(msg)
raise exception.InvalidGroup(reason=msg)
volumes = group.volumes
for vol in volumes:
vol.refresh()
if (vol.replication_status !=
fields.ReplicationStatus.DISABLING):
msg = _("Replication status in volume %s is not "
"disabling. Cannot disable replication.") % vol.id
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self._notify_about_group_usage(
ctxt, group, "disable_replication.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.disable_replication(ctxt, group, volumes))
if volumes_model_update:
for update in volumes_model_update:
vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
vol_obj.update(update)
vol_obj.save()
# If we failed to enable a volume, make sure the status
# for the group is set to error as well
if (update.get('replication_status') ==
fields.ReplicationStatus.ERROR and
model_update.get('replication_status') !=
fields.ReplicationStatus.ERROR):
model_update['replication_status'] = update.get(
'replication_status')
if model_update:
if (model_update.get('replication_status') ==
fields.ReplicationStatus.ERROR):
msg = _('Disable replication failed.')
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except exception.CinderException as ex:
group.status = fields.GroupStatus.ERROR
group.replication_status = fields.ReplicationStatus.ERROR
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.replication_status = fields.ReplicationStatus.ERROR
vol.save()
err_msg = _("Disable replication group failed: "
"%s.") % six.text_type(ex)
raise exception.ReplicationGroupError(reason=err_msg,
group_id=group.id)
for vol in volumes:
vol.replication_status = fields.ReplicationStatus.DISABLED
vol.save()
group.replication_status = fields.ReplicationStatus.DISABLED
group.save()
self._notify_about_group_usage(
ctxt, group, "disable_replication.end", volumes)
LOG.info("Disable replication completed successfully.",
resource={'type': 'group',
'id': group.id})
# Replication group API (Tiramisu)
def failover_replication(self, ctxt, group, allow_attached_volume=False,
secondary_backend_id=None):
"""Failover replication."""
group.refresh()
if group.replication_status != fields.ReplicationStatus.FAILING_OVER:
msg = _("Replication status in group %s is not "
"failing-over. Cannot failover replication.") % group.id
LOG.error(msg)
raise exception.InvalidGroup(reason=msg)
volumes = group.volumes
for vol in volumes:
vol.refresh()
if vol.status == 'in-use' and not allow_attached_volume:
msg = _("Volume %s is attached but allow_attached_volume flag "
"is False. Cannot failover replication.") % vol.id
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if (vol.replication_status !=
fields.ReplicationStatus.FAILING_OVER):
msg = _("Replication status in volume %s is not "
"failing-over. Cannot failover replication.") % vol.id
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self._notify_about_group_usage(
ctxt, group, "failover_replication.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.failover_replication(
ctxt, group, volumes, secondary_backend_id))
if volumes_model_update:
for update in volumes_model_update:
vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
vol_obj.update(update)
vol_obj.save()
# If we failed to enable a volume, make sure the status
# for the group is set to error as well
if (update.get('replication_status') ==
fields.ReplicationStatus.ERROR and
model_update.get('replication_status') !=
fields.ReplicationStatus.ERROR):
model_update['replication_status'] = update.get(
'replication_status')
if model_update:
if (model_update.get('replication_status') ==
fields.ReplicationStatus.ERROR):
msg = _('Failover replication failed.')
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except exception.CinderException as ex:
group.status = fields.GroupStatus.ERROR
group.replication_status = fields.ReplicationStatus.ERROR
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.replication_status = fields.ReplicationStatus.ERROR
vol.save()
err_msg = _("Failover replication group failed: "
"%s.") % six.text_type(ex)
raise exception.ReplicationGroupError(reason=err_msg,
group_id=group.id)
for vol in volumes:
if secondary_backend_id == "default":
vol.replication_status = fields.ReplicationStatus.ENABLED
else:
vol.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
vol.save()
if secondary_backend_id == "default":
group.replication_status = fields.ReplicationStatus.ENABLED
else:
group.replication_status = fields.ReplicationStatus.FAILED_OVER
group.save()
self._notify_about_group_usage(
ctxt, group, "failover_replication.end", volumes)
LOG.info("Failover replication completed successfully.",
resource={'type': 'group',
'id': group.id})
def list_replication_targets(self, ctxt, group):
"""Provide a means to obtain replication targets for a group.
This method is used to find the replication_device config
info. 'backend_id' is a required key in 'replication_device'.
Response Example for admin:
.. code:: json
{
"replication_targets": [
{
"backend_id": "vendor-id-1",
"unique_key": "val1"
},
{
"backend_id": "vendor-id-2",
"unique_key": "val2"
}
]
}
Response example for non-admin:
.. code:: json
{
"replication_targets": [
{
"backend_id": "vendor-id-1"
},
{
"backend_id": "vendor-id-2"
}
]
}
"""
replication_targets = []
try:
group.refresh()
if self.configuration.replication_device:
if ctxt.is_admin:
for rep_dev in self.configuration.replication_device:
keys = rep_dev.keys()
dev = {}
for k in keys:
dev[k] = rep_dev[k]
replication_targets.append(dev)
else:
for rep_dev in self.configuration.replication_device:
dev = rep_dev.get('backend_id')
if dev:
replication_targets.append({'backend_id': dev})
except exception.GroupNotFound:
err_msg = (_("Get replication targets failed. Group %s not "
"found.") % group.id)
LOG.exception(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
return {'replication_targets': replication_targets}
import requests
import time
from castellan import key_manager
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder.keymgr import migration as key_migration
from cinder import manager
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_migration
from cinder.volume import volume_types
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
VA_LIST = objects.VolumeAttachmentList
volume_manager_opts = [
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help="FC Zoning mode configured, only 'fabric' is "
"supported now."),
cfg.IntOpt('reinit_driver_count',
default=3,
               help='Maximum times to reinitialize the driver '
                    'if volume initialization fails. The retry interval '
                    'backs off exponentially: 1s, 2s, 4s, etc.'),
cfg.IntOpt('init_host_max_objects_retrieval',
default=0,
help='Max number of volumes and snapshots to be retrieved '
'per batch during volume manager host initialization. '
'Query results will be obtained in batches from the '
'database and not in one shot to avoid extreme memory '
'usage. Set 0 to turn off this functionality.'),
cfg.IntOpt('backend_stats_polling_interval',
default=60,
min=3,
help='Time in seconds between requests for usage statistics '
'from the backend. Be aware that generating usage '
'statistics is expensive for some backends, so setting '
'this value too low may adversely affect performance.'),
]
volume_backend_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
cfg.IntOpt('backend_native_threads_pool_size',
default=20,
min=20,
help='Size of the native threads pool for the backend. '
'Increase for backends that heavily rely on this, like '
'the RBD driver.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
CONF.register_opts(volume_backend_opts, group=config.SHARED_CONF_GROUP)
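# Map deprecated volume driver import paths to their current locations;
# __init__ logs a deprecation warning and redirects old configuration values.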
MAPPING = {
'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver':
'cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver',
'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver':
'cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver',
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_iscsi.'
'FJDXISCSIDriver',
'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver':
'cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver',
}
class VolumeManager(manager.CleanableManager,
manager.SchedulerDependentManager):
RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
FAILBACK_SENTINEL = 'default'
target = messaging.Target(version=RPC_API_VERSION)
    # When cloning a volume we shouldn't copy volume_type, consistencygroup
    # and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment', 'group'}
def _get_service(self, host=None, binary=constants.VOLUME_BINARY):
host = host or self.host
ctxt = context.get_admin_context()
svc_host = volume_utils.extract_host(host, 'backend')
return objects.Service.get_by_args(ctxt, svc_host, binary)
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
# NOTE(dulek): service_name=None means we're running in unit tests.
service_name = service_name or 'backend_defaults'
self.configuration = config.Configuration(volume_backend_opts,
config_group=service_name)
self._set_tpool_size(
self.configuration.backend_native_threads_pool_size)
self.stats = {}
self.service_uuid = None
if not volume_driver:
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning("Driver path %s is deprecated, update your "
"configuration to the new path.", volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
curr_active_backend_id = None
try:
service = self._get_service()
except exception.ServiceNotFound:
LOG.info("Service not found for updating "
"active_backend_id, assuming default "
"for driver init.")
else:
curr_active_backend_id = service.active_backend_id
self.service_uuid = service.uuid
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning("Suppressing requests library SSL Warnings")
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.key_manager = key_manager.API(CONF)
driver_additional_endpoints = []
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
cluster_name=self.cluster,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id,
additional_endpoints=driver_additional_endpoints)
self.additional_endpoints.extend(driver_additional_endpoints)
if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
msg = _('Active-Active configuration is not currently supported '
'by driver %s.') % volume_driver
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Invalid JSON: %s",
self.driver.configuration.extra_capabilities)
backend_zone = self.driver.configuration.safe_get(
'backend_availability_zone')
if backend_zone:
self.availability_zone = backend_zone
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info('Image-volume cache enabled for host %(host)s.',
{'host': self.host})
else:
LOG.info('Image-volume cache disabled for host %(host)s.',
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
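        # Add this volume's size to the per-pool and total
        # allocated_capacity_gb statistics, resolving the pool name from the
        # host string or the driver if necessary.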
pool = volume_utils.extract_host(volume['host'], 'pool')
if pool is None:
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception('Fetch volume pool name failed.',
resource=volume)
return
if pool:
new_host = volume_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
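        # Report whether the volume table was empty when the service started;
        # the result is passed to the driver as is_vol_db_empty.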
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info("Determined volume DB was empty at startup.")
return True
else:
LOG.info("Determined volume DB was not empty at startup.")
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
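        # Ask the driver for current provider info for these volumes and
        # snapshots and persist any provider_id changes to the DB.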
        # NOTE(jdg): For now this just updates provider_id; we can add more
        # items to the update if they're relevant, but we need to be safe
        # about what we allow to be updated.
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
if snapshot_updates:
for snap in snapshots:
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info('Including all resources from host %(host)s in cluster '
'%(cluster)s.',
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_gs = objects.GroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'%(num_gs)s generic groups and %(num_cache)s image '
'volume caches from host %(host)s have been included in '
'cluster %(cluster)s.',
{'num_vols': num_vols, 'num_cgs': num_cgs, 'num_gs': num_gs,
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
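        # Initialize the backend, refusing to run unsupported drivers unless
        # explicitly enabled, and retry driver initialization with exponential
        # backoff if it does not come up on the first attempt.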
if not self.driver.supported:
utils.log_unsupported_driver_warning(self.driver)
if not self.configuration.enable_unsupported_driver:
LOG.error("Unsupported drivers are disabled."
" You can re-enable by adding "
"enable_unsupported_driver=True to the "
"driver section in cinder.conf",
resource={'type': 'driver',
'id': self.__class__.__name__})
return
self._init_host(added_to_cluster, **kwargs)
if not self.driver.initialized:
reinit_count = 0
while reinit_count < CONF.reinit_driver_count:
time.sleep(2 ** reinit_count)
self._init_host(added_to_cluster, **kwargs)
if self.driver.initialized:
return
reinit_count += 1
def _init_host(self, added_to_cluster=None, **kwargs):
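        # Set up the driver, rebuild allocated-capacity stats from existing
        # volumes (optionally in batches), re-export in-use volumes and kick
        # off fixed-key encryption migration before publishing capabilities.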
ctxt = context.get_admin_context()
if added_to_cluster:
self._include_resources_in_cluster(ctxt)
LOG.info("Starting volume driver %(driver_name)s (%(version)s)",
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception("Failed to initialize driver.",
resource={'type': 'driver',
'id': self.__class__.__name__})
            # We don't want to continue, since we failed to initialize the
            # driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
# Zero stats
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
        # Batch retrieval of volumes and snapshots
num_vols, num_snaps, max_objs_num, req_range = None, None, None, [0]
req_limit = CONF.init_host_max_objects_retrieval
use_batch_objects_retrieval = req_limit > 0
if use_batch_objects_retrieval:
# Get total number of volumes
num_vols, __, __ = self._get_my_volumes_summary(ctxt)
# Get total number of snapshots
num_snaps, __ = self._get_my_snapshots_summary(ctxt)
# Calculate highest number of the objects (volumes or snapshots)
max_objs_num = max(num_vols, num_snaps)
# Make batch request loop counter
req_range = range(0, max_objs_num, req_limit)
volumes_to_migrate = volume_migration.VolumeMigrationList()
for req_offset in req_range:
# Retrieve 'req_limit' number of objects starting from
# 'req_offset' position
volumes, snapshots = None, None
if use_batch_objects_retrieval:
if req_offset < num_vols:
volumes = self._get_my_volumes(ctxt,
limit=req_limit,
offset=req_offset)
else:
volumes = objects.VolumeList()
if req_offset < num_snaps:
snapshots = self._get_my_snapshots(ctxt,
limit=req_limit,
offset=req_offset)
else:
snapshots = objects.SnapshotList()
# or retrieve all volumes and snapshots per single request
else:
volumes = self._get_my_volumes(ctxt)
snapshots = self._get_my_snapshots(ctxt)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception("Failed to re-export volume, "
"setting to ERROR.",
resource=volume)
volume.conditional_update({'status': 'error'},
{'status': 'in-use'})
# All other cleanups are processed by parent class -
# CleanableManager
except Exception:
LOG.exception("Error during re-export on driver init.",
resource=volume)
return
if len(volumes):
volumes_to_migrate.append(volumes, ctxt)
del volumes
del snapshots
self.driver.set_throttle()
        # At this point the driver is considered initialized.
        # NOTE(jdg): Careful though, because that doesn't mean the backend is
        # fully ready; it just means the driver setup calls completed
        # successfully.
self.driver.set_initialized()
backend_name = volume_utils.extract_host(self.service_topic_queue)
image_utils.cleanup_temporary_file(backend_name)
self._add_to_threadpool(key_migration.migrate_fixed_key,
volumes=volumes_to_migrate)
self.publish_service_capabilities(ctxt)
LOG.info("Driver initialization completed successfully.",
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
**kwargs)
def init_host_with_rpc(self):
LOG.info("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)",
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
utils.log_unsupported_driver_warning(self.driver)
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
LOG.error("Cannot complete RPC initialization because "
"driver isn't initialized properly.",
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
return
stats = self.driver.get_volume_stats(refresh=True)
try:
service = self._get_service()
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error("Service not found for updating replication_status.")
if service.replication_status != fields.ReplicationStatus.FAILED_OVER:
if stats and stats.get('replication_enabled', False):
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
if replication_status != service.replication_status:
service.replication_status = replication_status
service.save()
# Update the cluster replication status if necessary
cluster = service.cluster
if (cluster and
cluster.replication_status != service.replication_status):
cluster.replication_status = service.replication_status
cluster.save()
LOG.info("Driver post RPC initialization completed successfully.",
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def _do_cleanup(self, ctxt, vo_resource):
if isinstance(vo_resource, objects.Volume):
if vo_resource.status == 'downloading':
self.driver.clear_download(ctxt, vo_resource)
elif vo_resource.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, vo_resource.id)
elif vo_resource.status == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
vo_resource, cascade=True)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, vo_resource, cascade=True)
# We signal that we take care of cleaning the worker ourselves
# (with set_workers decorator in delete_volume method) so
# do_cleanup method doesn't need to remove it.
return True
if vo_resource.status in ('creating', 'downloading'):
vo_resource.status = 'error'
vo_resource.save()
def is_working(self):
return self.driver.initialized
def _set_resource_host(self, resource):
if (resource.is_clustered and
not volume_utils.hosts_are_equivalent(resource.host,
self.host)):
pool = volume_utils.extract_host(resource.host, 'pool')
resource.host = volume_utils.append_host(self.host, pool)
resource.save()
@objects.Volume.set_workers
def create_volume(self, context, volume, request_spec=None,
filter_properties=None, allow_reschedule=True):
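        # Build and run the create_volume taskflow; if the flow reverts and
        # reschedules the volume elsewhere, roll back the local
        # allocated-capacity bump made above.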
utils.log_unsupported_driver_warning(self.driver)
self._set_resource_host(volume)
self._update_allocated_capacity(volume)
original_host = volume.host
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = objects.RequestSpec()
try:
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
if snapshot_id is not None:
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
locked_action = "%s-%s" % (source_volid, 'delete_volume')
else:
locked_action = None
def _run_flow():
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
with coordination.COORDINATOR.get_lock(locked_action):
_run_flow()
finally:
try:
flow_engine.storage.fetch('refreshed')
except tfe.NotFound:
                # If there's no vol_ref, then the flow was reverted. Let's
                # check whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if rescheduled:
# Volume.host is None now, so we pass the original host value.
self._update_allocated_capacity(volume, decrement=True,
host=original_host)
# Shared targets is only relevant for iSCSI connections.
# We default to True to be on the safe side.
volume.shared_targets = (
self.driver.capabilities.get('storage_protocol') == 'iSCSI' and
self.driver.capabilities.get('shared_targets', True))
# TODO(geguileo): service_uuid won't be enough on Active/Active
volume.service_uuid = self.service_uuid
volume.save()
LOG.info("Created volume successfully.", resource=volume)
return volume.id
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = volume_utils.extract_host(
resource.service_topic_queue)
backend = volume_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
{'resource': resource.obj_name, 'id': resource.id,
'backend': backend})
raise exception.Invalid(msg)
@coordination.synchronized('{volume.id}-{f_name}')
@objects.Volume.set_workers
def delete_volume(self, context, volume, unmanage_only=False,
cascade=False):
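        # Delete (or unmanage) the volume on the backend, optionally cascading
        # to its snapshots, then release quota and update allocated capacity.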
context = context.elevated()
try:
volume.refresh()
except exception.VolumeNotFound:
LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
raise exception.VolumeAttached(volume_id=volume.id)
self._check_is_our_resource(volume)
if unmanage_only and volume.encryption_key_id is not None:
raise exception.Invalid(
reason=_("Unmanaging encrypted volumes is not "
"supported."))
if unmanage_only and cascade:
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
is_temp_vol = False
with volume.obj_as_admin():
if volume.admin_metadata.get('temporary', 'False') == 'True':
is_temp_vol = True
LOG.info("Trying to delete temp volume: %s", volume.id)
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
notification = "delete.start"
if unmanage_only:
notification = "unmanage.start"
if not is_temp_vol:
self._notify_about_volume_usage(context, volume, notification)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != fields.SnapshotStatus.DELETING:
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error("Unable to delete busy volume.",
resource=volume)
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
new_status = 'error_deleting'
if unmanage_only is True:
new_status = 'error_unmanaging'
self._clear_db(context, is_migrating_dest, volume,
new_status)
skip_quota = is_migrating or is_temp_vol
if not skip_quota:
try:
reservations = None
if volume.status != 'error_managing_deleting':
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception("Failed to update usages deleting volume.",
resource=volume)
volume.destroy()
if not skip_quota:
notification = "delete.end"
if unmanage_only:
notification = "unmanage.end"
self._notify_about_volume_usage(context, volume, notification)
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self._update_allocated_capacity(volume, decrement=True)
self.publish_service_capabilities(context)
msg = "Deleted volume successfully."
if unmanage_only:
msg = "Unmanaged volume successfully."
LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
if is_migrating_dest:
volume_ref.destroy()
LOG.error("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted).", resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def _revert_to_snapshot_generic(self, ctxt, volume, snapshot):
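        # Generic revert path: create a temporary volume from the snapshot,
        # copy its data over the original volume, then delete the temp volume.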
temp_vol = None
try:
v_options = {'display_name': '[revert] temporary volume created '
'from snapshot %s' % snapshot.id}
ctxt = context.get_internal_tenant_context() or ctxt
temp_vol = self.driver._create_temp_volume_from_snapshot(
ctxt, volume, snapshot, volume_options=v_options)
self._copy_volume_data(ctxt, temp_vol, volume)
self.driver.delete_volume(temp_vol)
temp_vol.destroy()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(
"Failed to use snapshot %(snapshot)s to create "
"a temporary volume and copy data to volume "
" %(volume)s.",
{'snapshot': snapshot.id,
'volume': volume.id})
if temp_vol and temp_vol.status == 'available':
self.driver.delete_volume(temp_vol)
temp_vol.destroy()
def _revert_to_snapshot(self, context, volume, snapshot):
try:
self.driver.revert_to_snapshot(context, volume, snapshot)
except (NotImplementedError, AttributeError):
LOG.info("Driver's 'revert_to_snapshot' is not found. "
"Try to use copy-snapshot-to-volume method.")
self._revert_to_snapshot_generic(context, volume, snapshot)
def _create_backup_snapshot(self, context, volume):
kwargs = {
'volume_id': volume.id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': volume.size,
'display_name': '[revert] volume %s backup snapshot' % volume.id,
'display_description': 'This is only used for backup when '
'reverting. If the reverting process '
'failed, you can restore you data by '
'creating new volume with this snapshot.',
'volume_type_id': volume.volume_type_id,
'encryption_key_id': volume.encryption_key_id,
'metadata': {}
}
snapshot = objects.Snapshot(context=context, **kwargs)
snapshot.create()
self.create_snapshot(context, snapshot)
return snapshot
def revert_to_snapshot(self, context, volume, snapshot):
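        # Revert a volume to one of its snapshots, optionally taking a backup
        # snapshot first, and reset volume/snapshot statuses on failure.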
backup_snapshot = None
try:
LOG.info("Start to perform revert to snapshot process.")
self._notify_about_volume_usage(context, volume,
"revert.start")
self._notify_about_snapshot_usage(context, snapshot,
"revert.start")
# Create a snapshot which can be used to restore the volume
# data by hand if revert process failed.
if self.driver.snapshot_revert_use_temp_snapshot():
backup_snapshot = self._create_backup_snapshot(context,
volume)
self._revert_to_snapshot(context, volume, snapshot)
except Exception as error:
with excutils.save_and_reraise_exception():
self._notify_about_volume_usage(context, volume,
"revert.end")
self._notify_about_snapshot_usage(context, snapshot,
"revert.end")
msg = ('Volume %(v_id)s revert to '
'snapshot %(s_id)s failed with %(error)s.')
msg_args = {'v_id': volume.id,
's_id': snapshot.id,
'error': six.text_type(error)}
v_res = volume.update_single_status_where(
'error',
'reverting')
if not v_res:
msg_args = {"id": volume.id,
"status": 'error'}
msg += ("Failed to reset volume %(id)s "
"status to %(status)s.") % msg_args
s_res = snapshot.update_single_status_where(
fields.SnapshotStatus.AVAILABLE,
fields.SnapshotStatus.RESTORING)
if not s_res:
msg_args = {"id": snapshot.id,
"status":
fields.SnapshotStatus.AVAILABLE}
msg += ("Failed to reset snapshot %(id)s "
"status to %(status)s." % msg_args)
LOG.exception(msg, msg_args)
v_res = volume.update_single_status_where(
'available', 'reverting')
if not v_res:
msg_args = {"id": volume.id,
"status": 'available'}
msg = _("Revert finished, but failed to reset "
"volume %(id)s status to %(status)s, "
"please manually reset it.") % msg_args
raise exception.BadResetResourceStatus(reason=msg)
s_res = snapshot.update_single_status_where(
fields.SnapshotStatus.AVAILABLE,
fields.SnapshotStatus.RESTORING)
if not s_res:
msg_args = {"id": snapshot.id,
"status":
fields.SnapshotStatus.AVAILABLE}
msg = _("Revert finished, but failed to reset "
"snapshot %(id)s status to %(status)s, "
"please manually reset it.") % msg_args
raise exception.BadResetResourceStatus(reason=msg)
if backup_snapshot:
self.delete_snapshot(context,
backup_snapshot, handle_quota=False)
msg = ('Volume %(v_id)s reverted to snapshot %(snap_id)s '
'successfully.')
msg_args = {'v_id': volume.id, 'snap_id': snapshot.id}
LOG.info(msg, msg_args)
self._notify_about_volume_usage(context, volume, "revert.end")
self._notify_about_snapshot_usage(context, snapshot, "revert.end")
@objects.Snapshot.set_workers
def create_snapshot(self, context, snapshot):
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception as create_error:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
self.message_api.create(
context,
action=message_field.Action.SNAPSHOT_CREATE,
resource_type=message_field.Resource.VOLUME_SNAPSHOT,
resource_uuid=snapshot['id'],
exception=create_error,
detail=message_field.Detail.SNAPSHOT_CREATE_ERROR)
vol_ref = self.db.volume_get(context, snapshot.volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, snapshot.volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata",
{'volume_id': snapshot.volume_id},
resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
self.message_api.create(
context,
action=message_field.Action.SNAPSHOT_CREATE,
resource_type=message_field.Resource.VOLUME_SNAPSHOT,
resource_uuid=snapshot['id'],
exception=ex,
detail=message_field.Detail.SNAPSHOT_UPDATE_METADATA_FAILED
)
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
# Resync with the volume's DB value. This addresses the case where
# fixed_key encryption key ID was migrated to Barbican.
snapshot.encryption_key_id = vol_ref.encryption_key_id
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info("Create snapshot completed successfully",
resource=snapshot)
return snapshot.id
@coordination.synchronized('{snapshot.id}-{f_name}')
def delete_snapshot(self, context, snapshot,
unmanage_only=False, handle_quota=True):
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy as busy_error:
LOG.error("Delete snapshot failed, due to snapshot busy.",
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
self.message_api.create(
context,
action=message_field.Action.SNAPSHOT_DELETE,
resource_type=message_field.Resource.VOLUME_SNAPSHOT,
resource_uuid=snapshot['id'],
exception=busy_error)
return
except Exception as delete_error:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
self.message_api.create(
context,
action=message_field.Action.SNAPSHOT_DELETE,
resource_type=message_field.Resource.VOLUME_SNAPSHOT,
resource_uuid=snapshot['id'],
exception=delete_error,
detail=message_field.Detail.SNAPSHOT_DELETE_ERROR)
# Get reservations
reservations = None
try:
if handle_quota:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception("Update snapshot usages failed.",
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
msg = "Delete snapshot completed successfully."
if unmanage_only:
msg = "Unmanage snapshot completed successfully."
LOG.info(msg, resource=snapshot)
@coordination.synchronized('{volume_id}')
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode, volume=None):
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look
# up the volume by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
# Get admin_metadata. This needs admin context.
with volume.obj_as_admin():
volume_metadata = volume.admin_metadata
# check the volume status before attaching
if volume.status == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
host_name_sanitized = volume_utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = (
VA_LIST.get_all_by_instance_uuid(
context, instance_uuid))
else:
attachments = (
VA_LIST.get_all_by_host(
context, host_name_sanitized))
if attachments:
# check if volume<->instance mapping is already tracked in DB
for attachment in attachments:
if attachment['volume_id'] == volume_id:
volume.status = 'in-use'
volume.save()
return attachment
if (volume.status == 'in-use' and not volume.multiattach
and not volume.migration_status):
raise exception.InvalidVolume(
reason=_("volume is already attached and multiple attachments "
"are not enabled"))
self._notify_about_volume_usage(context, volume,
"attach.start")
attachment = volume.begin_attach(mode)
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
raise exception.InvalidUUID(uuid=instance_uuid)
try:
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume.id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception as excep:
with excutils.save_and_reraise_exception():
self.message_api.create(
context,
message_field.Action.ATTACH_VOLUME,
resource_uuid=volume_id,
exception=excep)
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
volume = attachment.finish_attach(
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info("Attach volume completed successfully.",
resource=volume)
return attachment
@coordination.synchronized('{volume_id}-{f_name}')
def detach_volume(self, context, volume_id, attachment_id=None,
volume=None):
# TODO(vish): refactor this into a more general "unreserve"
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
if attachment_id:
try:
attachment = objects.VolumeAttachment.get_by_id(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info("Volume detach called, but volume not attached.",
resource=volume)
            # We need to make sure the volume status is set to the correct
            # status. It could be in detaching status now, and we don't want
            # to leave it there.
volume.finish_detach(attachment_id)
return
else:
attachments = volume.volume_attachment
if len(attachments) > 1:
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
                # There aren't any attachments for this volume, so set the
                # status to available and move on.
LOG.info("Volume detach called, but volume not attached.",
resource=volume)
volume.status = 'available'
volume.attach_status = fields.VolumeAttachStatus.DETACHED
volume.save()
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'), {
'attach_status':
fields.VolumeAttachStatus.ERROR_DETACHING})
        # NOTE(jdg): We used to do an ensure_export here to catch upgrades
        # while volumes were attached (E->F); this was necessary to convert
        # in-use volumes from int IDs to UUIDs. We don't need that any longer,
        # but we still remove the export here (delete the iSCSI target).
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception("Detach volume failed, due to "
"uninitialized driver.",
resource=volume)
except Exception as ex:
LOG.exception("Detach volume failed, due to "
"remove-export failure.",
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
volume.finish_detach(attachment.id)
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info("Detach volume completed successfully.", resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
cache_entry = self.image_volume_cache.get_entry(ctx,
volume_ref,
image_id,
image_meta)
if cache_entry:
            LOG.debug('Cache entry already exists with image ID '
                      '%(image_id)s',
                      {'image_id': image_id})
return
image_volume = None
try:
if not self.image_volume_cache.ensure_space(ctx, volume_ref):
LOG.warning('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on %(service)s.',
{'image': image_id,
'service': volume_ref.service_topic_queue})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.',
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning('Failed to create new image-volume cache entry.'
' Error: %(exception)s', {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
# NOTE(yikun): Skip 'snapshot_id', 'source_volid' keys to avoid
# creating tmp img vol from wrong snapshot or wrong source vol.
skip = {'snapshot_id', 'source_volid'}
skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = (
fields.VolumeAttachStatus.DETACHED)
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
LOG.exception('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s, '
'failed (Exception: %(except)s)',
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume, allow_reschedule=False)
image_volume.refresh()
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.',
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume)
except exception.CinderException:
LOG.exception('Could not delete the image volume %(id)s.',
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
# The image_owner metadata should be set before uri is added to
# the image so glance cinder store can check its owner.
image_volume_meta = {'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception('Failed to register image volume location '
'%(uri)s.', {'uri': uri})
if not image_registered:
LOG.warning('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.',
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception('Could not delete failed image volume '
'%(id)s.', {'id': image_volume.id})
return False
image_volume_meta['glance_image_id'] = image_meta['id']
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = objects.Volume.get_by_id(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error("Upload volume to image encountered an error "
"(image-id: %(image_id)s).",
{'image_id': image_meta['id']},
resource=volume)
self.message_api.create(
context,
message_field.Action.COPY_VOLUME_TO_IMAGE,
resource_uuid=volume_id,
exception=error,
detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info("Copy volume to image completed successfully.",
resource=volume)
def _delete_image(self, context, image_id, image_service):
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning("Deleting image in unexpected status: "
"%(image_status)s.",
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning("Image delete encountered an error.",
exc_info=True, resource={'type': 'image',
'id': image_id})
def _parse_connection_options(self, context, volume, conn_info):
# Add qos_specs to connection info
typeid = volume.volume_type_id
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
# NOTE(mnaser): The following configures for per-GB QoS
if specs is not None:
volume_size = int(volume.size)
tune_opts = ('read_iops_sec', 'read_bytes_sec',
'write_iops_sec', 'write_bytes_sec',
'total_iops_sec', 'total_bytes_sec')
for option in tune_opts:
option_per_gb = '%s_per_gb' % option
option_per_gb_min = '%s_per_gb_min' % option
option_max = '%s_max' % option
if option_per_gb in specs:
minimum_value = int(specs.pop(option_per_gb_min, 0))
value = int(specs[option_per_gb]) * volume_size
per_gb_value = max(minimum_value, value)
max_value = int(specs.pop(option_max, per_gb_value))
specs[option] = min(per_gb_value, max_value)
specs.pop(option_per_gb)
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = volume.admin_metadata
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.encryption_key_id)
conn_info['data']['encrypted'] = encrypted
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
return conn_info
def initialize_connection(self, context, volume, connector):
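        # Validate the connector, create an export for the volume, ask the
        # driver for connection info and augment it with QoS/access-mode
        # details before returning it to the caller.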
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException as ex:
msg = _("Create export of volume failed (%s)") % ex.msg
LOG.exception(msg, resource=volume)
raise exception.VolumeBackendAPIException(data=msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except Exception as ex:
LOG.exception("Model update failed.", resource=volume)
try:
self.driver.remove_export(context.elevated(), volume)
except Exception:
LOG.exception('Could not remove export after DB model failed.')
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except exception.ConnectorRejected:
with excutils.save_and_reraise_exception():
LOG.info("The connector was rejected by the volume driver.")
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(context, volume, conn_info)
LOG.info("Initialize volume connection completed successfully.",
resource=volume)
return conn_info
def initialize_connection_snapshot(self, ctxt, snapshot_id, connector):
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate snapshot connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=err_msg)
model_update = None
try:
LOG.debug("Snapshot %s: creating export.", snapshot.id)
model_update = self.driver.create_export_snapshot(
ctxt.elevated(), snapshot, connector)
if model_update:
snapshot.provider_location = model_update.get(
'provider_location', None)
snapshot.provider_auth = model_update.get(
'provider_auth', None)
snapshot.save()
except exception.CinderException as ex:
msg = _("Create export of snapshot failed (%s)") % ex.msg
LOG.exception(msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=msg)
try:
if model_update:
snapshot.update(model_update)
snapshot.save()
except exception.CinderException as ex:
LOG.exception("Model update failed.", resource=snapshot)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn = self.driver.initialize_connection_snapshot(snapshot,
connector)
except Exception as err:
try:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') %
{'err': six.text_type(err)})
LOG.error(err_msg)
LOG.debug("Cleaning up failed connect initialization.")
self.driver.remove_export_snapshot(ctxt.elevated(), snapshot)
except Exception as ex:
ex_msg = (_('Error encountered during cleanup '
'of a failed attach: %(ex)s') %
{'ex': six.text_type(ex)})
LOG.error(ex_msg)
raise exception.VolumeBackendAPIException(data=ex_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Initialize snapshot connection completed successfully.",
resource=snapshot)
return conn
def terminate_connection(self, context, volume_id, connector, force=False):
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate volume connection completed successfully.",
resource=volume_ref)
def terminate_connection_snapshot(self, ctxt, snapshot_id,
connector, force=False):
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.terminate_connection_snapshot(snapshot, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate snapshot connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate snapshot connection completed successfully.",
resource=snapshot)
def remove_export(self, context, volume_id):
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Remove volume export completed successfully.",
resource=volume_ref)
def remove_export_snapshot(self, ctxt, snapshot_id):
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.remove_export_snapshot(ctxt, snapshot)
except Exception:
msg = _("Remove snapshot export failed.")
LOG.exception(msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Remove snapshot export completed successfully.",
resource=snapshot)
def accept_transfer(self, context, volume_id, new_user, new_project,
no_snapshots=False):
utils.require_driver_initialized(self.driver)
        # NOTE(jdg): We need an elevated context here because we haven't
        # "given" the volume to the new owner yet.
volume_ref = self.db.volume_get(context.elevated(), volume_id)
        # NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
        # for those that do, allow them to return updated model info.
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception("Update volume model for "
"transfer operation failed.",
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info("Transfer volume completed successfully.",
resource=volume_ref)
return model_update
def _connect_device(self, conn):
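        # Use os-brick to connect to the volume described by conn and verify
        # the resulting device (or file handle) is actually accessible.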
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False,
attach_encryptor=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to attach volume %(vol)s.",
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume, properties)
attach_info = self._connect_device(conn)
try:
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_attach_volume_encryptor(ctxt,
attach_info,
encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to attach volume encryptor"
" %(vol)s.", {'vol': volume['id']})
self._detach_volume(ctxt, attach_info, volume, properties,
force=True)
return attach_info
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False,
attach_encryptor=False):
connector = attach_info['connector']
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_detach_volume_encryptor(attach_info, encryption)
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'], force=force)
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error('Unable to terminate volume connection: '
'%(err)s.', {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
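        # Attach both volumes (locally or over RPC), copy the data across
        # (through an encryptor if the encryption spec changed), then detach
        # both even if the copy fails.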
LOG.debug('_copy_volume_data %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
attach_encryptor = False
# If the encryption method or key is changed, we have to
# copy data through dm-crypt.
if volume_types.volume_types_encryption_changed(
ctxt,
src_vol.volume_type_id,
dest_vol.volume_type_id):
attach_encryptor = True
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
properties = utils.brick_get_connector_properties(use_multipath,
enforce_multipath)
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(
ctxt, dest_vol, properties,
remote=dest_remote,
attach_encryptor=attach_encryptor)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(
ctxt, src_vol, properties,
remote=src_remote,
attach_encryptor=attach_encryptor)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to attach source volume for copy.")
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote,
attach_encryptor=attach_encryptor,
force=True)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt,
dest_vol.service_topic_queue,
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
volume_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to copy volume %(src)s to %(dest)s.",
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=True,
remote=dest_remote,
attach_encryptor=attach_encryptor)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=True,
remote=src_remote,
attach_encryptor=attach_encryptor)
def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
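        # Host-assisted migration: create a new volume on the destination
        # backend, wait for it to become available, copy the data (or hand the
        # swap over to nova for attached volumes) and complete the migration.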
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
tmp_skip = {'snapshot_id', 'source_volid'}
skip = {'host', 'cluster_name', 'availability_zone'}
skip.update(tmp_skip)
skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES)
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
if volume_types.volume_types_encryption_changed(
ctxt, volume.volume_type_id, new_type_id):
encryption_key_id = volume_utils.create_encryption_key(
ctxt, self.key_manager, new_type_id)
new_vol_values['encryption_key_id'] = encryption_key_id
dst_service = self._get_service(backend['host'])
new_volume = objects.Volume(
context=ctxt,
host=backend['host'],
availability_zone=dst_service.availability_zone,
cluster_name=backend.get('cluster_name'),
status='creating',
attach_status=fields.VolumeAttachStatus.DETACHED,
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, None, None,
allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
new_volume.refresh()
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
new_volume.refresh()
        # Restore the values we skipped earlier (snapshot_id, source_volid);
        # they were left out so that volume creation on the destination would
        # go through the raw-volume path only (_create_raw_volume).
tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
if tmp_skipped_values:
new_volume.update(tmp_skipped_values)
new_volume.save()
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
# A volume might have attachments created, but if it is reserved
# it means it's being migrated prior to the attachment completion.
if not attachments or volume.status == 'reserved':
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
self.migrate_volume_completion(ctxt, volume, new_volume,
error=False)
else:
nova_api = compute.API()
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(
"Failed to copy volume %(vol1)s to %(vol2)s", {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
        # If we're in the migrating phase, we need to clean up the destination
        # volume because the source volume remains.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
new_volume.destroy()
else:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume.",
{'vol': new_volume.id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
# source! But the migration_status in database should
# be cleared to handle volume after migration failure
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase.",
{'vol': new_volume.id})
LOG.warning("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted.",
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
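        # Finish (or roll back) a host-assisted migration: detach the source
        # if needed, swap the source and destination DB records, fix up
        # attachments, and asynchronously delete the old backend volume.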
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
        # A lot of what follows depends on the volume's previous and current
        # status; we do our best to make this work in all cases.
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
volume_attachments = []
        # NOTE(jdg): With new attach flow, we deleted the attachment, so the
        # original volume should now be listed as available; we still need to
        # do the magic swappy thing of name.id etc, but the detach needed to
        # toggle the status has already been done.
if orig_volume_status == 'in-use' and volume.status != 'available':
for attachment in volume.volume_attachment:
# Save the attachments the volume currently have
volume_attachments.append(attachment)
try:
self.detach_volume(ctxt, volume.id, attachment.id)
except Exception as ex:
LOG.error("Detach migration source volume "
"%(volume.id)s from attachment "
"%(attachment.id)s failed: %(err)s",
{'err': ex,
'volume.id': volume.id,
'attachment.id': attachment.id},
resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
# Note this needs to go through rpc to the host of the new volume
# the current host and driver object is for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
        # NOTE(jdg): With the new attachment API's Nova will delete the
        # attachment after updating the volume_id. In the old flow the volumes
        # are in attaching and deleting status at this point (dest/new is
        # deleting, but we've done our magic when you step through it) and the
        # re-attach calls below are needed. In the new flow we simplified this
        # and they are not required.
if orig_volume_status == 'in-use' and volume.status in ['available',
'reserved',
'attaching']:
for attachment in volume_attachments:
LOG.debug('Re-attaching: %s', attachment)
rpcapi.attach_volume(ctxt, volume,
attachment.instance_uuid,
attachment.attached_host,
attachment.mountpoint,
attachment.attach_mode or 'rw')
            # At this point we have done almost all of our swapping and
            # state-changes. The target volume is now marked back to
            # "in-use", the destination/worker volume is now in deleting
            # state, and the next steps will finish the deletion.
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s',
{'vol': volume.id, 'err': ex})
# For the new flow this is really the key part. We just use the
# attachments to the worker/destination volumes that we created and
# used for the libvirt migration and we'll just swap their volume_id
for attachment in VA_LIST.get_all_by_volume_id(ctxt, updated_new.id):
attachment.volume_id = volume.id
attachment.save()
LOG.info("Complete-Migrate volume completed successfully.",
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
new_type_id=None):
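        """Migrate the volume to the specified host or cluster.

        The driver is first given the chance to migrate the volume natively
        (unless a host copy is forced or a retype is involved); if it cannot,
        fall back to the generic host-copy migration.
        """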
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
dst_service = self._get_service(host['host'])
updates = {
'host': host['host'],
'cluster_name': host.get('cluster_name'),
'migration_status': 'success',
'availability_zone': dst_service.availability_zone,
'previous_status': volume.status,
}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host, new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info("Migrate volume completed successfully.",
resource=volume)
def _report_driver_status(self, context):
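        """Collect backend stats from the driver and publish capabilities.

        Also resolves replication error status for replicated groups and
        volumes when the backend reports a replication error.
        """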
        # self.service_uuid may not be set yet (e.g. the services were not
        # restarted), so try to look it up here before reporting stats.
if not self.service_uuid:
try:
service = self._get_service()
self.service_uuid = service.uuid
except exception.ServiceNotFound:
LOG.warning("Attempt to update service_uuid "
"resulted in a Service NotFound "
"exception, service_uuid field on "
"volumes will be NULL.")
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning("Update driver status failed: %(config_group)s "
"is uninitialized.",
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if "pools" in volume_stats:
for pool in volume_stats["pools"]:
pool.update(self.extra_capabilities)
else:
volume_stats.update(self.extra_capabilities)
if volume_stats:
if volume_stats.get('replication_status') == (
fields.ReplicationStatus.ERROR):
filters = self._get_cluster_or_host_filters()
groups = objects.GroupList.get_all_replicated(
context, filters=filters)
group_model_updates, volume_model_updates = (
self.driver.get_replication_error_status(context,
groups))
for grp_update in group_model_updates:
try:
grp_obj = objects.Group.get_by_id(
context, grp_update['group_id'])
grp_obj.update(grp_update)
grp_obj.save()
except exception.GroupNotFound:
LOG.warning("Group %(grp)s not found while "
"updating driver status.",
{'grp': grp_update['group_id']},
resource={
'type': 'group',
'id': grp_update['group_id']})
for vol_update in volume_model_updates:
try:
vol_obj = objects.Volume.get_by_id(
context, vol_update['volume_id'])
vol_obj.update(vol_update)
vol_obj.save()
except exception.VolumeNotFound:
LOG.warning("Volume %(vol)s not found while "
"updating driver status.",
{'vol': vol_update['volume_id']},
resource={
'type': 'volume',
'id': vol_update['volume_id']})
self._append_volume_stats(volume_stats)
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
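        """Merge locally tracked allocated_capacity_gb into driver stats."""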
pools = vol_stats.get('pools', None)
if pools:
if isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
else:
raise exception.ProgrammingError(
reason='Pools stats reported by the driver are not '
'reported in a list')
elif self.stats.get('pools'):
vol_stats.update(next(iter(self.stats['pools'].values())))
else:
vol_stats.update(self.stats)
vol_stats.pop('pools', None)
def _append_filter_goodness_functions(self, volume_stats):
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
@periodic_task.periodic_task(spacing=CONF.backend_stats_polling_interval)
def publish_service_capabilities(self, context):
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
volume_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
volume_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume, new_size, reservations):
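        """Extend a volume to new_size and commit the quota reservations.

        For in-use volumes Nova is asked to extend the attachments as well,
        and the pool's allocated capacity is increased by the size delta.
        """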
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except exception.TargetUpdateFailed:
# We just want to log this but continue on with quota commit
LOG.warning('Volume extended but failed to update target.')
except Exception:
LOG.exception("Extend volume failed.",
resource=volume)
self.message_api.create(
context,
message_field.Action.EXTEND_VOLUME,
resource_uuid=volume.id,
detail=message_field.Detail.DRIVER_FAILED_EXTEND)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
attachments = volume.volume_attachment
if not attachments:
orig_volume_status = 'available'
else:
orig_volume_status = 'in-use'
volume.update({'size': int(new_size), 'status': orig_volume_status})
volume.save()
if orig_volume_status == 'in-use':
nova_api = compute.API()
instance_uuids = [attachment.instance_uuid
for attachment in attachments]
nova_api.extend_volume(context, instance_uuids, volume.id)
pool = volume_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info("Extend volume completed successfully.",
resource=volume)
def _is_our_backend(self, host, cluster_name):
return ((not cluster_name and
volume_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
volume_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
def retype(self, context, volume, new_type_id, host,
migration_policy='never', reservations=None,
old_reservations=None):
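        """Change a volume's type, migrating it to another backend if needed.

        The driver is asked to retype in place first; if that fails or is not
        possible, the volume is migrated (unless migration_policy is 'never')
        and the volume record is then updated with the new type and host.
        """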
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
if old_reservations:
QUOTAS.rollback(context, old_reservations)
if new_reservations:
QUOTAS.rollback(context, new_reservations)
previous_status = (
volume.previous_status or volume.status)
status_update = {'status': previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
                # NOTE(flaper87): Other exceptions in this method don't
                # set the volume status to error; restore the previous
                # status here for now.
volume.update(status_update)
volume.save()
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything.
        # Use the admin context to be able to access volume extra_specs
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context.elevated(), volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
        # NOTE(jdg): Check to see if the destination host or cluster (depending
        # on whether the volume is in a clustered backend or not) is the same
        # as the current one. If it is not, don't ask the driver to retype in
        # place, since the result would be invalid in the case of a migrate.
        # We assume that those that support pools do this internally
        # so we strip off the pools designation
if (not retyped and
not diff.get('encryption') and
self._is_our_backend(host['host'], host.get('cluster_name'))):
try:
new_type = volume_types.get_volume_type(context.elevated(),
new_type_id)
with volume.obj_as_admin():
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info("Volume %s: retyped successfully.", volume.id)
except Exception:
retyped = False
LOG.exception("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism.", volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
            if (rep_status is not None and rep_status not in
[fields.ReplicationStatus.DISABLED,
fields.ReplicationStatus.NOT_CAPABLE]):
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'cluster_name': host.get('cluster_name'),
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self._set_replication_status(diff, model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info("Retype volume completed successfully.",
resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
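        """Set replication_status in model_update based on the type diff."""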
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = volume_utils.is_boolean_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
def manage_existing(self, ctxt, volume, ref=None):
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info("Manage existing volume completed successfully.",
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
pool = volume_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def _get_cluster_or_host_filters(self):
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
return filters
def _get_my_volumes_summary(self, ctxt):
filters = self._get_cluster_or_host_filters()
return objects.VolumeList.get_volume_summary(ctxt, False, filters)
def _get_my_snapshots_summary(self, ctxt):
filters = self._get_cluster_or_host_filters()
return objects.SnapshotList.get_snapshot_summary(ctxt, False, filters)
def _get_my_resources(self, ctxt, ovo_class_list, limit=None, offset=None):
filters = self._get_cluster_or_host_filters()
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters,
limit=limit,
offset=offset)
def _get_my_volumes(self, ctxt, limit=None, offset=None):
return self._get_my_resources(ctxt, objects.VolumeList,
limit, offset)
def _get_my_snapshots(self, ctxt, limit=None, offset=None):
return self._get_my_resources(ctxt, objects.SnapshotList,
limit, offset)
def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable volumes failed, due "
"to uninitialized driver.")
cinder_volumes = self._get_my_volumes(ctxt)
try:
driver_entries = self.driver.get_manageable_volumes(
cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableVolumeList.
from_primitives(ctxt, driver_entries))
except AttributeError:
LOG.debug('Driver does not support listing manageable volumes.')
return []
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable volumes failed, due "
"to driver error.")
return driver_entries
def create_group(self, context, group):
context = context.elevated()
self._set_resource_host(group)
status = fields.GroupStatus.AVAILABLE
model_update = None
self._notify_about_group_usage(context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info("Group %s: creating", group.name)
try:
model_update = self.driver.create_group(context, group)
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update = self._create_group_generic(context, group)
else:
cg, __ = self._convert_group_to_cg(group, [])
model_update = self.driver.create_consistencygroup(
context, cg)
if model_update:
if (model_update['status'] ==
fields.GroupStatus.ERROR):
msg = (_('Create group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error("Group %s: create failed",
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info("Group %s: created successfully", group.name)
self._notify_about_group_usage(context, group, "create.end")
LOG.info("Create group completed successfully.",
resource={'type': 'group',
'id': group.id})
return group
def create_group_from_src(self, context, group,
group_snapshot=None, source_group=None):
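        """Create a group and its volumes from a group snapshot or a group."""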
source_name = None
snapshots = None
source_vols = None
try:
volumes = objects.VolumeList.get_all_by_generic_group(context,
group.id)
if group_snapshot:
try:
group_snapshot.refresh()
except exception.GroupSnapshotNotFound:
LOG.error("Create group from snapshot-%(snap)s failed: "
"SnapshotNotFound.",
{'snap': group_snapshot.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("snapshot-%s") % group_snapshot.id
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_GROUP_SRC_SNAP_STATUS):
msg = (_("Cannot create group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
raise exception.InvalidGroup(reason=msg)
if source_group:
try:
source_group.refresh()
except exception.GroupNotFound:
LOG.error("Create group "
"from source group-%(group)s failed: "
"GroupNotFound.",
{'group': source_group.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("group-%s") % source_group.id
source_vols = objects.VolumeList.get_all_by_generic_group(
context, source_group.id)
for source_vol in source_vols:
if (source_vol.status not in
VALID_CREATE_GROUP_SRC_GROUP_STATUS):
msg = (_("Cannot create group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol.id,
'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
raise exception.InvalidGroup(reason=msg)
sorted_snapshots = None
if group_snapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
sorted_source_vols = None
if source_group and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_group_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.create_group_from_src(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group, sorted_source_vols))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update, volumes_model_update = (
self._create_group_from_src_generic(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group,
sorted_source_vols))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
cgsnapshot, sorted_snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, sorted_snapshots, context))
source_cg, sorted_source_vols = (
self._convert_group_to_cg(source_group,
sorted_source_vols))
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, cg, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
self._remove_consistencygroup_id_from_volumes(volumes)
self._remove_consistencygroup_id_from_volumes(
sorted_source_vols)
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error("Create group "
"from source %(source)s failed.",
{'source': source_name},
resource={'type': 'group',
'id': group.id})
self._remove_consistencygroup_id_from_volumes(volumes)
for vol in volumes:
vol.status = 'error'
vol.save()
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_group_usage(
context, group, "create.end")
LOG.info("Create group "
"from source-%(source)s completed successfully.",
{'source': source_name},
resource={'type': 'group',
'id': group.id})
return group
def _create_group_from_src_generic(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
model_update = {'status': 'available'}
volumes_model_update = []
for vol in volumes:
if snapshots:
for snapshot in snapshots:
if vol.snapshot_id == snapshot.id:
vol_model_update = {'id': vol.id}
try:
driver_update = (
self.driver.create_volume_from_snapshot(
vol, snapshot))
if driver_update:
driver_update.pop('id', None)
vol_model_update.update(driver_update)
if 'status' not in vol_model_update:
vol_model_update['status'] = 'available'
except Exception:
vol_model_update['status'] = 'error'
model_update['status'] = 'error'
volumes_model_update.append(vol_model_update)
break
elif source_vols:
for source_vol in source_vols:
if vol.source_volid == source_vol.id:
vol_model_update = {'id': vol.id}
try:
driver_update = self.driver.create_cloned_volume(
vol, source_vol)
if driver_update:
driver_update.pop('id', None)
vol_model_update.update(driver_update)
if 'status' not in vol_model_update:
vol_model_update['status'] = 'available'
except Exception:
vol_model_update['status'] = 'error'
model_update['status'] = 'error'
volumes_model_update.append(vol_model_update)
break
return model_update, volumes_model_update
def _sort_snapshots(self, volumes, snapshots):
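        """Return snapshots reordered to match the target volumes' order."""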
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error("Source snapshot cannot be found for target "
"volume %(volume_id)s.",
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error("Source volumes cannot be found for target "
"volume %(volume_id)s.",
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
source_volid = vol.get('source_volid')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
if source_volid:
source_vol = objects.Volume.get_by_id(context, source_volid)
if source_vol.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context, source_volid, vol['id'])
if source_vol.multiattach:
update['multiattach'] = True
except exception.SnapshotNotFound:
LOG.error("Source snapshot %(snapshot_id)s cannot be found.",
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = fields.GroupStatus.ERROR
group.save()
raise
except exception.VolumeNotFound:
LOG.error("The source volume %(volume_id)s "
"cannot be found.",
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = fields.GroupStatus.ERROR
group.save()
raise
except exception.CinderException as ex:
LOG.error("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata.",
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = fields.GroupStatus.ERROR
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol, decrement=False, host=None):
host = host or vol['host']
pool = volume_utils.extract_host(host, 'pool')
if pool is None:
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(host,
'pool',
True)
vol_size = -vol['size'] if decrement else vol['size']
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += vol_size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=max(vol_size, 0))
def delete_group(self, context, group):
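        """Delete a group and all of its volumes, updating quota usages."""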
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
for vol_obj in volumes:
if vol_obj.attach_status == "attached":
raise exception.VolumeAttached(volume_id=vol_obj.id)
self._check_is_our_resource(vol_obj)
self._notify_about_group_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.delete_group(context, group, volumes))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update, volumes_model_update = (
self._delete_group_generic(context, group, volumes))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, cg,
volumes))
self._remove_consistencygroup_id_from_volumes(volumes)
if volumes_model_update:
for update in volumes_model_update:
if (update['status'] in ['error_deleting', 'error']
and model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = update['status']
self.db.volumes_update(context, volumes_model_update)
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
if not volumes_model_update:
self._remove_consistencygroup_id_from_volumes(volumes)
for vol_obj in volumes:
vol_obj.status = 'error'
vol_obj.save()
try:
reserve_opts = {'groups': -1}
grpreservations = GROUP_QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
grpreservations = None
LOG.exception("Delete group "
"failed to update usages.",
resource={'type': 'group',
'id': group.id})
for vol in volumes:
try:
reserve_opts = {'volumes': -1,
'gigabytes': -vol.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
vol.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception("Delete group "
"failed to update usages.",
resource={'type': 'group',
'id': group.id})
vol.destroy()
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= vol.size
if grpreservations:
GROUP_QUOTAS.commit(context, grpreservations,
project_id=project_id)
group.destroy()
self._notify_about_group_usage(
context, group, "delete.end")
self.publish_service_capabilities(context)
LOG.info("Delete group "
"completed successfully.",
resource={'type': 'group',
'id': group.id})
def _convert_group_to_cg(self, group, volumes):
if not group:
return None, None
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
ctxt):
if not group_snapshot:
return None, None
cgsnap = cgsnapshot.CGSnapshot()
cgsnap.from_group_snapshot(group_snapshot)
grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
cg, __ = self._convert_group_to_cg(grp, [])
cgsnap.consistencygroup = cg
for snap in snapshots:
snap.cgsnapshot_id = snap.group_snapshot_id
snap.cgsnapshot = cgsnap
return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
def _create_group_generic(self, context, group):
model_update = {'status': fields.GroupStatus.AVAILABLE,
'created_at': timeutils.utcnow()}
return model_update
def _delete_group_generic(self, context, group, volumes):
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
return None, None, None
def _collect_volumes_for_group(self, context, group, volumes, add=True):
if add:
valid_status = VALID_ADD_VOL_TO_GROUP_STATUS
else:
valid_status = VALID_REMOVE_VOL_FROM_GROUP_STATUS
volumes_ref = []
if not volumes:
return volumes_ref
for add_vol in volumes.split(','):
try:
add_vol_ref = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error("Update group "
"failed to %(op)s volume-%(volume_id)s: "
"VolumeNotFound.",
{'volume_id': add_vol,
'op': 'add' if add else 'remove'},
resource={'type': 'group',
'id': group.id})
raise
if add_vol_ref.status not in valid_status:
msg = (_("Can not %(op)s volume %(volume_id)s to "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref.id,
'group_id': group.id,
'status': add_vol_ref.status,
'valid': valid_status,
'op': 'add' if add else 'remove'})
raise exception.InvalidVolume(reason=msg)
if add:
self._check_is_our_resource(add_vol_ref)
volumes_ref.append(add_vol_ref)
return volumes_ref
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
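        """Add volumes to and/or remove volumes from a generic group."""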
add_volumes_ref = self._collect_volumes_for_group(context,
group,
add_volumes,
add=True)
remove_volumes_ref = self._collect_volumes_for_group(context,
group,
remove_volumes,
add=False)
self._notify_about_group_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
try:
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_group(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update, add_volumes_update, remove_volumes_update = (
self._update_group_generic(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
else:
cg, remove_volumes_ref = self._convert_group_to_cg(
group, remove_volumes_ref)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, cg,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
volumes_to_update = []
if add_volumes_update:
volumes_to_update.extend(add_volumes_update)
if remove_volumes_update:
volumes_to_update.extend(remove_volumes_update)
self.db.volumes_update(context, volumes_to_update)
if model_update:
if model_update['status'] in (
[fields.GroupStatus.ERROR]):
msg = (_('Error occurred when updating group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except Exception as e:
with excutils.save_and_reraise_exception():
if isinstance(e, exception.VolumeDriverException):
LOG.error("Error occurred in the volume driver when "
"updating group %(group_id)s.",
{'group_id': group.id})
else:
LOG.error("Failed to update group %(group_id)s.",
{'group_id': group.id})
group.status = fields.GroupStatus.ERROR
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
for rem_vol in remove_volumes_ref:
if isinstance(e, exception.VolumeDriverException):
rem_vol.consistencygroup_id = None
rem_vol.consistencygroup = None
rem_vol.status = 'error'
rem_vol.save()
for add_vol in add_volumes_ref:
add_vol.group_id = group.id
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.group_id = None
rem_vol.save()
group.status = fields.GroupStatus.AVAILABLE
group.save()
self._notify_about_group_usage(
context, group, "update.end")
LOG.info("Update group completed successfully.",
resource={'type': 'group',
'id': group.id})
def create_group_snapshot(self, context, group_snapshot):
caller_context = context
context = context.elevated()
LOG.info("GroupSnapshot %s: creating.", group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
{'grp_snap_id': group_snapshot.id})
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.create_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group_snapshot.group_type_id):
model_update, snapshots_model_update = (
self._create_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
snap_id = snap_model.pop('id')
snap_obj = objects.Snapshot.get_by_id(context, snap_id)
snap_obj.update(snap_model)
snap_obj.save()
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
[fields.GroupSnapshotStatus.ERROR_DELETING,
fields.GroupSnapshotStatus.ERROR]):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == fields.GroupSnapshotStatus.ERROR:
msg = (_('Error occurred when creating group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = fields.GroupSnapshotStatus.ERROR
group_snapshot.save()
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot.volume_id
snapshot_id = snapshot.id
vol_obj = objects.Volume.get_by_id(context, volume_id)
if vol_obj.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
pass
except exception.CinderException as ex:
LOG.error("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata.",
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
group_snapshot.status = fields.GroupSnapshotStatus.AVAILABLE
group_snapshot.save()
LOG.info("group_snapshot %s: created successfully",
group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.end")
return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
driver_update = self.driver.create_snapshot(snapshot)
if driver_update:
driver_update.pop('id', None)
snapshot_model_update.update(driver_update)
if 'status' not in snapshot_model_update:
snapshot_model_update['status'] = (
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = (
fields.SnapshotStatus.ERROR)
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
snapshots):
model_update = {'status': group_snapshot.status}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.delete_snapshot(snapshot)
snapshot_model_update['status'] = (
fields.SnapshotStatus.DELETED)
except exception.SnapshotIsBusy:
snapshot_model_update['status'] = (
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = (
fields.SnapshotStatus.ERROR)
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def delete_group_snapshot(self, context, group_snapshot):
caller_context = context
context = context.elevated()
project_id = group_snapshot.project_id
LOG.info("group_snapshot %s: deleting", group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
{'grp_snap_id': group_snapshot.id})
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.delete_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group_snapshot.group_type_id):
model_update, snapshots_model_update = (
self._delete_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap_model.pop('id')
snap.update(snap_model)
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = fields.GroupSnapshotStatus.ERROR
group_snapshot.save()
if not snapshots_model_update:
self._remove_cgsnapshot_id_from_snapshots(snapshots)
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
try:
reserve_opts = {'snapshots': -1}
if not CONF.no_snapshot_gb_quota:
reserve_opts['gigabytes'] = -snapshot.volume_size
volume_ref = objects.Volume.get_by_id(context,
snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception("Failed to update usages deleting snapshot")
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot.id)
snapshot.destroy()
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
group_snapshot.destroy()
LOG.info("group_snapshot %s: deleted successfully",
group_snapshot.id)
self._notify_about_group_snapshot_usage(context, group_snapshot,
"delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
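        """Finalize source/destination volume records after a migration."""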
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
# Replication V2.1 and a/a method
def failover(self, context, secondary_backend_id=None):
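        """Fail over (or fail back) this backend to a replication target.

        Non-replicated volumes and their snapshots are set to error when
        failing over; the per-volume and per-group updates returned by the
        driver are then applied to the database.
        """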
updates = {}
repl_status = fields.ReplicationStatus
service = self._get_service()
# TODO(geguileo): We should optimize these updates by doing them
# directly on the DB with just 3 queries, one to change the volumes
# another to change all the snapshots, and another to get replicated
# volumes.
# Change non replicated volumes and their snapshots to error if we are
# failing over, leave them as they are for failback
volumes = self._get_my_volumes(context)
replicated_vols = []
for volume in volumes:
if volume.replication_status not in (repl_status.DISABLED,
repl_status.NOT_CAPABLE):
replicated_vols.append(volume)
elif secondary_backend_id != self.FAILBACK_SENTINEL:
volume.previous_status = volume.status
volume.status = 'error'
volume.replication_status = repl_status.NOT_CAPABLE
volume.save()
for snapshot in volume.snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
volume_update_list = None
group_update_list = None
try:
# For non clustered we can call v2.1 failover_host, but for
# clustered we call a/a failover method. We know a/a method
# exists because BaseVD class wouldn't have started if it didn't.
failover = getattr(self.driver,
'failover' if service.is_clustered
else 'failover_host')
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
# It includes volumes in replication groups and those not in them
# expected form of group_update_list:
# [{group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}},
# {group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}}]
filters = self._get_cluster_or_host_filters()
groups = objects.GroupList.get_all_replicated(context,
filters=filters)
active_backend_id, volume_update_list, group_update_list = (
failover(context,
replicated_vols,
secondary_id=secondary_backend_id,
groups=groups))
try:
update_data = {u['volume_id']: u['updates']
for u in volume_update_list}
except KeyError:
msg = "Update list, doesn't include volume_id"
raise exception.ProgrammingError(reason=msg)
try:
update_group_data = {g['group_id']: g['updates']
for g in group_update_list}
except KeyError:
msg = "Update list, doesn't include group_id"
raise exception.ProgrammingError(reason=msg)
except Exception as exc:
            # NOTE(jdg): Drivers need to be aware if they fail during
            # a failover sequence, we're expecting them to clean up
            # the target themselves.
if isinstance(exc, exception.InvalidReplicationTarget):
log_method = LOG.error
# Preserve the replication_status: Status should be failed over
# if we were failing back or if we were failing over from one
# secondary to another secondary. In both cases
# active_backend_id will be set.
if service.active_backend_id:
updates['replication_status'] = repl_status.FAILED_OVER
else:
updates['replication_status'] = repl_status.ENABLED
else:
log_method = LOG.exception
updates.update(disabled=True,
replication_status=repl_status.FAILOVER_ERROR)
log_method("Error encountered during failover on host: %(host)s "
"to %(backend_id)s: %(error)s",
{'host': self.host, 'backend_id': secondary_backend_id,
'error': exc})
# We dump the update list for manual recovery
LOG.error('Failed update_list is: %s', volume_update_list)
self.finish_failover(context, service, updates)
return
if secondary_backend_id == "default":
updates['replication_status'] = repl_status.ENABLED
updates['active_backend_id'] = ''
updates['disabled'] = service.frozen
updates['disabled_reason'] = 'frozen' if service.frozen else ''
else:
updates['replication_status'] = repl_status.FAILED_OVER
updates['active_backend_id'] = active_backend_id
updates['disabled'] = True
updates['disabled_reason'] = 'failed-over'
self.finish_failover(context, service, updates)
for volume in replicated_vols:
update = update_data.get(volume.id, {})
if update.get('status', '') == 'error':
update['replication_status'] = repl_status.FAILOVER_ERROR
elif update.get('replication_status') in (None,
repl_status.FAILED_OVER):
update['replication_status'] = updates['replication_status']
if update['replication_status'] == repl_status.FAILOVER_ERROR:
update.setdefault('status', 'error')
# Set all volume snapshots to error
for snapshot in volume.snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
if 'status' in update:
update['previous_status'] = volume.status
volume.update(update)
volume.save()
for grp in groups:
update = update_group_data.get(grp.id, {})
if update.get('status', '') == 'error':
update['replication_status'] = repl_status.FAILOVER_ERROR
elif update.get('replication_status') in (None,
repl_status.FAILED_OVER):
update['replication_status'] = updates['replication_status']
if update['replication_status'] == repl_status.FAILOVER_ERROR:
update.setdefault('status', 'error')
grp.update(update)
grp.save()
LOG.info("Failed over to replication target successfully.")
# TODO(geguileo): In P - remove this
failover_host = failover
def finish_failover(self, context, service, updates):
# If the service is clustered, broadcast the service changes to all
# volume services, including this one.
if service.is_clustered:
# We have to update the cluster with the same data, and we do it
# before broadcasting the failover_completed RPC call to prevent
            # races with services that may be starting.
for key, value in updates.items():
setattr(service.cluster, key, value)
service.cluster.save()
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.failover_completed(context, service, updates)
else:
service.update(updates)
service.save()
def failover_completed(self, context, updates):
service = self._get_service()
service.update(updates)
try:
self.driver.failover_completed(context, service.active_backend_id)
except Exception:
msg = _('Driver reported error during replication failover '
'completion.')
LOG.exception(msg)
service.disabled = True
service.disabled_reason = msg
service.replication_status = (
fields.ReplicationStatus.ERROR)
service.save()
def freeze_host(self, context):
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
            # NOTE(jdg): In the case of freeze, we don't really want to fail
            # the operation; the service is still marked frozen, we just warn
            # that the driver could not be notified.
LOG.warning('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.')
service = self._get_service()
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info("Set backend status to frozen successfully.")
return True
def thaw_host(self, context):
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
LOG.error('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.')
return False
service = self._get_service()
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info("Thawed backend successfully.")
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
LOG.exception("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.",
{'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
sort_keys, sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable snapshots failed, due "
"to uninitialized driver.")
cinder_snapshots = self._get_my_snapshots(ctxt)
try:
driver_entries = self.driver.get_manageable_snapshots(
cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableSnapshotList.
from_primitives(ctxt, driver_entries))
except AttributeError:
LOG.debug('Driver does not support listing manageable snapshots.')
return []
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable snapshots failed, due "
"to driver error.")
return driver_entries
def get_capabilities(self, context, discover):
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
@utils.trace
def get_backup_device(self, ctxt, backup, want_objects=False,
async_call=False):
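        """Ask the driver which device should be used to back up the volume.

        In the asynchronous path the result (or a None device on failure) is
        forwarded to the backup service via continue_backup; in the
        synchronous path the device info is returned to the caller.
        """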
try:
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
except Exception as ex:
if async_call:
LOG.exception("Failed to get backup device. "
"Calling backup continue_backup to cleanup")
rpcapi = backup_rpcapi.BackupAPI()
rpcapi.continue_backup(ctxt, backup, backup_device=None)
return
else:
                with excutils.save_and_reraise_exception():
LOG.exception("Failed to get backup device.")
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
backup_device = (
objects.BackupDeviceInfo.from_primitive(backup_device_dict, ctxt)
if want_objects else backup_device_dict)
if async_call:
LOG.info("Calling backup continue_backup for: {}".format(backup))
rpcapi = backup_rpcapi.BackupAPI()
rpcapi.continue_backup(ctxt, backup, backup_device)
else:
            # Synchronous call: the caller expects the backup device info
            # directly, so we fall back to returning the value itself.
return backup_device
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
def _connection_create(self, ctxt, volume, attachment, connector):
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(ctxt.elevated(),
volume, connector)
except exception.CinderException as ex:
err_msg = (_("Create export for volume failed (%s).") % ex.msg)
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception("Model update failed.", resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except exception.ConnectorRejected:
with excutils.save_and_reraise_exception():
LOG.info("The connector was rejected by the volume driver.")
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(ctxt.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(ctxt, volume, conn_info)
# NOTE(jdg): Get rid of the nested dict (data key)
conn_data = conn_info.pop('data', {})
connection_info = conn_data.copy()
connection_info.update(conn_info)
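        # Illustrative shape only (hypothetical driver values): a driver returning
        # {'driver_volume_type': 'iscsi',
        #  'data': {'target_iqn': 'iqn.example', 'target_lun': 1}}
        # ends up flattened here as {'driver_volume_type': 'iscsi',
        # 'target_iqn': 'iqn.example', 'target_lun': 1}, so callers no longer
        # have to reach into a nested 'data' key.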
values = {'volume_id': volume.id,
'attach_status': 'attaching',
'connector': jsonutils.dumps(connector)}
# TODO(mriedem): Use VolumeAttachment.save() here.
self.db.volume_attachment_update(ctxt, attachment.id, values)
connection_info['attachment_id'] = attachment.id
return connection_info
def attachment_update(self,
context,
vref,
connector,
attachment_id):
mode = connector.get('mode', 'rw')
self._notify_about_volume_usage(context, vref, 'attach.start')
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
# Check to see if a mode parameter was set during attachment-create;
        # this seems kinda wonky, but it's how we're keeping back compatibility
        # with the use of connector.mode for now. In other words, an attach
        # mode recorded on the attachment at create time takes precedence over
        # whatever mode the connector passes in.
if attachment_ref.attach_mode != 'null':
mode = attachment_ref.attach_mode
connector['mode'] = mode
connection_info = self._connection_create(context,
vref,
attachment_ref,
connector)
try:
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
vref,
attachment_ref.instance_uuid,
connector.get('host', ''),
connector.get('mountpoint', 'na'))
except Exception as err:
self.message_api.create(
context, message_field.Action.UPDATE_ATTACHMENT,
resource_uuid=vref.id,
exception=err)
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_ref.id,
{'attach_status':
fields.VolumeAttachStatus.ERROR_ATTACHING})
self.db.volume_attached(context.elevated(),
attachment_ref.id,
attachment_ref.instance_uuid,
connector.get('host', ''),
connector.get('mountpoint', 'na'),
mode,
False)
vref.refresh()
attachment_ref.refresh()
LOG.info("attachment_update completed successfully.",
resource=vref)
return connection_info
def _connection_terminate(self, context, volume,
attachment, force=False):
utils.require_driver_initialized(self.driver)
connector = attachment.connector
if not connector and not force:
            # It's possible to attach a volume to a shelved offloaded server
            # in nova, and a shelved offloaded server is not on a compute host,
# which means the attachment was made without a host connector,
# so if we don't have a connector we can't terminate a connection
# that was never actually made to the storage backend, so just
# log a message and exit.
LOG.debug('No connector for attachment %s; skipping storage '
'backend terminate_connection call.', attachment.id)
# None indicates we don't know and don't care.
return None
try:
shared_connections = self.driver.terminate_connection(volume,
connector,
force=force)
if not isinstance(shared_connections, bool):
shared_connections = False
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate volume connection completed successfully.",
resource=volume)
# NOTE(jdg): Return True/False if there are other outstanding
# attachments that share this connection. If True should signify
# caller to preserve the actual host connection (work should be
        # done in the brick connector as it has the knowledge of what's
        # shared and what's not).
return shared_connections
def attachment_delete(self, context, attachment_id, vref):
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
if not attachment_ref:
for attachment in VA_LIST.get_all_by_volume_id(context, vref.id):
self._do_attachment_delete(context, vref, attachment)
else:
self._do_attachment_delete(context, vref, attachment_ref)
def _do_attachment_delete(self, context, vref, attachment):
utils.require_driver_initialized(self.driver)
self._notify_about_volume_usage(context, vref, "detach.start")
has_shared_connection = self._connection_terminate(context,
vref,
attachment)
try:
LOG.debug('Deleting attachment %(attachment_id)s.',
{'attachment_id': attachment.id},
resource=vref)
self.driver.detach_volume(context, vref, attachment)
if has_shared_connection is not None and not has_shared_connection:
self.driver.remove_export(context.elevated(), vref)
except Exception:
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': fields.VolumeAttachStatus.ERROR_DETACHING})
else:
self.db.volume_detached(context.elevated(), vref.id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(),
vref.id,
'attached_mode')
self._notify_about_volume_usage(context, vref, "detach.end")
def enable_replication(self, ctxt, group):
group.refresh()
if group.replication_status != fields.ReplicationStatus.ENABLING:
msg = _("Replication status in group %s is not "
"enabling. Cannot enable replication.") % group.id
LOG.error(msg)
raise exception.InvalidGroup(reason=msg)
volumes = group.volumes
for vol in volumes:
vol.refresh()
if vol.replication_status != fields.ReplicationStatus.ENABLING:
msg = _("Replication status in volume %s is not "
"enabling. Cannot enable replication.") % vol.id
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self._notify_about_group_usage(
ctxt, group, "enable_replication.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.enable_replication(ctxt, group, volumes))
if volumes_model_update:
for update in volumes_model_update:
vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
vol_obj.update(update)
vol_obj.save()
if (update.get('replication_status') ==
fields.ReplicationStatus.ERROR and
model_update.get('replication_status') !=
fields.ReplicationStatus.ERROR):
model_update['replication_status'] = update.get(
'replication_status')
if model_update:
if (model_update.get('replication_status') ==
fields.ReplicationStatus.ERROR):
msg = _('Enable replication failed.')
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except exception.CinderException as ex:
group.status = fields.GroupStatus.ERROR
group.replication_status = fields.ReplicationStatus.ERROR
group.save()
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.replication_status = fields.ReplicationStatus.ERROR
vol.save()
err_msg = _("Enable replication group failed: "
"%s.") % six.text_type(ex)
raise exception.ReplicationGroupError(reason=err_msg,
group_id=group.id)
for vol in volumes:
vol.replication_status = fields.ReplicationStatus.ENABLED
vol.save()
group.replication_status = fields.ReplicationStatus.ENABLED
group.save()
self._notify_about_group_usage(
ctxt, group, "enable_replication.end", volumes)
LOG.info("Enable replication completed successfully.",
resource={'type': 'group',
'id': group.id})
def disable_replication(self, ctxt, group):
group.refresh()
if group.replication_status != fields.ReplicationStatus.DISABLING:
msg = _("Replication status in group %s is not "
"disabling. Cannot disable replication.") % group.id
LOG.error(msg)
raise exception.InvalidGroup(reason=msg)
volumes = group.volumes
for vol in volumes:
vol.refresh()
if (vol.replication_status !=
fields.ReplicationStatus.DISABLING):
msg = _("Replication status in volume %s is not "
"disabling. Cannot disable replication.") % vol.id
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self._notify_about_group_usage(
ctxt, group, "disable_replication.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.disable_replication(ctxt, group, volumes))
if volumes_model_update:
for update in volumes_model_update:
vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
vol_obj.update(update)
vol_obj.save()
if (update.get('replication_status') ==
fields.ReplicationStatus.ERROR and
model_update.get('replication_status') !=
fields.ReplicationStatus.ERROR):
model_update['replication_status'] = update.get(
'replication_status')
if model_update:
if (model_update.get('replication_status') ==
fields.ReplicationStatus.ERROR):
msg = _('Disable replication failed.')
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except exception.CinderException as ex:
group.status = fields.GroupStatus.ERROR
group.replication_status = fields.ReplicationStatus.ERROR
group.save()
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.replication_status = fields.ReplicationStatus.ERROR
vol.save()
err_msg = _("Disable replication group failed: "
"%s.") % six.text_type(ex)
raise exception.ReplicationGroupError(reason=err_msg,
group_id=group.id)
for vol in volumes:
vol.replication_status = fields.ReplicationStatus.DISABLED
vol.save()
group.replication_status = fields.ReplicationStatus.DISABLED
group.save()
self._notify_about_group_usage(
ctxt, group, "disable_replication.end", volumes)
LOG.info("Disable replication completed successfully.",
resource={'type': 'group',
'id': group.id})
def failover_replication(self, ctxt, group, allow_attached_volume=False,
secondary_backend_id=None):
group.refresh()
if group.replication_status != fields.ReplicationStatus.FAILING_OVER:
msg = _("Replication status in group %s is not "
"failing-over. Cannot failover replication.") % group.id
LOG.error(msg)
raise exception.InvalidGroup(reason=msg)
volumes = group.volumes
for vol in volumes:
vol.refresh()
if vol.status == 'in-use' and not allow_attached_volume:
msg = _("Volume %s is attached but allow_attached_volume flag "
"is False. Cannot failover replication.") % vol.id
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if (vol.replication_status !=
fields.ReplicationStatus.FAILING_OVER):
msg = _("Replication status in volume %s is not "
"failing-over. Cannot failover replication.") % vol.id
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self._notify_about_group_usage(
ctxt, group, "failover_replication.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.failover_replication(
ctxt, group, volumes, secondary_backend_id))
if volumes_model_update:
for update in volumes_model_update:
vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
vol_obj.update(update)
vol_obj.save()
if (update.get('replication_status') ==
fields.ReplicationStatus.ERROR and
model_update.get('replication_status') !=
fields.ReplicationStatus.ERROR):
model_update['replication_status'] = update.get(
'replication_status')
if model_update:
if (model_update.get('replication_status') ==
fields.ReplicationStatus.ERROR):
msg = _('Failover replication failed.')
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except exception.CinderException as ex:
group.status = fields.GroupStatus.ERROR
group.replication_status = fields.ReplicationStatus.ERROR
group.save()
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.replication_status = fields.ReplicationStatus.ERROR
vol.save()
err_msg = _("Failover replication group failed: "
"%s.") % six.text_type(ex)
raise exception.ReplicationGroupError(reason=err_msg,
group_id=group.id)
for vol in volumes:
if secondary_backend_id == "default":
vol.replication_status = fields.ReplicationStatus.ENABLED
else:
vol.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
vol.save()
if secondary_backend_id == "default":
group.replication_status = fields.ReplicationStatus.ENABLED
else:
group.replication_status = fields.ReplicationStatus.FAILED_OVER
group.save()
self._notify_about_group_usage(
ctxt, group, "failover_replication.end", volumes)
LOG.info("Failover replication completed successfully.",
resource={'type': 'group',
'id': group.id})
def list_replication_targets(self, ctxt, group):
replication_targets = []
try:
group.refresh()
if self.configuration.replication_device:
if ctxt.is_admin:
for rep_dev in self.configuration.replication_device:
keys = rep_dev.keys()
dev = {}
for k in keys:
dev[k] = rep_dev[k]
replication_targets.append(dev)
else:
for rep_dev in self.configuration.replication_device:
dev = rep_dev.get('backend_id')
if dev:
replication_targets.append({'backend_id': dev})
except exception.GroupNotFound:
err_msg = (_("Get replication targets failed. Group %s not "
"found.") % group.id)
LOG.exception(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
return {'replication_targets': replication_targets}
| true
| true
|
f715b36d352614e7311fe752bba62a9ff28b7113
| 769
|
py
|
Python
|
misc/splitpstack.py
|
csutherl/support-helpers
|
179ea920e2a04bda47cb583c85cc0163fd7ab4d3
|
[
"Apache-2.0"
] | 6
|
2015-01-21T20:22:42.000Z
|
2015-09-14T16:40:37.000Z
|
misc/splitpstack.py
|
csutherl/support-helpers
|
179ea920e2a04bda47cb583c85cc0163fd7ab4d3
|
[
"Apache-2.0"
] | null | null | null |
misc/splitpstack.py
|
csutherl/support-helpers
|
179ea920e2a04bda47cb583c85cc0163fd7ab4d3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
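# Splits a file containing a series of pstack dumps into one output file per
# dump, using each 'Date:' header line as the boundary (assumed input format:
# every dump is preceded by a line such as 'Date: <timestamp>').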
import sys
def helpexit():
print './splitpstack.py <filename>'
exit(1)
if __name__=='__main__':
if len(sys.argv) != 2:
helpexit()
else:
try:
            currentDate = None
            currentFile = None
            f = None
            f = open(sys.argv[1], 'r')
for line in f:
if 'Date:' in line:
currentDate = line.split('Date:')[1].replace(' ', '_')
if currentFile:
currentFile.close()
currentFile = open('pstack-' + currentDate.strip(), 'w')
if currentDate and currentFile:
currentFile.write(line)
        except Exception as e:
            print 'error:', e
        finally:
            if currentFile:
                currentFile.close()
            if f:
                f.close()
| 25.633333
| 76
| 0.456437
|
import sys
def helpexit():
print './splitpstack.py <filename>'
exit(1)
if __name__=='__main__':
if len(sys.argv) != 2:
helpexit()
else:
try:
            currentDate = None
            currentFile = None
            f = None
            f = open(sys.argv[1], 'r')
for line in f:
if 'Date:' in line:
currentDate = line.split('Date:')[1].replace(' ', '_')
if currentFile:
currentFile.close()
currentFile = open('pstack-' + currentDate.strip(), 'w')
if currentDate and currentFile:
currentFile.write(line)
        except Exception as e:
            print 'error:', e
        finally:
            if currentFile:
                currentFile.close()
            if f:
                f.close()
| false
| true
|
f715b378abaeeb7ac3904a1e1b60d24e706d5389
| 1,803
|
py
|
Python
|
tensorflow_federated/python/core/impl/executor_stacks/executor_stack_bindings_test.py
|
zhihansh/federated-oss
|
38cfcb05702ff7297db76d3ccb5f5afef53ca09b
|
[
"Apache-2.0"
] | 1,918
|
2019-02-22T21:17:28.000Z
|
2022-03-30T14:49:53.000Z
|
tensorflow_federated/python/core/impl/executor_stacks/executor_stack_bindings_test.py
|
zhihansh/federated-oss
|
38cfcb05702ff7297db76d3ccb5f5afef53ca09b
|
[
"Apache-2.0"
] | 999
|
2019-02-22T21:47:44.000Z
|
2022-03-31T11:06:42.000Z
|
tensorflow_federated/python/core/impl/executor_stacks/executor_stack_bindings_test.py
|
zhihansh/federated-oss
|
38cfcb05702ff7297db76d3ccb5f5afef53ca09b
|
[
"Apache-2.0"
] | 498
|
2019-02-22T21:17:56.000Z
|
2022-03-29T02:54:15.000Z
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pybind11_abseil import status as absl_status
from tensorflow_federated.python.core.impl.executor_stacks import executor_stack_bindings
from tensorflow_federated.python.core.impl.executors import executor_bindings
from tensorflow_federated.python.core.impl.types import placements
_TARGET_LIST = ['localhost:8000', 'localhost:8001']
_CARDINALITIES = {placements.CLIENTS: 5}
class ExecutorStackBindingsTest(parameterized.TestCase):
@parameterized.named_parameters(('from_target_list', list),
('from_target_tuple', tuple),
('from_target_ndarray', np.array))
def test_executor_construction_raises_no_channels_available(
self, container_constructor):
with self.assertRaisesRegex(absl_status.StatusNotOk, 'UNAVAILABLE'):
executor_stack_bindings.create_remote_executor_stack(
channels=container_constructor([
executor_bindings.create_insecure_grpc_channel(t)
for t in _TARGET_LIST
]),
cardinalities=_CARDINALITIES)
if __name__ == '__main__':
absltest.main()
| 39.195652
| 89
| 0.749307
|
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pybind11_abseil import status as absl_status
from tensorflow_federated.python.core.impl.executor_stacks import executor_stack_bindings
from tensorflow_federated.python.core.impl.executors import executor_bindings
from tensorflow_federated.python.core.impl.types import placements
_TARGET_LIST = ['localhost:8000', 'localhost:8001']
_CARDINALITIES = {placements.CLIENTS: 5}
class ExecutorStackBindingsTest(parameterized.TestCase):
@parameterized.named_parameters(('from_target_list', list),
('from_target_tuple', tuple),
('from_target_ndarray', np.array))
def test_executor_construction_raises_no_channels_available(
self, container_constructor):
with self.assertRaisesRegex(absl_status.StatusNotOk, 'UNAVAILABLE'):
executor_stack_bindings.create_remote_executor_stack(
channels=container_constructor([
executor_bindings.create_insecure_grpc_channel(t)
for t in _TARGET_LIST
]),
cardinalities=_CARDINALITIES)
if __name__ == '__main__':
absltest.main()
| true
| true
|
f715b38795d175d33576ab05dea9a3fe41688f13
| 1,863
|
py
|
Python
|
samples/snippets/detect/label-products.py
|
glaswasser/python-vision
|
706c314a86b8f35c313bb3e907ae84317dca1a0b
|
[
"Apache-2.0"
] | null | null | null |
samples/snippets/detect/label-products.py
|
glaswasser/python-vision
|
706c314a86b8f35c313bb3e907ae84317dca1a0b
|
[
"Apache-2.0"
] | null | null | null |
samples/snippets/detect/label-products.py
|
glaswasser/python-vision
|
706c314a86b8f35c313bb3e907ae84317dca1a0b
|
[
"Apache-2.0"
] | null | null | null |
from detect import (detect_logos, detect_text)
import pandas as pd
import re
import os
#from __future__ import print_function
from google.cloud import vision
images_path = "C:\\Users\\heinz\\Yagora GmbH\\Ievgen Kyrda - Crawler\\images\\foodnewsgermany_images/"
file_names = os.listdir(os.path.dirname(images_path))
file_paths = [images_path + f for f in file_names]
logos = [detect_logos(f) for f in file_paths]
texts = [detect_text(f)[0].description for f in file_paths]
# remove line break symbols
texts = [x.replace("\n", ", ") for x in texts]
contained = []
#contained[1] = "test"
for i in range(len(logos)): # loop over future rows of df
tmp = []
for j in logos[i]: # for every logo-row, check if in text
if j.lower() in texts[i].lower():
                tmp.append(j)  # record the individual logo that matched the text
else:
tmp.append(None)
contained.append(tmp)
detect_df = pd.DataFrame(
list(zip(file_names, texts, logos, contained, file_paths)),
columns = ["files", "texts", "logos", "probable_brand", "file_path"]
)
detect_df
# other ideas:
# if logo in existing logos, add logo
from PIL import Image
from io import BytesIO
from IPython.display import HTML
import base64
pd.set_option('display.max_colwidth', -1)
def get_thumbnail(path):
i = Image.open(path)
i.thumbnail((150, 150), Image.LANCZOS)
return i
def image_base64(im):
if isinstance(im, str):
im = get_thumbnail(im)
with BytesIO() as buffer:
im.save(buffer, 'jpeg')
return base64.b64encode(buffer.getvalue()).decode()
def image_formatter(im):
return f'<img src="data:image/jpeg;base64,{image_base64(im)}">'
#dogs['file'] = dogs.id.map(lambda id: f'../input/train/{id}.jpg')
detect_df['image'] = detect_df.file_path.map(lambda f: get_thumbnail(f))
HTML(detect_df.to_html(formatters={'image': image_formatter}, escape=False))
| 26.239437
| 102
| 0.688137
|
from detect import (detect_logos, detect_text)
import pandas as pd
import re
import os
from google.cloud import vision
images_path = "C:\\Users\\heinz\\Yagora GmbH\\Ievgen Kyrda - Crawler\\images\\foodnewsgermany_images/"
file_names = os.listdir(os.path.dirname(images_path))
file_paths = [images_path + f for f in file_names]
logos = [detect_logos(f) for f in file_paths]
texts = [detect_text(f)[0].description for f in file_paths]
texts = [x.replace("\n", ", ") for x in texts]
contained = []
for i in range(len(logos)):
tmp = []
for j in logos[i]:
if j.lower() in texts[i].lower():
                tmp.append(j)
else:
tmp.append(None)
contained.append(tmp)
detect_df = pd.DataFrame(
list(zip(file_names, texts, logos, contained, file_paths)),
columns = ["files", "texts", "logos", "probable_brand", "file_path"]
)
detect_df
from PIL import Image
from io import BytesIO
from IPython.display import HTML
import base64
pd.set_option('display.max_colwidth', -1)
def get_thumbnail(path):
i = Image.open(path)
i.thumbnail((150, 150), Image.LANCZOS)
return i
def image_base64(im):
if isinstance(im, str):
im = get_thumbnail(im)
with BytesIO() as buffer:
im.save(buffer, 'jpeg')
return base64.b64encode(buffer.getvalue()).decode()
def image_formatter(im):
return f'<img src="data:image/jpeg;base64,{image_base64(im)}">'
detect_df['image'] = detect_df.file_path.map(lambda f: get_thumbnail(f))
HTML(detect_df.to_html(formatters={'image': image_formatter}, escape=False))
| true
| true
|
f715b443834e4ea4db5d450ce663e81845d95977
| 749
|
py
|
Python
|
Python/total-appeal-of-a-string.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | 4
|
2018-10-11T17:50:56.000Z
|
2018-10-11T21:16:44.000Z
|
Python/total-appeal-of-a-string.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | null | null | null |
Python/total-appeal-of-a-string.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | 4
|
2018-10-11T18:50:32.000Z
|
2018-10-12T00:04:09.000Z
|
# Time: O(n)
# Space: O(26)
# combinatorics
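# Each index i contributes (i - lookup[c]) * (len(s) - i) to the total appeal:
# position i is the first occurrence of character c for (i - lookup[c]) possible
# substring starts and (len(s) - i) possible ends. A small worked check (not
# from the original source): s = "ab" gives 2 + 2 = 4, matching the appeals of
# "a" (1) + "b" (1) + "ab" (2).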
class Solution(object):
def appealSum(self, s):
"""
:type s: str
:rtype: int
"""
result = curr = 0
lookup = [-1]*26
for i, c in enumerate(s):
result += (i-lookup[ord(c)-ord('a')])*(len(s)-i)
lookup[ord(c)-ord('a')] = i
return result
# Time: O(n)
# Space: O(26)
# counting
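# Here cnt is the total appeal of all substrings ending at index i: extending
# each of them by s[i] adds one new distinct character exactly for the
# (i - lookup[c]) substrings that start after the previous occurrence of c.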
class Solution2(object):
def appealSum(self, s):
"""
:type s: str
:rtype: int
"""
result = cnt = 0
lookup = [-1]*26
for i, c in enumerate(s):
cnt += i-lookup[ord(c)-ord('a')]
lookup[ord(c)-ord('a')] = i
result += cnt
return result
| 20.805556
| 60
| 0.445928
|
class Solution(object):
def appealSum(self, s):
result = curr = 0
lookup = [-1]*26
for i, c in enumerate(s):
result += (i-lookup[ord(c)-ord('a')])*(len(s)-i)
lookup[ord(c)-ord('a')] = i
return result
class Solution2(object):
def appealSum(self, s):
result = cnt = 0
lookup = [-1]*26
for i, c in enumerate(s):
cnt += i-lookup[ord(c)-ord('a')]
lookup[ord(c)-ord('a')] = i
result += cnt
return result
| true
| true
|
f715b4574d66756d1158d3deb81d9fa1b677cc30
| 3,454
|
py
|
Python
|
huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/batch_tag_action_request_body.py
|
NQLoong/huaweicloud-sdk-python-v3
|
677944a0b722147c6e105c53df9110724d64152a
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/batch_tag_action_request_body.py
|
mawenbo-huawei/huaweicloud-sdk-python-v3
|
677944a0b722147c6e105c53df9110724d64152a
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/batch_tag_action_request_body.py
|
mawenbo-huawei/huaweicloud-sdk-python-v3
|
677944a0b722147c6e105c53df9110724d64152a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class BatchTagActionRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'action': 'str',
'tags': 'list[BatchTagActionTagOption]'
}
attribute_map = {
'action': 'action',
'tags': 'tags'
}
def __init__(self, action=None, tags=None):
"""BatchTagActionRequestBody - a model defined in huaweicloud sdk"""
self._action = None
self._tags = None
self.discriminator = None
self.action = action
self.tags = tags
@property
def action(self):
"""Gets the action of this BatchTagActionRequestBody.
        Operation to perform. Valid values: - create: add tags. - delete: remove tags.
:return: The action of this BatchTagActionRequestBody.
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""Sets the action of this BatchTagActionRequestBody.
        Operation to perform. Valid values: - create: add tags. - delete: remove tags.
:param action: The action of this BatchTagActionRequestBody.
:type: str
"""
self._action = action
@property
def tags(self):
"""Gets the tags of this BatchTagActionRequestBody.
        List of tags.
:return: The tags of this BatchTagActionRequestBody.
:rtype: list[BatchTagActionTagOption]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this BatchTagActionRequestBody.
        List of tags.
:param tags: The tags of this BatchTagActionRequestBody.
:type: list[BatchTagActionTagOption]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchTagActionRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.397059
| 76
| 0.54806
|
import pprint
import re
import six
class BatchTagActionRequestBody:
sensitive_list = []
openapi_types = {
'action': 'str',
'tags': 'list[BatchTagActionTagOption]'
}
attribute_map = {
'action': 'action',
'tags': 'tags'
}
def __init__(self, action=None, tags=None):
self._action = None
self._tags = None
self.discriminator = None
self.action = action
self.tags = tags
@property
def action(self):
return self._action
@action.setter
def action(self, action):
self._action = action
@property
def tags(self):
return self._tags
@tags.setter
def tags(self, tags):
self._tags = tags
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, BatchTagActionRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f715b4c57e072ac7d2f65d981630ad2bb277941f
| 5,616
|
py
|
Python
|
src/assisted_test_infra/test_infra/controllers/node_controllers/node_controller.py
|
nirarg/assisted-test-infra
|
e07c43501c1d9bfaa1aee3aea49f1ef359faee07
|
[
"Apache-2.0"
] | null | null | null |
src/assisted_test_infra/test_infra/controllers/node_controllers/node_controller.py
|
nirarg/assisted-test-infra
|
e07c43501c1d9bfaa1aee3aea49f1ef359faee07
|
[
"Apache-2.0"
] | 248
|
2020-11-09T06:47:39.000Z
|
2022-03-28T06:02:39.000Z
|
src/assisted_test_infra/test_infra/controllers/node_controllers/node_controller.py
|
nirarg/assisted-test-infra
|
e07c43501c1d9bfaa1aee3aea49f1ef359faee07
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Any, Callable, List, Optional, SupportsAbs, Tuple, TypeVar
import libvirt
from assisted_test_infra.test_infra import BaseEntityConfig
from assisted_test_infra.test_infra.controllers.node_controllers.disk import Disk
from assisted_test_infra.test_infra.controllers.node_controllers.node import Node
from assisted_test_infra.test_infra.helper_classes.config.controller_config import BaseNodeConfig
from service_client import log
class NodeController(ABC):
T = TypeVar("T", bound=SupportsAbs[BaseNodeConfig])
def __init__(self, config: T, entity_config: BaseEntityConfig):
self._config = config
self._entity_config = entity_config
def log_configuration(self):
log.info(f"controller configuration={self._config}")
@property
def workers_count(self):
return self._config.workers_count
@property
def masters_count(self):
return self._config.masters_count
@property
def is_ipv4(self):
return self._config.is_ipv4
@property
def is_ipv6(self):
return self._config.is_ipv6
@abstractmethod
def list_nodes(self) -> List[Node]:
pass
@abstractmethod
def list_disks(self, node_name: str) -> List[Disk]:
pass
@abstractmethod
def list_networks(self) -> List[Any]:
pass
@abstractmethod
def list_leases(self, network_name: str) -> List[Any]:
pass
@abstractmethod
def shutdown_node(self, node_name: str) -> None:
pass
@abstractmethod
def shutdown_all_nodes(self) -> None:
pass
@abstractmethod
def start_node(self, node_name: str, check_ips: bool) -> None:
pass
@abstractmethod
def start_all_nodes(self) -> List[Node]:
pass
@abstractmethod
def restart_node(self, node_name: str) -> None:
pass
@abstractmethod
def format_node_disk(self, node_name: str, disk_index: int = 0) -> None:
pass
@abstractmethod
def format_all_node_disks(self) -> None:
pass
@abstractmethod
def attach_test_disk(self, node_name: str, disk_size: int, bootable=False, persistent=False, with_wwn=False):
"""
Attaches a test disk. That disk can later be detached with `detach_all_test_disks`
        :param with_wwn: Whether the disk should have a WWN (World Wide Name); having a WWN creates a disk by-id link
:param node_name: Node to attach disk to
:param disk_size: Size of disk to attach
:param bootable: Whether to format an MBR sector at the beginning of the disk
:param persistent: Whether the disk should survive shutdowns
"""
pass
@abstractmethod
def detach_all_test_disks(self, node_name: str):
"""
Detaches all test disks created by `attach_test_disk`
:param node_name: Node to detach disk from
"""
pass
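    # A minimal usage sketch (illustrative only; `controller` stands for some
    # concrete NodeController implementation and the node name/size are
    # hypothetical):
    #
    #   controller.attach_test_disk("master-0", disk_size=10 * 1024 ** 3,
    #                               persistent=True, with_wwn=True)
    #   ... run the scenario that needs the extra disk ...
    #   controller.detach_all_test_disks("master-0")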
@abstractmethod
def get_ingress_and_api_vips(self) -> dict:
pass
@abstractmethod
def destroy_all_nodes(self) -> None:
pass
@abstractmethod
def get_cluster_network(self) -> str:
pass
@abstractmethod
def setup_time(self) -> str:
pass
@abstractmethod
def prepare_nodes(self):
pass
@abstractmethod
def is_active(self, node_name) -> bool:
pass
@abstractmethod
def set_boot_order(self, node_name, cd_first=False) -> None:
pass
@abstractmethod
def set_per_device_boot_order(self, node_name, key: Callable[[Disk], int]) -> None:
"""
Set the boot priority for every disk
It sorts the disk according to the key function result
:param node_name: The node to change its boot order
        :param key: a key function that gets a Disk object and decides its priority
"""
pass
@abstractmethod
def get_node_ips_and_macs(self, node_name) -> Tuple[List[str], List[str]]:
pass
@abstractmethod
def set_single_node_ip(self, ip) -> None:
pass
@abstractmethod
def get_host_id(self, node_name: str) -> str:
pass
@abstractmethod
def get_cpu_cores(self, node_name: str) -> int:
pass
@abstractmethod
def set_cpu_cores(self, node_name: str, core_count: int) -> None:
pass
@abstractmethod
def get_ram_kib(self, node_name: str) -> int:
pass
@abstractmethod
def set_ram_kib(self, node_name: str, ram_kib: int) -> None:
pass
def get_primary_machine_cidr(self) -> Optional[str]:
# Default to auto resolve by the cluster. see cluster.get_primary_machine_cidr
return None
def get_provisioning_cidr(self) -> Optional[str]:
return None
@abstractmethod
def attach_interface(self, node_name, network_xml: str) -> Tuple[libvirt.virNetwork, str]:
pass
@abstractmethod
def add_interface(self, node_name, network_name, target_interface: str) -> str:
pass
@abstractmethod
def undefine_interface(self, node_name: str, mac: str):
pass
@abstractmethod
def create_network(self, network_xml: str) -> libvirt.virNetwork:
pass
@abstractmethod
def get_network_by_name(self, network_name: str) -> libvirt.virNetwork:
pass
@abstractmethod
def destroy_network(self, network: libvirt.virNetwork):
pass
def notify_iso_ready(self) -> None:
pass
def set_dns(self, api_vip: str, ingress_vip: str) -> None:
pass
def set_dns_for_user_managed_network(self) -> None:
pass
| 26.742857
| 116
| 0.667379
|
from abc import ABC, abstractmethod
from typing import Any, Callable, List, Optional, SupportsAbs, Tuple, TypeVar
import libvirt
from assisted_test_infra.test_infra import BaseEntityConfig
from assisted_test_infra.test_infra.controllers.node_controllers.disk import Disk
from assisted_test_infra.test_infra.controllers.node_controllers.node import Node
from assisted_test_infra.test_infra.helper_classes.config.controller_config import BaseNodeConfig
from service_client import log
class NodeController(ABC):
T = TypeVar("T", bound=SupportsAbs[BaseNodeConfig])
def __init__(self, config: T, entity_config: BaseEntityConfig):
self._config = config
self._entity_config = entity_config
def log_configuration(self):
log.info(f"controller configuration={self._config}")
@property
def workers_count(self):
return self._config.workers_count
@property
def masters_count(self):
return self._config.masters_count
@property
def is_ipv4(self):
return self._config.is_ipv4
@property
def is_ipv6(self):
return self._config.is_ipv6
@abstractmethod
def list_nodes(self) -> List[Node]:
pass
@abstractmethod
def list_disks(self, node_name: str) -> List[Disk]:
pass
@abstractmethod
def list_networks(self) -> List[Any]:
pass
@abstractmethod
def list_leases(self, network_name: str) -> List[Any]:
pass
@abstractmethod
def shutdown_node(self, node_name: str) -> None:
pass
@abstractmethod
def shutdown_all_nodes(self) -> None:
pass
@abstractmethod
def start_node(self, node_name: str, check_ips: bool) -> None:
pass
@abstractmethod
def start_all_nodes(self) -> List[Node]:
pass
@abstractmethod
def restart_node(self, node_name: str) -> None:
pass
@abstractmethod
def format_node_disk(self, node_name: str, disk_index: int = 0) -> None:
pass
@abstractmethod
def format_all_node_disks(self) -> None:
pass
@abstractmethod
def attach_test_disk(self, node_name: str, disk_size: int, bootable=False, persistent=False, with_wwn=False):
pass
@abstractmethod
def detach_all_test_disks(self, node_name: str):
pass
@abstractmethod
def get_ingress_and_api_vips(self) -> dict:
pass
@abstractmethod
def destroy_all_nodes(self) -> None:
pass
@abstractmethod
def get_cluster_network(self) -> str:
pass
@abstractmethod
def setup_time(self) -> str:
pass
@abstractmethod
def prepare_nodes(self):
pass
@abstractmethod
def is_active(self, node_name) -> bool:
pass
@abstractmethod
def set_boot_order(self, node_name, cd_first=False) -> None:
pass
@abstractmethod
def set_per_device_boot_order(self, node_name, key: Callable[[Disk], int]) -> None:
pass
@abstractmethod
def get_node_ips_and_macs(self, node_name) -> Tuple[List[str], List[str]]:
pass
@abstractmethod
def set_single_node_ip(self, ip) -> None:
pass
@abstractmethod
def get_host_id(self, node_name: str) -> str:
pass
@abstractmethod
def get_cpu_cores(self, node_name: str) -> int:
pass
@abstractmethod
def set_cpu_cores(self, node_name: str, core_count: int) -> None:
pass
@abstractmethod
def get_ram_kib(self, node_name: str) -> int:
pass
@abstractmethod
def set_ram_kib(self, node_name: str, ram_kib: int) -> None:
pass
def get_primary_machine_cidr(self) -> Optional[str]:
return None
def get_provisioning_cidr(self) -> Optional[str]:
return None
@abstractmethod
def attach_interface(self, node_name, network_xml: str) -> Tuple[libvirt.virNetwork, str]:
pass
@abstractmethod
def add_interface(self, node_name, network_name, target_interface: str) -> str:
pass
@abstractmethod
def undefine_interface(self, node_name: str, mac: str):
pass
@abstractmethod
def create_network(self, network_xml: str) -> libvirt.virNetwork:
pass
@abstractmethod
def get_network_by_name(self, network_name: str) -> libvirt.virNetwork:
pass
@abstractmethod
def destroy_network(self, network: libvirt.virNetwork):
pass
def notify_iso_ready(self) -> None:
pass
def set_dns(self, api_vip: str, ingress_vip: str) -> None:
pass
def set_dns_for_user_managed_network(self) -> None:
pass
| true
| true
|
f715b57902684613288421b8c0d1be2ab344f1cf
| 2,747
|
py
|
Python
|
taxtea/checks.py
|
lowercase-app/django-taxtea
|
aa8184c1aceb67ecf34eda2e48184e810616f59f
|
[
"MIT"
] | 13
|
2020-07-20T17:35:32.000Z
|
2021-09-25T02:11:44.000Z
|
taxtea/checks.py
|
lowercase-app/django-taxtea
|
aa8184c1aceb67ecf34eda2e48184e810616f59f
|
[
"MIT"
] | 51
|
2020-07-22T13:56:09.000Z
|
2022-02-05T06:04:36.000Z
|
taxtea/checks.py
|
lowercase-app/django-taxtea
|
aa8184c1aceb67ecf34eda2e48184e810616f59f
|
[
"MIT"
] | null | null | null |
from typing import List
from django.apps.config import AppConfig
from django.core.checks import CheckMessage, Critical, Tags, register
@register(Tags.compatibility)
def check_USPS_api_auth(app_configs: AppConfig = None, **kwargs) -> List[CheckMessage]:
"""
check_USPS_api_auth:
Checks if the user has supplied a USPS username/password.
Args:
appconfig (AppConfig, optional): Defaults to None.
Returns:
List[checks.CheckMessage]: List of Django CheckMessages
"""
from . import settings as tax_settings
messages = []
if not tax_settings.USPS_USER:
msg = "Could not find a USPS User."
hint = "Add TAXTEA_USPS_USER to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C001"))
return messages
@register(Tags.compatibility)
def check_Avalara_api_auth(
app_configs: AppConfig = None, **kwargs
) -> List[CheckMessage]:
"""
check_Avalara_api_auth:
        Checks if the user has supplied an Avalara username/password.
Args:
appconfig (AppConfig, optional): Defaults to None.
Returns:
List[checks.CheckMessage]: List of Django CheckMessages
"""
from . import settings as tax_settings
messages = []
if not tax_settings.AVALARA_USER:
msg = "Could not find a Avalara User."
hint = "Add TAXTEA_AVALARA_USER to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C002"))
if not tax_settings.AVALARA_PASSWORD:
msg = "Could not find a Avalara Password."
hint = "Add TAXTEA_AVALARA_PASSWORD to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C003"))
return messages
@register(Tags.compatibility)
def check_origin_zips(app_configs: AppConfig = None, **kwargs) -> List[CheckMessage]:
"""
check_origin_zips:
Checks if the user has supplied at least one origin zip.
Args:
appconfig (AppConfig, optional): Defaults to None.
Returns:
List[checks.CheckMessage]: List of Django CheckMessages
"""
from . import settings as tax_settings
messages = []
if not tax_settings.NEXUSES:
msg = "Could not find a Nexus."
hint = "Add at least one TAXTEA_NEXUSES to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C004"))
# If there is no TAX_NEXUS, then the next check will throw an IndexError
return messages
state, zip_code = tax_settings.NEXUSES[0]
if not state and not zip_code:
msg = "Could not find a valid Nexus tuple."
hint = "Add at least one Nexus tuple ('STATE', 'ZIPCODE') to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C005"))
return messages
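# A hypothetical settings sketch that would satisfy the checks above (the
# setting names come from the hints; the values are placeholders, not real
# credentials):
#
#   TAXTEA_USPS_USER = "usps-user-id"
#   TAXTEA_AVALARA_USER = "avalara-user"
#   TAXTEA_AVALARA_PASSWORD = "avalara-password"
#   TAXTEA_NEXUSES = [("NY", "10001")]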
| 30.186813
| 87
| 0.67419
|
from typing import List
from django.apps.config import AppConfig
from django.core.checks import CheckMessage, Critical, Tags, register
@register(Tags.compatibility)
def check_USPS_api_auth(app_configs: AppConfig = None, **kwargs) -> List[CheckMessage]:
from . import settings as tax_settings
messages = []
if not tax_settings.USPS_USER:
msg = "Could not find a USPS User."
hint = "Add TAXTEA_USPS_USER to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C001"))
return messages
@register(Tags.compatibility)
def check_Avalara_api_auth(
app_configs: AppConfig = None, **kwargs
) -> List[CheckMessage]:
from . import settings as tax_settings
messages = []
if not tax_settings.AVALARA_USER:
msg = "Could not find a Avalara User."
hint = "Add TAXTEA_AVALARA_USER to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C002"))
if not tax_settings.AVALARA_PASSWORD:
msg = "Could not find a Avalara Password."
hint = "Add TAXTEA_AVALARA_PASSWORD to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C003"))
return messages
@register(Tags.compatibility)
def check_origin_zips(app_configs: AppConfig = None, **kwargs) -> List[CheckMessage]:
from . import settings as tax_settings
messages = []
if not tax_settings.NEXUSES:
msg = "Could not find a Nexus."
hint = "Add at least one TAXTEA_NEXUSES to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C004"))
return messages
state, zip_code = tax_settings.NEXUSES[0]
if not state and not zip_code:
msg = "Could not find a valid Nexus tuple."
hint = "Add at least one Nexus tuple ('STATE', 'ZIPCODE') to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C005"))
return messages
| true
| true
|
f715b7500a857c259eab2aab6854485671e9f369
| 6,405
|
py
|
Python
|
lakshmi/cache.py
|
sarvjeets/lakshmi
|
8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5
|
[
"MIT"
] | 59
|
2021-09-07T05:19:30.000Z
|
2022-02-24T18:29:49.000Z
|
lakshmi/cache.py
|
sarvjeets/lakshmi
|
8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5
|
[
"MIT"
] | 4
|
2021-08-01T18:32:51.000Z
|
2022-02-26T19:14:37.000Z
|
lakshmi/cache.py
|
sarvjeets/lakshmi
|
8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5
|
[
"MIT"
] | 3
|
2021-08-01T04:35:07.000Z
|
2022-03-23T21:48:51.000Z
|
"""
This class is used to cache return value of functions on disk for a specified
number of days. This is used by the lakshmi.assets module to cache name/asset
value (i.e. the slow functions). For examples on how to use this class, please
see the tests (tests/test_cache.py file).
Currently, this module can only be used on functions which are class members
and the function itself must take no arguments. These restrictions can be
easily relaxed, but so far no use case has needed anything more than what
is currently implemented.
In addition to caching values, this class also allows one to optionally call
a user-specified function on cache-misses (currently used to show a progress
bar to the user via the lak CLI).
"""
import functools
import pickle
from abc import ABC, abstractmethod
from datetime import datetime
from hashlib import md5
from pathlib import Path
# Inspired by https://pypi.org/project/cache-to-disk/. I tried using other
# options such as requests-cache, but it was too slow compared to the solution
# implemented here.
class Cacheable(ABC):
"""Interface that declares that a particular class's method return
values could be cached. The methods should not take a parameter,
and cache_key() + method name should uniquely imply the return
value of that class."""
@abstractmethod
def cache_key(self):
"""Unique string value used as key for caching."""
pass
def get_file_age(file):
"""Returns the age of file.
Args:
file: A PosixPath object representing a file.
    Returns: An int representing the age in days.
"""
return (datetime.today()
- datetime.fromtimestamp(file.stat().st_mtime)).days
# Constants
# Default cache directory if none is specified.
_DEFAULT_DIR = Path.home() / '.lakshmicache'
_CACHE_STR = 'cache_dir'
_FORCE_STR = 'force_refresh'
_FORCED_FILES_STR = 'forced_files'
_MISS_FUNC_STR = 'miss_func'
# Dict (string -> object) to keep cache context.
# Description of keys to what is stored:
# _CACHE_STR:
# The pathlib.Path object specifying cache directory. If set to None,
# caching is disabled. Default: _DEFAULT_DIR
# _FORCE_STR:
# If set to True, new values are re-generated once even if a cached one is
# available. This is meant for data that is cached for < month (stock prices
# and Treasury Bond value). Values that are cached for > 40 days ignore this
# flag. Default: False
# _FORCED_FILES_STR:
# A set of files which are already refreshed once due to _ctx[_FORCE_STR]
# being set to True. this is used to ensure we don't re-fetch same values
# multiple times in a session.
# _MISS_FUNC_STR:
# If set, this function is called for every cache miss.
_ctx = {_FORCE_STR: False}
def set_force_refresh(v):
"""Sets whether cached values should be refreshed.
Args:
v: Boolean representing if cached values should be re-generated.
"""
global _ctx
_ctx[_FORCE_STR] = v
_ctx[_FORCED_FILES_STR] = set()
def set_cache_miss_func(f):
"""Sets the function to call for cache-misses.
Args:
f: The function to call whenever a cache-miss happens (i.e. whenever
the underlying function is called instead of using a cached value).
"""
global _ctx
if f:
_ctx[_MISS_FUNC_STR] = f
else:
# Clear out previously set function, if any.
_ctx.pop(_MISS_FUNC_STR, None)
def set_cache_dir(cache_dir):
"""Sets the cache directory.
If the cache directory is not specified, default ~/.lakshmicache
is used.
Args:
cache_dir: The pathlib.Path object specifying cache directory.
If set to None, caching is disabled.
"""
global _ctx
_ctx[_CACHE_STR] = cache_dir
if cache_dir is None:
return
cache_dir.mkdir(exist_ok=True) # Create cache dir if one doesn't exist.
# Delete old files whose cache values are invalid already.
for file in cache_dir.glob('*_*.lkc'):
days = int(file.name.split('_')[0])
if get_file_age(file) >= days:
file.unlink()
def _valid_cached_value(file, days):
"""Helper function to check if the cached value from file is valid.
Args:
file: The Path object representing a file potentially containing
previously cached value.
days: Number of days after which the cached value becomes invalid.
Returns: True iff the cached value in file is valid.
"""
MAX_DAYS_TO_FORCE_REFRESH = 40
if (
_ctx[_FORCE_STR]
and days < MAX_DAYS_TO_FORCE_REFRESH
and file.name not in _ctx[_FORCED_FILES_STR]
):
# Ignore cached value.
_ctx[_FORCED_FILES_STR].add(file.name)
return False
return (file.exists() and get_file_age(file) < days)
def _call_func(class_obj, func):
"""Helper function to return value of class_obj.func().
In addition to calling function, this helper also calls the
cache_miss function if one is set in the context.
Args:
class_obj: The object of a particular class implementing Cacheable
interface.
func: The function whose return values has to be cached. Assumed
to take no parameters.
Returns: The return value of the func.
"""
global _ctx
if _MISS_FUNC_STR in _ctx:
_ctx[_MISS_FUNC_STR]()
return func(class_obj)
def cache(days):
"""Returns decorator that caches functions return value on disk for
specified number of days.
Args:
days: Number of days for which to cache the return value of the
function.
Returns: The decorator.
"""
def decorator(func):
@functools.wraps(func)
def new_func(class_obj):
global _ctx
if _CACHE_STR not in _ctx:
# Cache dir not set. Set to default.
set_cache_dir(_DEFAULT_DIR)
cache_dir = _ctx[_CACHE_STR]
if not cache_dir:
return _call_func(class_obj, func)
key = f'{func.__qualname__}_{class_obj.cache_key()}'
filename = f'{days}_{md5(key.encode("utf8")).hexdigest()}.lkc'
file = cache_dir / filename
if _valid_cached_value(file, days):
return pickle.loads(file.read_bytes())
value = _call_func(class_obj, func)
file.write_bytes(pickle.dumps(value))
return value
return new_func
return decorator
| 32.025
| 79
| 0.684465
|
import functools
import pickle
from abc import ABC, abstractmethod
from datetime import datetime
from hashlib import md5
from pathlib import Path
class Cacheable(ABC):
@abstractmethod
def cache_key(self):
pass
def get_file_age(file):
return (datetime.today()
- datetime.fromtimestamp(file.stat().st_mtime)).days
_DEFAULT_DIR = Path.home() / '.lakshmicache'
_CACHE_STR = 'cache_dir'
_FORCE_STR = 'force_refresh'
_FORCED_FILES_STR = 'forced_files'
_MISS_FUNC_STR = 'miss_func'
# multiple times in a session.
# _MISS_FUNC_STR:
# If set, this function is called for every cache miss.
_ctx = {_FORCE_STR: False}
def set_force_refresh(v):
global _ctx
_ctx[_FORCE_STR] = v
_ctx[_FORCED_FILES_STR] = set()
def set_cache_miss_func(f):
global _ctx
if f:
_ctx[_MISS_FUNC_STR] = f
else:
# Clear out previously set function, if any.
_ctx.pop(_MISS_FUNC_STR, None)
def set_cache_dir(cache_dir):
global _ctx
_ctx[_CACHE_STR] = cache_dir
if cache_dir is None:
return
cache_dir.mkdir(exist_ok=True) # Create cache dir if one doesn't exist.
for file in cache_dir.glob('*_*.lkc'):
days = int(file.name.split('_')[0])
if get_file_age(file) >= days:
file.unlink()
def _valid_cached_value(file, days):
MAX_DAYS_TO_FORCE_REFRESH = 40
if (
_ctx[_FORCE_STR]
and days < MAX_DAYS_TO_FORCE_REFRESH
and file.name not in _ctx[_FORCED_FILES_STR]
):
_ctx[_FORCED_FILES_STR].add(file.name)
return False
return (file.exists() and get_file_age(file) < days)
def _call_func(class_obj, func):
global _ctx
if _MISS_FUNC_STR in _ctx:
_ctx[_MISS_FUNC_STR]()
return func(class_obj)
def cache(days):
def decorator(func):
@functools.wraps(func)
def new_func(class_obj):
global _ctx
if _CACHE_STR not in _ctx:
set_cache_dir(_DEFAULT_DIR)
cache_dir = _ctx[_CACHE_STR]
if not cache_dir:
return _call_func(class_obj, func)
key = f'{func.__qualname__}_{class_obj.cache_key()}'
filename = f'{days}_{md5(key.encode("utf8")).hexdigest()}.lkc'
file = cache_dir / filename
if _valid_cached_value(file, days):
return pickle.loads(file.read_bytes())
value = _call_func(class_obj, func)
file.write_bytes(pickle.dumps(value))
return value
return new_func
return decorator
| true
| true
|
f715b7a5eadb8af7edda0af1e5732c76618605bb
| 951
|
py
|
Python
|
tests/test_basic.py
|
mustafamerttunali/Tensorflow-Training-GUI
|
ededb2dbfeefeac7ea6bf2986090ebcdf6905f45
|
[
"MIT"
] | 84
|
2019-12-28T15:05:46.000Z
|
2020-12-01T15:10:56.000Z
|
tests/test_basic.py
|
mustafakisacik/Deep-Learning-Training-GUI
|
1992185fd18e768f30c5bb5edd08ea709be97b09
|
[
"MIT"
] | 6
|
2019-12-28T02:18:08.000Z
|
2020-11-13T17:40:14.000Z
|
tests/test_basic.py
|
mustafakisacik/Deep-Learning-Training-GUI
|
1992185fd18e768f30c5bb5edd08ea709be97b09
|
[
"MIT"
] | 23
|
2019-12-29T19:14:23.000Z
|
2020-12-07T09:43:52.000Z
|
import os
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from multiprocessing import Process
def startTensorboard(logdir):
# Start tensorboard with system call
os.system("tensorboard --logdir {}".format(logdir))
def fitModel():
# Create your model
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# Some mock training data
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))
# Run the fit function
model.fit(data, labels, epochs=100, batch_size=32)
if __name__ == '__main__':
# Run both processes simultaneously
Process(target=startTensorboard, args=("logs",)).start()
Process(target=fitModel).start()
| 28.818182
| 60
| 0.684543
|
import os
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from multiprocessing import Process
def startTensorboard(logdir):
os.system("tensorboard --logdir {}".format(logdir))
def fitModel():
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))
model.fit(data, labels, epochs=100, batch_size=32)
if __name__ == '__main__':
Process(target=startTensorboard, args=("logs",)).start()
Process(target=fitModel).start()
| true
| true
|
f715b9d364ffce61b2e55ddeebf9ea8f7ff852a8
| 1,094
|
py
|
Python
|
tests/contrib/hooks/test_nomad_hook.py
|
YotpoLtd/incubator-airflow
|
86bd47db6084b23f4eb4b4c1dfc7f0293e4308e2
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2021-07-27T15:47:56.000Z
|
2021-07-27T15:47:56.000Z
|
tests/contrib/hooks/test_nomad_hook.py
|
YotpoLtd/incubator-airflow
|
86bd47db6084b23f4eb4b4c1dfc7f0293e4308e2
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
tests/contrib/hooks/test_nomad_hook.py
|
YotpoLtd/incubator-airflow
|
86bd47db6084b23f4eb4b4c1dfc7f0293e4308e2
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.nomad_hook import NomadHook
class TestNomadHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@patch("airflow.contrib.hooks.nomad_hook.NomadHook.get_nomad_client")
def test_nomad_client_connection(self, get_nomad_client):
NomadHook(nomad_conn_id='nomad_default')
        self.assertTrue(get_nomad_client.called)
if __name__ == '__main__':
unittest.main()
| 30.388889
| 74
| 0.756856
|
import unittest
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.nomad_hook import NomadHook
class TestNomadHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@patch("airflow.contrib.hooks.nomad_hook.NomadHook.get_nomad_client")
def test_nomad_client_connection(self, get_nomad_client):
NomadHook(nomad_conn_id='nomad_default')
        self.assertTrue(get_nomad_client.called)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f715bb669761fdc7d43cef478f02a3c2769d3f57
| 1,243
|
py
|
Python
|
scripts/suse/yum/plugins/yumnotify.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 2
|
2020-11-02T22:08:26.000Z
|
2020-11-14T13:44:46.000Z
|
scripts/suse/yum/plugins/yumnotify.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 4
|
2021-02-06T14:30:48.000Z
|
2021-12-13T20:50:10.000Z
|
scripts/suse/yum/plugins/yumnotify.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 2
|
2020-11-04T06:32:02.000Z
|
2020-11-06T11:01:18.000Z
|
# Copyright (c) 2016 SUSE Linux LLC
# All Rights Reserved.
#
# Author: Bo Maryniuk <bo@suse.de>
import hashlib
import os
from yum import config
from yum.plugins import TYPE_CORE
CK_PATH = "/var/cache/salt/minion/rpmdb.cookie"
RPM_PATH = "/var/lib/rpm/Packages"
requires_api_version = "2.5"
plugin_type = TYPE_CORE
def _get_mtime():
"""
Get the modified time of the RPM Database.
Returns:
Unix ticks
"""
return os.path.exists(RPM_PATH) and int(os.path.getmtime(RPM_PATH)) or 0
def _get_checksum():
"""
Get the checksum of the RPM Database.
Returns:
hexdigest
"""
digest = hashlib.sha256()
with open(RPM_PATH, "rb") as rpm_db_fh:
while True:
buff = rpm_db_fh.read(0x1000)
if not buff:
break
digest.update(buff)
return digest.hexdigest()
def posttrans_hook(conduit):
"""
Hook after the package installation transaction.
:param conduit:
:return:
"""
# Integrate Yum with Salt
if "SALT_RUNNING" not in os.environ:
with open(CK_PATH, "w") as ck_fh:
ck_fh.write(
"{chksum} {mtime}\n".format(chksum=_get_checksum(), mtime=_get_mtime())
)
| 21.067797
| 87
| 0.618665
|
import hashlib
import os
from yum import config
from yum.plugins import TYPE_CORE
CK_PATH = "/var/cache/salt/minion/rpmdb.cookie"
RPM_PATH = "/var/lib/rpm/Packages"
requires_api_version = "2.5"
plugin_type = TYPE_CORE
def _get_mtime():
return os.path.exists(RPM_PATH) and int(os.path.getmtime(RPM_PATH)) or 0
def _get_checksum():
digest = hashlib.sha256()
with open(RPM_PATH, "rb") as rpm_db_fh:
while True:
buff = rpm_db_fh.read(0x1000)
if not buff:
break
digest.update(buff)
return digest.hexdigest()
def posttrans_hook(conduit):
if "SALT_RUNNING" not in os.environ:
with open(CK_PATH, "w") as ck_fh:
ck_fh.write(
"{chksum} {mtime}\n".format(chksum=_get_checksum(), mtime=_get_mtime())
)
| true
| true
|
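The yum plugin above fingerprints the RPM database with a chunked SHA-256 plus the file's mtime and writes both to a cookie file. The same pattern works for any file; here is a small standalone sketch of that fingerprinting step (the helper name and the use of __file__ as the target are illustrative only):

import hashlib
import os

def file_fingerprint(path, chunk_size=0x1000):
    # Chunked reads keep memory flat even for very large files.
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        while True:
            buff = fh.read(chunk_size)
            if not buff:
                break
            digest.update(buff)
    mtime = int(os.path.getmtime(path))
    return "{} {}".format(digest.hexdigest(), mtime)

if __name__ == "__main__":
    print(file_fingerprint(__file__))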
f715bca80298b84ce5bd4435a0da66ffc75de251
| 19,652
|
py
|
Python
|
Bloxorz.py
|
ilkercankaya/Bloxorz
|
212e8f051329f4f7392e336b9a99d5c4ae78c019
|
[
"MIT"
] | null | null | null |
Bloxorz.py
|
ilkercankaya/Bloxorz
|
212e8f051329f4f7392e336b9a99d5c4ae78c019
|
[
"MIT"
] | null | null | null |
Bloxorz.py
|
ilkercankaya/Bloxorz
|
212e8f051329f4f7392e336b9a99d5c4ae78c019
|
[
"MIT"
] | null | null | null |
# 0 is for perpendicular mode
# 1 is for flat mode
# 0 is for X-Axis config
# 1 is for Y-Axis config
from copy import deepcopy
class Block:
def __init__(self, givenboard, mode, config, positionfirstbox, positionsecondbox):
# Copy Board
self.board = givenboard
# Fill the Board with Block
self.board.field[positionfirstbox[0]][positionfirstbox[1]] = 2
if positionsecondbox != []:
self.board.field[positionsecondbox[0]][positionsecondbox[1]] = 2
self.mode = mode
self.config = config
self.positionFirstBox = positionfirstbox
self.positionSecondBox = positionsecondbox
def isgamewon(self):
if self.mode == 0 and self.positionFirstBox == self.board.goal:
return True
else:
return False
def ismovableleft(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1:
return True
else:
return False
if self.config == 1:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] != 1:
return True
else:
return False
except IndexError:
return False
def ismovableright(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
return True
else:
return False
if self.config == 1:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
return True
else:
return False
except IndexError:
return False
def ismovableup(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
elif self.config == 1:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1:
return True
else:
return False
except IndexError:
return False
def ismovabledown(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
elif self.config == 1:
if self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
except IndexError:
return False
def getleft(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
return [firstbox, secondbox, 1, 0]
elif self.mode == 1:
if self.config == 0:
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return [firstbox, [], 0, self.config]
if self.config == 1:
positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return [positionFirstBox, positionSecondBox, 1, self.config]
def moveleft(self):
if self.mode == 0:
if self.ismovableleft():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
# Change Mode and Config
self.mode = 1
self.config = 0
return True
else:
return False
elif self.mode == 1:
if self.ismovableleft():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
# Update object location
self.positionSecondBox = []
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
# Change Mode
self.mode = 0
return True
if self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] = 2
# Update object location
self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return True
else:
return False
def moveright(self):
if self.mode == 0:
if self.ismovableright():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
# Change Mode
self.mode = 1
self.config = 0
return True
else:
return False
elif self.mode == 1:
if self.ismovableright():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionSecondBox[1] + 1] = 2
# Update object location
self.positionFirstBox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
self.positionSecondBox = []
# Change Mode
self.mode = 0
return True
if self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] = 2
# Update object location
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
return True
else:
return False
def getright(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
return [firstbox, secondbox, 1, 0]
elif self.mode == 1:
if self.config == 0:
firstbox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
return [firstbox, [], 0, self.config]
if self.config == 1:
positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
def moveup(self):
if self.mode == 0:
if self.ismovableup():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
# Change Mode
self.mode = 1
self.config = 1
return True
else:
return False
elif self.mode == 1:
if self.ismovableup():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] = 2
# Update object location
self.positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
return True
elif self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
# Update object location
self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
self.positionSecondBox = []
# Change Mode
self.mode = 0
return True
else:
return False
def getup(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
firstbox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
return [firstbox, secondbox, 1, 1]
elif self.mode == 1:
if self.config == 0:
positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
if self.config == 1:
positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
positionSecondBox = []
return [positionFirstBox, positionSecondBox, 0, self.config]
def movedown(self):
if self.mode == 0:
if self.ismovabledown():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
# Change Mode
self.mode = 1
self.config = 1
return True
else:
return False
elif self.mode == 1:
if self.ismovabledown():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
# Update object location
self.positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return True
elif self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
# Update object location
self.positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
self.positionSecondBox = []
# Change Mode
self.mode = 0
return True
else:
return False
def getdown(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
firstbox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return [firstbox, secondbox, 1, 1]
elif self.mode == 1:
if self.config == 0:
# Adjust the box positions
positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
if self.config == 1:
# Adjust the box positions
positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
positionSecondBox = []
return [positionFirstBox, positionSecondBox, 0, self.config]
def printfield(self):
printer = deepcopy(self.board.field).astype(str)
# Transfer the field and print
for i in range(self.board.field.shape[0]):
for j in range(self.board.field.shape[1]):
if self.board.field[i][j] == 1:
printer[i][j] = 'X'
elif self.board.field[i][j] == 0:
printer[i][j] = 'O'
elif self.board.field[i][j] == 2:
printer[i][j] = 'S'
elif self.board.field[i][j] == 3:
printer[i][j] = 'G'
print("Current Board: \n", printer,"\n")
class Board:
def __init__(self, array):
        # Convert the board and store it
for i in range(array.shape[0]):
for j in range(array.shape[1]):
if array[i][j] == 'X':
array[i][j] = 1
elif array[i][j] == 'O':
array[i][j] = 0
elif array[i][j] == 'S':
array[i][j] = 2
elif array[i][j] == 'G':
array[i][j] = 3
self.field = array.astype(int)
for i in range(self.field.shape[0]):
for j in range(self.field.shape[1]):
if self.field[i][j] == 3:
# Update Field And Set The Goal Point
self.field[i][j] = 0
self.goal = [i, j]
break
| 48.403941
| 112
| 0.520507
|
from copy import deepcopy
class Block:
def __init__(self, givenboard, mode, config, positionfirstbox, positionsecondbox):
self.board = givenboard
self.board.field[positionfirstbox[0]][positionfirstbox[1]] = 2
if positionsecondbox != []:
self.board.field[positionsecondbox[0]][positionsecondbox[1]] = 2
self.mode = mode
self.config = config
self.positionFirstBox = positionfirstbox
self.positionSecondBox = positionsecondbox
def isgamewon(self):
if self.mode == 0 and self.positionFirstBox == self.board.goal:
return True
else:
return False
def ismovableleft(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1:
return True
else:
return False
if self.config == 1:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] != 1:
return True
else:
return False
except IndexError:
return False
def ismovableright(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
return True
else:
return False
if self.config == 1:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
return True
else:
return False
except IndexError:
return False
def ismovableup(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
elif self.config == 1:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1:
return True
else:
return False
except IndexError:
return False
def ismovabledown(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
elif self.config == 1:
if self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
except IndexError:
return False
def getleft(self):
if self.mode == 0:
secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
return [firstbox, secondbox, 1, 0]
elif self.mode == 1:
if self.config == 0:
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return [firstbox, [], 0, self.config]
if self.config == 1:
positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return [positionFirstBox, positionSecondBox, 1, self.config]
def moveleft(self):
if self.mode == 0:
if self.ismovableleft():
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] = 2
self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
self.mode = 1
self.config = 0
return True
else:
return False
elif self.mode == 1:
if self.ismovableleft():
if self.config == 0:
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.positionSecondBox = []
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
self.mode = 0
return True
if self.config == 1:
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] = 2
self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return True
else:
return False
def moveright(self):
if self.mode == 0:
if self.ismovableright():
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] = 2
self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
self.mode = 1
self.config = 0
return True
else:
return False
elif self.mode == 1:
if self.ismovableright():
if self.config == 0:
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
self.board.field[self.positionFirstBox[0]][self.positionSecondBox[1] + 1] = 2
self.positionFirstBox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
self.positionSecondBox = []
self.mode = 0
return True
if self.config == 1:
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] = 2
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
return True
else:
return False
def getright(self):
if self.mode == 0:
secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
return [firstbox, secondbox, 1, 0]
elif self.mode == 1:
if self.config == 0:
firstbox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
return [firstbox, [], 0, self.config]
if self.config == 1:
positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
def moveup(self):
if self.mode == 0:
if self.ismovableup():
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] = 2
self.positionSecondBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
self.mode = 1
self.config = 1
return True
else:
return False
elif self.mode == 1:
if self.ismovableup():
if self.config == 0:
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] = 2
self.positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
return True
elif self.config == 1:
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
self.positionSecondBox = []
self.mode = 0
return True
else:
return False
def getup(self):
if self.mode == 0:
secondbox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
firstbox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
return [firstbox, secondbox, 1, 1]
elif self.mode == 1:
if self.config == 0:
positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
if self.config == 1:
positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
positionSecondBox = []
return [positionFirstBox, positionSecondBox, 0, self.config]
def movedown(self):
if self.mode == 0:
if self.ismovabledown():
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] = 2
self.positionSecondBox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
self.mode = 1
self.config = 1
return True
else:
return False
elif self.mode == 1:
if self.ismovabledown():
if self.config == 0:
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
self.positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return True
elif self.config == 1:
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
self.positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
self.positionSecondBox = []
self.mode = 0
return True
else:
return False
def getdown(self):
if self.mode == 0:
secondbox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
firstbox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return [firstbox, secondbox, 1, 1]
elif self.mode == 1:
if self.config == 0:
positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
if self.config == 1:
positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
positionSecondBox = []
return [positionFirstBox, positionSecondBox, 0, self.config]
def printfield(self):
printer = deepcopy(self.board.field).astype(str)
for i in range(self.board.field.shape[0]):
for j in range(self.board.field.shape[1]):
if self.board.field[i][j] == 1:
printer[i][j] = 'X'
elif self.board.field[i][j] == 0:
printer[i][j] = 'O'
elif self.board.field[i][j] == 2:
printer[i][j] = 'S'
elif self.board.field[i][j] == 3:
printer[i][j] = 'G'
print("Current Board: \n", printer,"\n")
class Board:
def __init__(self, array):
for i in range(array.shape[0]):
for j in range(array.shape[1]):
if array[i][j] == 'X':
array[i][j] = 1
elif array[i][j] == 'O':
array[i][j] = 0
elif array[i][j] == 'S':
array[i][j] = 2
elif array[i][j] == 'G':
array[i][j] = 3
self.field = array.astype(int)
for i in range(self.field.shape[0]):
for j in range(self.field.shape[1]):
if self.field[i][j] == 3:
self.field[i][j] = 0
self.goal = [i, j]
break
| true
| true
|
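Neither copy of Bloxorz.py above shows how the two classes are driven, so here is a hedged usage sketch. It assumes the Block and Board classes above are importable from Bloxorz.py and that numpy is installed; the 5x6 layout is invented purely for illustration ('X' wall, 'O' floor, 'S' start, 'G' goal).

import numpy as np
from Bloxorz import Block, Board   # assumes Bloxorz.py above is on the path

layout = np.array([
    ["X", "X", "X", "X", "X", "X"],
    ["X", "O", "O", "O", "O", "X"],
    ["X", "O", "S", "O", "O", "X"],
    ["X", "O", "O", "O", "G", "X"],
    ["X", "X", "X", "X", "X", "X"],
])

board = Board(layout)
# Mode 0 = standing upright, so only the first box position is needed.
block = Block(board, 0, 0, [2, 2], [])
block.printfield()

if block.moveright():              # tips the block over onto columns 3 and 4
    block.printfield()
print("solved:", block.isgamewon())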
f715bcdc38ebdc27ec473e15774bc7f195755daa
| 5,704
|
py
|
Python
|
asposewordscloud/models/bookmark_data.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
asposewordscloud/models/bookmark_data.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
asposewordscloud/models/bookmark_data.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="bookmark_data.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
import json
class BookmarkData(object):
"""DTO for bookmark updating.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'text': 'str'
}
attribute_map = {
'name': 'Name',
'text': 'Text'
}
def __init__(self, name=None, text=None): # noqa: E501
"""BookmarkData - a model defined in Swagger""" # noqa: E501
self._name = None
self._text = None
self.discriminator = None
if name is not None:
self.name = name
if text is not None:
self.text = text
@property
def name(self):
"""Gets the name of this BookmarkData. # noqa: E501
Gets or sets the name of the bookmark. # noqa: E501
:return: The name of this BookmarkData. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this BookmarkData.
Gets or sets the name of the bookmark. # noqa: E501
:param name: The name of this BookmarkData. # noqa: E501
:type: str
"""
self._name = name
@property
def text(self):
"""Gets the text of this BookmarkData. # noqa: E501
Gets or sets text, enclosed in the bookmark. # noqa: E501
:return: The text of this BookmarkData. # noqa: E501
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""Sets the text of this BookmarkData.
Gets or sets text, enclosed in the bookmark. # noqa: E501
:param text: The text of this BookmarkData. # noqa: E501
:type: str
"""
self._text = text
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[self.attribute_map[attr]] = value
return json.dumps(result)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BookmarkData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.409091
| 85
| 0.558555
|
import pprint
import re
import six
import json
class BookmarkData(object):
swagger_types = {
'name': 'str',
'text': 'str'
}
attribute_map = {
'name': 'Name',
'text': 'Text'
}
def __init__(self, name=None, text=None):
self._name = None
self._text = None
self.discriminator = None
if name is not None:
self.name = name
if text is not None:
self.text = text
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def text(self):
return self._text
@text.setter
def text(self, text):
self._text = text
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[self.attribute_map[attr]] = value
return json.dumps(result)
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, BookmarkData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
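A short usage sketch for the generated model above, assuming the asposewordscloud package (or at least this module plus six) is installed; the bookmark name and text are made up.

from asposewordscloud.models.bookmark_data import BookmarkData

bookmark = BookmarkData(name="intro", text="Updated bookmark text")

print(bookmark.name)        # "intro"
print(bookmark.to_dict())   # keys follow swagger_types: {'name': ..., 'text': ...}
print(bookmark.to_json())   # keys follow attribute_map: {"Name": ..., "Text": ...}
print(bookmark == BookmarkData(name="intro", text="Updated bookmark text"))  # True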
f715bcfb6bc96e73744dd0a50b070cfdd7c67ca2
| 1,316
|
py
|
Python
|
torch_glow/tests/nodes/adaptive_avg_pool2d_test.py
|
YonginKwon/glow
|
7d316d028e1792534416755bf80af422adccdaa9
|
[
"Apache-2.0"
] | 2
|
2020-03-23T21:04:00.000Z
|
2020-04-02T22:49:49.000Z
|
torch_glow/tests/nodes/adaptive_avg_pool2d_test.py
|
YonginKwon/glow
|
7d316d028e1792534416755bf80af422adccdaa9
|
[
"Apache-2.0"
] | 1
|
2020-01-06T09:14:32.000Z
|
2020-01-06T09:14:32.000Z
|
torch_glow/tests/nodes/adaptive_avg_pool2d_test.py
|
YonginKwon/glow
|
7d316d028e1792534416755bf80af422adccdaa9
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestAdaptiveAvgPool2d(unittest.TestCase):
def test_adaptive_avg_pool2d_basic(self):
"""Basic test of PyTorch adaptive_avg_pool2d Node."""
def test_f(inputs):
return F.adaptive_avg_pool2d(inputs, (5, 5))
inputs = torch.randn(3, 6, 14, 14)
jitVsGlow(test_f, inputs, expected_fused_ops={
"aten::adaptive_avg_pool2d"})
def test_adaptive_avg_pool2d_nonsquare_inputs(self):
"""Test of PyTorch adaptive_avg_pool2d Node with non-square inputs."""
def test_f(inputs):
return F.adaptive_avg_pool2d(inputs, (3, 3))
inputs = torch.randn(3, 6, 13, 14)
jitVsGlow(test_f, inputs, expected_fused_ops={
"aten::adaptive_avg_pool2d"})
def test_adaptive_avg_pool2d_nonsquare_outputs(self):
"""Test of PyTorch adaptive_avg_pool2d Node with non-square outputs."""
def test_f(inputs):
return F.adaptive_avg_pool2d(inputs, (5, 3))
inputs = torch.randn(3, 6, 14, 14)
jitVsGlow(test_f, inputs, expected_fused_ops={
"aten::adaptive_avg_pool2d"})
| 30.604651
| 82
| 0.668693
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestAdaptiveAvgPool2d(unittest.TestCase):
def test_adaptive_avg_pool2d_basic(self):
def test_f(inputs):
return F.adaptive_avg_pool2d(inputs, (5, 5))
inputs = torch.randn(3, 6, 14, 14)
jitVsGlow(test_f, inputs, expected_fused_ops={
"aten::adaptive_avg_pool2d"})
def test_adaptive_avg_pool2d_nonsquare_inputs(self):
def test_f(inputs):
return F.adaptive_avg_pool2d(inputs, (3, 3))
inputs = torch.randn(3, 6, 13, 14)
jitVsGlow(test_f, inputs, expected_fused_ops={
"aten::adaptive_avg_pool2d"})
def test_adaptive_avg_pool2d_nonsquare_outputs(self):
def test_f(inputs):
return F.adaptive_avg_pool2d(inputs, (5, 3))
inputs = torch.randn(3, 6, 14, 14)
jitVsGlow(test_f, inputs, expected_fused_ops={
"aten::adaptive_avg_pool2d"})
| true
| true
|
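The tests above only check Glow fusion; to see what adaptive_avg_pool2d itself does, a quick shape check (assuming PyTorch is installed) makes the "adaptive" part concrete: the output spatial size is whatever you ask for, independent of the input size.

import torch
import torch.nn.functional as F

x_square = torch.randn(3, 6, 14, 14)
x_ragged = torch.randn(3, 6, 13, 14)

print(F.adaptive_avg_pool2d(x_square, (5, 5)).shape)  # torch.Size([3, 6, 5, 5])
print(F.adaptive_avg_pool2d(x_ragged, (3, 3)).shape)  # torch.Size([3, 6, 3, 3])
print(F.adaptive_avg_pool2d(x_square, (5, 3)).shape)  # torch.Size([3, 6, 5, 3])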
f715bdd5034a351c309d1a984393c7e6094f054e
| 274
|
py
|
Python
|
apigw/typo.py
|
theztd/flaskapp-prom
|
e1f5137c319175fe8fc1db0ede8eec020cd2f008
|
[
"BSD-2-Clause"
] | 2
|
2021-02-27T21:08:00.000Z
|
2021-05-12T13:55:38.000Z
|
apigw/typo.py
|
theztd/flaskapp-prom
|
e1f5137c319175fe8fc1db0ede8eec020cd2f008
|
[
"BSD-2-Clause"
] | null | null | null |
apigw/typo.py
|
theztd/flaskapp-prom
|
e1f5137c319175fe8fc1db0ede8eec020cd2f008
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
def ret_string(name: str) -> str:
print(type(name))
return f"Hi {name}"
for n in ["Karel", "Pepa", 18, "Lucie"]:
try:
print(type(n))
print(ret_string(n))
except TypeError as err:
print(n)
print(err)
| 17.125
| 40
| 0.547445
|
def ret_string(name: str) -> str:
print(type(name))
return f"Hi {name}"
for n in ["Karel", "Pepa", 18, "Lucie"]:
try:
print(type(n))
print(ret_string(n))
except TypeError as err:
print(n)
print(err)
| true
| true
|
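The try/except in typo.py above never actually fires: an annotation such as name: str is not enforced at runtime, and an f-string calls str() on its value, so passing 18 simply produces "Hi 18". A minimal check using only the standard library:

def ret_string(name: str) -> str:
    return f"Hi {name}"

print(ret_string(18))                 # Hi 18 - no TypeError despite the annotation
print(ret_string.__annotations__)     # {'name': <class 'str'>, 'return': <class 'str'>}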
f715bdeeabab1bf9416cdf699d275a46c2adb6d6
| 270
|
py
|
Python
|
wafw00f/plugins/knownsec.py
|
aqyoung/scan-wafw00f
|
a95a94253f138d5ef791232ef4d8371de41622b6
|
[
"BSD-3-Clause"
] | 1
|
2019-08-01T11:19:55.000Z
|
2019-08-01T11:19:55.000Z
|
wafw00f/plugins/knownsec.py
|
aqyoung/scan-wafw00f
|
a95a94253f138d5ef791232ef4d8371de41622b6
|
[
"BSD-3-Clause"
] | null | null | null |
wafw00f/plugins/knownsec.py
|
aqyoung/scan-wafw00f
|
a95a94253f138d5ef791232ef4d8371de41622b6
|
[
"BSD-3-Clause"
] | 2
|
2017-12-27T15:56:15.000Z
|
2017-12-27T20:03:09.000Z
|
#!/usr/bin/env python
NAME = 'KS-WAF (KnownSec)'
def is_waf(self):
for attack in self.attacks:
r = attack(self)
if r is None:
return
_, page = r
if b'/ks-waf-error.png' in page:
return True
return False
| 18
| 40
| 0.525926
|
NAME = 'KS-WAF (KnownSec)'
def is_waf(self):
for attack in self.attacks:
r = attack(self)
if r is None:
return
_, page = r
if b'/ks-waf-error.png' in page:
return True
return False
| true
| true
|
f715be36cb847900ba0b72075d63650894204e29
| 16,357
|
py
|
Python
|
google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py
|
googleapis/python-bigquery-storage
|
acc92249013f1b31fdac2aa4bf5a6864730d7422
|
[
"Apache-2.0"
] | 44
|
2020-02-12T21:28:37.000Z
|
2022-03-31T06:16:30.000Z
|
google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py
|
googleapis/python-bigquery-storage
|
acc92249013f1b31fdac2aa4bf5a6864730d7422
|
[
"Apache-2.0"
] | 178
|
2020-02-05T10:49:45.000Z
|
2022-03-31T01:48:44.000Z
|
google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py
|
googleapis/python-bigquery-storage
|
acc92249013f1b31fdac2aa4bf5a6864730d7422
|
[
"Apache-2.0"
] | 23
|
2020-02-05T23:12:15.000Z
|
2022-02-24T08:33:14.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.bigquery_storage_v1beta2.types import storage
from google.cloud.bigquery_storage_v1beta2.types import stream
from .base import BigQueryReadTransport, DEFAULT_CLIENT_INFO
from .grpc import BigQueryReadGrpcTransport
class BigQueryReadGrpcAsyncIOTransport(BigQueryReadTransport):
"""gRPC AsyncIO backend transport for BigQueryRead.
BigQuery Read API.
The Read API can be used to read data from BigQuery.
    New code should use the v1 Read API going forward if it does not
    also use the Write API at the same time.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def create_read_session(
self,
) -> Callable[[storage.CreateReadSessionRequest], Awaitable[stream.ReadSession]]:
r"""Return a callable for the create read session method over gRPC.
Creates a new read session. A read session divides
the contents of a BigQuery table into one or more
streams, which can then be used to read data from the
table. The read session also specifies properties of the
        data to be read, such as a list of columns or a
        push-down filter describing the rows to be returned.
A particular row can be read by at most one stream. When
the caller has reached the end of each stream in the
session, then all the data in the table has been read.
Data is assigned to each stream such that roughly the
same number of rows can be read from each stream.
Because the server-side unit for assigning data is
collections of rows, the API does not guarantee that
        each stream will return the same number of rows.
Additionally, the limits are enforced based on the
number of pre-filtered rows, so some filters can lead to
lopsided assignments.
Read sessions automatically expire 6 hours after they
are created and do not require manual clean-up by the
caller.
Returns:
Callable[[~.CreateReadSessionRequest],
Awaitable[~.ReadSession]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_read_session" not in self._stubs:
self._stubs["create_read_session"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession",
request_serializer=storage.CreateReadSessionRequest.serialize,
response_deserializer=stream.ReadSession.deserialize,
)
return self._stubs["create_read_session"]
@property
def read_rows(
self,
) -> Callable[[storage.ReadRowsRequest], Awaitable[storage.ReadRowsResponse]]:
r"""Return a callable for the read rows method over gRPC.
Reads rows from the stream in the format prescribed
by the ReadSession. Each response contains one or more
table rows, up to a maximum of 100 MiB per response;
read requests which attempt to read individual rows
larger than 100 MiB will fail.
Each request also returns a set of stream statistics
reflecting the current state of the stream.
Returns:
Callable[[~.ReadRowsRequest],
Awaitable[~.ReadRowsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "read_rows" not in self._stubs:
self._stubs["read_rows"] = self.grpc_channel.unary_stream(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows",
request_serializer=storage.ReadRowsRequest.serialize,
response_deserializer=storage.ReadRowsResponse.deserialize,
)
return self._stubs["read_rows"]
@property
def split_read_stream(
self,
) -> Callable[
[storage.SplitReadStreamRequest], Awaitable[storage.SplitReadStreamResponse]
]:
r"""Return a callable for the split read stream method over gRPC.
Splits a given ``ReadStream`` into two ``ReadStream`` objects.
These ``ReadStream`` objects are referred to as the primary and
the residual streams of the split. The original ``ReadStream``
can still be read from in the same manner as before. Both of the
returned ``ReadStream`` objects can also be read from, and the
rows returned by both child streams will be the same as the rows
read from the original stream.
Moreover, the two child streams will be allocated back-to-back
in the original ``ReadStream``. Concretely, it is guaranteed
that for streams original, primary, and residual, that
original[0-j] = primary[0-j] and original[j-n] = residual[0-m]
once the streams have been read to completion.
Returns:
Callable[[~.SplitReadStreamRequest],
Awaitable[~.SplitReadStreamResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "split_read_stream" not in self._stubs:
self._stubs["split_read_stream"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream",
request_serializer=storage.SplitReadStreamRequest.serialize,
response_deserializer=storage.SplitReadStreamResponse.deserialize,
)
return self._stubs["split_read_stream"]
def close(self):
return self.grpc_channel.close()
__all__ = ("BigQueryReadGrpcAsyncIOTransport",)
| 45.310249
| 88
| 0.645656
|
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import packaging.version
import grpc
from grpc.experimental import aio
from google.cloud.bigquery_storage_v1beta2.types import storage
from google.cloud.bigquery_storage_v1beta2.types import stream
from .base import BigQueryReadTransport, DEFAULT_CLIENT_INFO
from .grpc import BigQueryReadGrpcTransport
class BigQueryReadGrpcAsyncIOTransport(BigQueryReadTransport):
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
return self._grpc_channel
@property
def create_read_session(
self,
) -> Callable[[storage.CreateReadSessionRequest], Awaitable[stream.ReadSession]]:
if "create_read_session" not in self._stubs:
self._stubs["create_read_session"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession",
request_serializer=storage.CreateReadSessionRequest.serialize,
response_deserializer=stream.ReadSession.deserialize,
)
return self._stubs["create_read_session"]
@property
def read_rows(
self,
) -> Callable[[storage.ReadRowsRequest], Awaitable[storage.ReadRowsResponse]]:
if "read_rows" not in self._stubs:
self._stubs["read_rows"] = self.grpc_channel.unary_stream(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows",
request_serializer=storage.ReadRowsRequest.serialize,
response_deserializer=storage.ReadRowsResponse.deserialize,
)
return self._stubs["read_rows"]
@property
def split_read_stream(
self,
) -> Callable[
[storage.SplitReadStreamRequest], Awaitable[storage.SplitReadStreamResponse]
]:
if "split_read_stream" not in self._stubs:
self._stubs["split_read_stream"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream",
request_serializer=storage.SplitReadStreamRequest.serialize,
response_deserializer=storage.SplitReadStreamResponse.deserialize,
)
return self._stubs["split_read_stream"]
def close(self):
return self.grpc_channel.close()
__all__ = ("BigQueryReadGrpcAsyncIOTransport",)
| true
| true
|
f715bec4b32bc353255f534021aae397a4a5e309
| 5,398
|
py
|
Python
|
repair/evaluate.py
|
h4iku/repairSStuBs
|
0caa6269801d13f4743e6b2c8d34c01057f3b4b7
|
[
"MIT"
] | 2
|
2021-07-16T04:30:10.000Z
|
2022-01-05T01:33:42.000Z
|
repair/evaluate.py
|
h4iku/repairSStuBs
|
0caa6269801d13f4743e6b2c8d34c01057f3b4b7
|
[
"MIT"
] | 1
|
2021-06-17T06:57:49.000Z
|
2021-06-18T00:13:14.000Z
|
repair/evaluate.py
|
h4iku/repairSStuBs
|
0caa6269801d13f4743e6b2c8d34c01057f3b4b7
|
[
"MIT"
] | 1
|
2021-05-27T05:50:17.000Z
|
2021-05-27T05:50:17.000Z
|
import csv
import difflib
import shutil
from collections import defaultdict
from statistics import mean
from pytablewriter import MarkdownTableWriter
from tqdm import tqdm
from utils.config import CORRECT_PATCHES, INPUT, REPAIR_OUTPUT, REPAIR_RESULT
class Result:
def __init__(self, buggy_file_line_dir, comparison_result,
fixed_file_line_dir, file_name, project_name, bug_type):
self.buggy_file_line_dir = buggy_file_line_dir
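        # comparison_result comes from the CSV as a stringified list of booleans, one per generated patch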
self.comparison_result = eval(comparison_result)
self.fixed_file_line_dir = fixed_file_line_dir
self.file_name = file_name
self.project_name = project_name
self.bug_type = bug_type
self.buggy_file = INPUT / self.buggy_file_line_dir / self.file_name
self.fixed_file = INPUT / self.fixed_file_line_dir / self.file_name
if self.fix_patch_number():
self.genfixed_file = (REPAIR_OUTPUT / self.buggy_file_line_dir /
str(self.fix_patch_number()) / self.file_name)
else:
self.genfixed_file = None
def __eq__(self, other):
return (self.buggy_file_line_dir == other.buggy_file_line_dir
and self.bug_type == other.bug_type)
def fix_patch_number(self):
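        """Return the 1-based index of the first correct patch, or None if no patch matched."""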
if True in self.comparison_result:
return self.comparison_result.index(True) + 1
else:
return None
def copy_files(self):
"""Copies result files to a designated directory"""
# Create the destination directory
copy_path = (CORRECT_PATCHES / self.buggy_file_line_dir)
copy_path.mkdir(parents=True, exist_ok=True)
# Copy the buggy file
shutil.copyfile(self.buggy_file, copy_path / 'BuggyFile.java')
# Copy the actual fixed file
shutil.copyfile(self.fixed_file, copy_path / 'FixedFile.java')
# Copy the correctly generated fixed file
shutil.copyfile(self.genfixed_file, copy_path /
'GeneratedFixFile.java')
def generate_diffs(self):
save_path = (CORRECT_PATCHES / self.buggy_file_line_dir)
save_path.mkdir(parents=True, exist_ok=True)
# Diff between buggy file and actual fixed file
with open(self.buggy_file) as buggy_file:
with open(self.fixed_file) as fixed_file:
bugfix_diff = difflib.unified_diff(
buggy_file.readlines(),
fixed_file.readlines(),
fromfile='BuggyFile.java', tofile='FixedFile.java'
)
with open(save_path / 'bugfix.diff', 'w') as bugfix_file:
bugfix_file.writelines(bugfix_diff)
# Diff between buggy file and the generated fixed file
with open(self.buggy_file) as buggy_file:
with open(self.genfixed_file) as genfixed_file:
genfix_diff = difflib.unified_diff(
buggy_file.readlines(),
genfixed_file.readlines(),
fromfile='BuggyFile.java', tofile='GeneratedFixFile.java'
)
with open(save_path / 'genfix.diff', 'w') as genfix_file:
genfix_file.writelines(genfix_diff)
def main():
with open(REPAIR_RESULT, newline='') as file:
reader = csv.reader(file)
all_results = [Result(*line) for line in reader]
# Removing duplicates
results = []
for result in all_results:
if result not in results:
results.append(result)
# Copying results and generating diffs
for res in tqdm(results):
if res.fix_patch_number():
res.copy_files()
res.generate_diffs()
# Evaluating
total_gen_patches = [res.comparison_result for res in results]
num_total_gen_patches = [len(x) for x in total_gen_patches]
print(f'Total generated patches: {sum(num_total_gen_patches)}')
print(f'min: {min(num_total_gen_patches)}, '
f'max: {max(num_total_gen_patches)}, '
f'avg: {mean(num_total_gen_patches)}')
num_fixes = [1 for x in total_gen_patches if any(x)]
print(f'Total bugs: {len(results)}', f'Fixed: {sum(num_fixes)}')
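    # per bug type: [number of bugs, number correctly fixed, list of generated-patch counts]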
patterns = defaultdict(lambda: [0, 0, []])
for res in results:
gen_patches = res.comparison_result
patterns[res.bug_type][-1].append(len(gen_patches))
if any(gen_patches):
patterns[res.bug_type][0] += 1
patterns[res.bug_type][1] += 1
else:
patterns[res.bug_type][0] += 1
print('Number of min, max, avg generated patches:')
print([(ptn, min(vals[-1]), max(vals[-1]), mean(vals[-1]))
for ptn, vals in patterns.items()])
# Sort by the number of bugs
patterns_list = sorted(patterns.items(),
key=lambda x: x[1][0], reverse=True)
value_matrix = [
[ptn] + vals[:-1] + [f'{(vals[1] / vals[0]) * 100:.2f}%']
for ptn, vals in patterns_list
]
value_matrix.append(
['Total', sstubs := len(results),
corrects := sum(num_fixes),
f'{(corrects / sstubs) * 100:.2f}%']
)
# Configuring the Markdown table
writer = MarkdownTableWriter(
table_name="repair_results",
headers=["Pattern Name", "SStuBs", "Correct Patches", "Ratio"],
value_matrix=value_matrix,
)
writer.write_table()
if __name__ == '__main__':
main()
| 35.051948
| 80
| 0.626343
|
import csv
import difflib
import shutil
from collections import defaultdict
from statistics import mean
from pytablewriter import MarkdownTableWriter
from tqdm import tqdm
from utils.config import CORRECT_PATCHES, INPUT, REPAIR_OUTPUT, REPAIR_RESULT
class Result:
def __init__(self, buggy_file_line_dir, comparison_result,
fixed_file_line_dir, file_name, project_name, bug_type):
self.buggy_file_line_dir = buggy_file_line_dir
self.comparison_result = eval(comparison_result)
self.fixed_file_line_dir = fixed_file_line_dir
self.file_name = file_name
self.project_name = project_name
self.bug_type = bug_type
self.buggy_file = INPUT / self.buggy_file_line_dir / self.file_name
self.fixed_file = INPUT / self.fixed_file_line_dir / self.file_name
if self.fix_patch_number():
self.genfixed_file = (REPAIR_OUTPUT / self.buggy_file_line_dir /
str(self.fix_patch_number()) / self.file_name)
else:
self.genfixed_file = None
def __eq__(self, other):
return (self.buggy_file_line_dir == other.buggy_file_line_dir
and self.bug_type == other.bug_type)
def fix_patch_number(self):
if True in self.comparison_result:
return self.comparison_result.index(True) + 1
else:
return None
def copy_files(self):
copy_path = (CORRECT_PATCHES / self.buggy_file_line_dir)
copy_path.mkdir(parents=True, exist_ok=True)
shutil.copyfile(self.buggy_file, copy_path / 'BuggyFile.java')
shutil.copyfile(self.fixed_file, copy_path / 'FixedFile.java')
shutil.copyfile(self.genfixed_file, copy_path /
'GeneratedFixFile.java')
def generate_diffs(self):
save_path = (CORRECT_PATCHES / self.buggy_file_line_dir)
save_path.mkdir(parents=True, exist_ok=True)
with open(self.buggy_file) as buggy_file:
with open(self.fixed_file) as fixed_file:
bugfix_diff = difflib.unified_diff(
buggy_file.readlines(),
fixed_file.readlines(),
fromfile='BuggyFile.java', tofile='FixedFile.java'
)
with open(save_path / 'bugfix.diff', 'w') as bugfix_file:
bugfix_file.writelines(bugfix_diff)
with open(self.buggy_file) as buggy_file:
with open(self.genfixed_file) as genfixed_file:
genfix_diff = difflib.unified_diff(
buggy_file.readlines(),
genfixed_file.readlines(),
fromfile='BuggyFile.java', tofile='GeneratedFixFile.java'
)
with open(save_path / 'genfix.diff', 'w') as genfix_file:
genfix_file.writelines(genfix_diff)
def main():
with open(REPAIR_RESULT, newline='') as file:
reader = csv.reader(file)
all_results = [Result(*line) for line in reader]
results = []
for result in all_results:
if result not in results:
results.append(result)
for res in tqdm(results):
if res.fix_patch_number():
res.copy_files()
res.generate_diffs()
total_gen_patches = [res.comparison_result for res in results]
num_total_gen_patches = [len(x) for x in total_gen_patches]
print(f'Total generated patches: {sum(num_total_gen_patches)}')
print(f'min: {min(num_total_gen_patches)}, '
f'max: {max(num_total_gen_patches)}, '
f'avg: {mean(num_total_gen_patches)}')
num_fixes = [1 for x in total_gen_patches if any(x)]
print(f'Total bugs: {len(results)}', f'Fixed: {sum(num_fixes)}')
patterns = defaultdict(lambda: [0, 0, []])
for res in results:
gen_patches = res.comparison_result
patterns[res.bug_type][-1].append(len(gen_patches))
if any(gen_patches):
patterns[res.bug_type][0] += 1
patterns[res.bug_type][1] += 1
else:
patterns[res.bug_type][0] += 1
print('Number of min, max, avg generated patches:')
print([(ptn, min(vals[-1]), max(vals[-1]), mean(vals[-1]))
for ptn, vals in patterns.items()])
patterns_list = sorted(patterns.items(),
key=lambda x: x[1][0], reverse=True)
value_matrix = [
[ptn] + vals[:-1] + [f'{(vals[1] / vals[0]) * 100:.2f}%']
for ptn, vals in patterns_list
]
value_matrix.append(
['Total', sstubs := len(results),
corrects := sum(num_fixes),
f'{(corrects / sstubs) * 100:.2f}%']
)
writer = MarkdownTableWriter(
table_name="repair_results",
headers=["Pattern Name", "SStuBs", "Correct Patches", "Ratio"],
value_matrix=value_matrix,
)
writer.write_table()
if __name__ == '__main__':
main()
| true
| true
|
f715bf4feddced17be81d083c4130de44ac9c701
| 1,692
|
py
|
Python
|
multiple-images/images/migrations/0001_initial.py
|
mp5maker/django
|
a2d38e2e9973e755afce1bd0ccb17e58f3db7e33
|
[
"MIT"
] | null | null | null |
multiple-images/images/migrations/0001_initial.py
|
mp5maker/django
|
a2d38e2e9973e755afce1bd0ccb17e58f3db7e33
|
[
"MIT"
] | 13
|
2020-02-12T00:14:20.000Z
|
2022-02-10T08:46:42.000Z
|
multiple-images/images/migrations/0001_initial.py
|
mp5maker/django
|
a2d38e2e9973e755afce1bd0ccb17e58f3db7e33
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-06-13 17:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Description',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('description', models.TextField(blank=True)),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
migrations.CreateModel(
name='Images',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('image', models.ImageField(blank=True, upload_to='images/%y/%m/%d')),
('description', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='images.Description')),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
]
| 36
| 144
| 0.536643
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Description',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('description', models.TextField(blank=True)),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
migrations.CreateModel(
name='Images',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('image', models.ImageField(blank=True, upload_to='images/%y/%m/%d')),
('description', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='images.Description')),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
]
| true
| true
|
f715bf76749dbe6664dac3361f0fd6ab0369fb12
| 1,123
|
py
|
Python
|
websauna/tests/core/test_views.py
|
stevepiercy/websauna
|
2886b86f7920d75900c634958779d61aa73f011b
|
[
"CNRI-Python"
] | 286
|
2016-01-17T05:44:02.000Z
|
2022-02-07T20:28:49.000Z
|
websauna/tests/core/test_views.py
|
stevepiercy/websauna
|
2886b86f7920d75900c634958779d61aa73f011b
|
[
"CNRI-Python"
] | 203
|
2016-03-15T02:00:53.000Z
|
2021-09-27T10:48:49.000Z
|
websauna/tests/core/test_views.py
|
ooduor/websauna
|
2e78cd87eda305fbbb1080d386b8cf96537360e5
|
[
"CNRI-Python"
] | 71
|
2016-01-17T11:04:26.000Z
|
2021-08-24T08:04:31.000Z
|
# Standard Library
import os
import pytest
# Websauna
from websauna.system import Initializer
from websauna.system.core.route import add_template_only_view
from websauna.tests.fixtures import get_app
from websauna.tests.webserver import customized_web_server
HERE = os.path.abspath(os.path.dirname(__file__))
def extra_init(init: Initializer):
"""Configure one templated only view."""
config = init.config
config.add_jinja2_search_path(HERE + "/templates", name=".html")
add_template_only_view(config, "/dummy", "dummy", "dummy.html")
@pytest.fixture(scope="module")
def app(request, ini_settings):
"""Construct a WSGI app with tutorial models and admins loaded."""
app = get_app(ini_settings, extra_init=extra_init)
return app
@pytest.fixture(scope="module")
def web_server(request, app):
"""Run a web server
with tutorial installed."""
web_server = customized_web_server(request, app)
return web_server()
def test_template_only_view(browser, web_server):
"""See that we can register and render a template only view."""
browser.visit(web_server + "/dummy")
| 26.738095
| 70
| 0.743544
|
import os
import pytest
from websauna.system import Initializer
from websauna.system.core.route import add_template_only_view
from websauna.tests.fixtures import get_app
from websauna.tests.webserver import customized_web_server
HERE = os.path.abspath(os.path.dirname(__file__))
def extra_init(init: Initializer):
config = init.config
config.add_jinja2_search_path(HERE + "/templates", name=".html")
add_template_only_view(config, "/dummy", "dummy", "dummy.html")
@pytest.fixture(scope="module")
def app(request, ini_settings):
app = get_app(ini_settings, extra_init=extra_init)
return app
@pytest.fixture(scope="module")
def web_server(request, app):
web_server = customized_web_server(request, app)
return web_server()
def test_template_only_view(browser, web_server):
browser.visit(web_server + "/dummy")
| true
| true
|
f715c03152794cffe7e9e530cbda79e5552a407d
| 591
|
bzl
|
Python
|
Examples/ReactNativeKakaoExample/android/app/build_defs.bzl
|
namdq97/react-native-kakao-login
|
603d4f75c912ecdefcfbc2bb7ace02b530a06083
|
[
"MIT"
] | 46
|
2017-05-14T13:01:24.000Z
|
2022-01-19T00:35:23.000Z
|
Examples/ReactNativeKakaoExample/android/app/build_defs.bzl
|
namdq97/react-native-kakao-login
|
603d4f75c912ecdefcfbc2bb7ace02b530a06083
|
[
"MIT"
] | 12
|
2018-01-12T08:00:27.000Z
|
2019-08-11T03:07:47.000Z
|
Examples/ReactNativeKakaoExample/android/app/build_defs.bzl
|
namdq97/react-native-kakao-login
|
603d4f75c912ecdefcfbc2bb7ace02b530a06083
|
[
"MIT"
] | 16
|
2017-05-14T13:29:53.000Z
|
2020-11-26T04:01:46.000Z
|
"""Helper definitions to glob .aar and .jar targets"""
def create_aar_targets(aarfiles):
for aarfile in aarfiles:
name = "aars__" + aarfile[aarfile.rindex("/") + 1:aarfile.rindex(".aar")]
lib_deps.append(":" + name)
android_prebuilt_aar(
name = name,
aar = aarfile,
)
def create_jar_targets(jarfiles):
for jarfile in jarfiles:
name = "jars__" + jarfile[jarfile.rindex("/") + 1:jarfile.rindex(".jar")]
lib_deps.append(":" + name)
prebuilt_jar(
name = name,
binary_jar = jarfile,
)
| 34.764706
| 81
| 0.57868
|
def create_aar_targets(aarfiles):
for aarfile in aarfiles:
name = "aars__" + aarfile[aarfile.rindex("/") + 1:aarfile.rindex(".aar")]
lib_deps.append(":" + name)
android_prebuilt_aar(
name = name,
aar = aarfile,
)
def create_jar_targets(jarfiles):
for jarfile in jarfiles:
name = "jars__" + jarfile[jarfile.rindex("/") + 1:jarfile.rindex(".jar")]
lib_deps.append(":" + name)
prebuilt_jar(
name = name,
binary_jar = jarfile,
)
| true
| true
|
f715c0b803bbc30d25555211d20acb2cc3914485
| 21
|
py
|
Python
|
prm/__init__.py
|
fz420/prm
|
19d8b27a679d6f9e669e019a563c3433025ba0c6
|
[
"MIT"
] | null | null | null |
prm/__init__.py
|
fz420/prm
|
19d8b27a679d6f9e669e019a563c3433025ba0c6
|
[
"MIT"
] | 3
|
2021-03-05T06:42:18.000Z
|
2021-04-30T03:34:30.000Z
|
prm/__init__.py
|
fz420/prm
|
19d8b27a679d6f9e669e019a563c3433025ba0c6
|
[
"MIT"
] | 2
|
2021-04-23T03:19:57.000Z
|
2021-04-23T03:49:55.000Z
|
from .prm import main
| 21
| 21
| 0.809524
|
from .prm import main
| true
| true
|
f715c0cdf7d479cb571c7245fd89a407280d6b17
| 5,913
|
py
|
Python
|
src/visualisation/arrow.py
|
sdat2/seager19
|
9c3acbc5332da787de1eda2600a82490ff20fa11
|
[
"MIT"
] | 5
|
2021-04-08T19:03:52.000Z
|
2021-12-17T14:22:49.000Z
|
src/visualisation/arrow.py
|
sdat2/seager19
|
9c3acbc5332da787de1eda2600a82490ff20fa11
|
[
"MIT"
] | 25
|
2021-04-08T13:53:11.000Z
|
2022-03-17T19:45:15.000Z
|
src/visualisation/arrow.py
|
sdat2/seager19
|
9c3acbc5332da787de1eda2600a82490ff20fa11
|
[
"MIT"
] | null | null | null |
"""Arrow plots for mechanism."""
import os
from src.plot_utils import ps_defaults
from src.constants import FIGURE_PATH
from typing import Optional
import matplotlib.pyplot as plt
def plot_arrow_plot(save_path: Optional[str] = None, show_plots: bool = False) -> None:
"""
Plot the arrow plot to show that I have reproduced the paper.
Args:
save_path (Optional[str], optional): Where to save the plot to.
Defaults to None. If None will not save.
show_plots (bool, optional): Whether to show plots. Defaults to False.
"""
ps_defaults(use_tex=False)
color_d = {
"EEEE": "blue",
"EECE": "green",
"EEEC": "orange",
"EECC": "red",
}
def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
plt.fill_between(
[x - 0.2, x + 0.2],
[y + yerr, y + yerr],
[y - yerr, y - yerr],
color=color_d[mem],
alpha=0.5,
)
plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
xlim = [0.5, 3.5]
head_length = 0.02
decrease_arrow = 0.01
ax = plt.axes()
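    # ECMWF/ORAS4 baseline trend in nino3.4 (K); each arrow adds one mechanism's contribution on top of it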
ecmwf = 0.411
# ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')
ax.arrow(
1,
ecmwf,
0,
0.054 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(1, ecmwf + 0.054, 0.005, "EECE")
ax.arrow(
2,
ecmwf,
0,
0.31 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(2, ecmwf + 0.31, 0.03, "EEEC")
ax.arrow(
3,
ecmwf,
0,
0.47 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(3, ecmwf + 0.47, 0.04, "EECC")
plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
plt.plot(
xlim, [ecmwf + 0.478, ecmwf + 0.478], color="red", label="CMIP5 MMM $= 0.889$ K"
)
# plt.xticks([0, 1, 2, 3], ["ECMWF", "W", "RH", "RH+W"])
plt.xticks(
[1, 2, 3],
[
"W\n" + r"$+ 0.054 \pm 0.005$ K ",
"RH\n " + r"$+ 0.31 \pm 0.03$ K",
"RH+W\n " + r"$+ 0.47 \pm 0.04$ K",
],
)
plt.xlim(xlim)
plt.ylabel("1958-2017, Trend in nino3.4 [K]")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1, 0.102),
loc="lower left",
mode="expand",
ncol=2,
)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
if show_plots:
plt.show()
else:
plt.clf()
def plot_arrow_plot_6(
save_path: Optional[str] = None, show_plots: bool = False
) -> None:
"""
Plot the arrow plot to show how it performs in cmip6.
Args:
save_path (Optional[str], optional): Where to save the plot to.
Defaults to None. If None will not save.
show_plots (bool, optional): Whether to show plots. Defaults to False.
"""
ps_defaults(use_tex=False)
color_d = {
"EEEE": "blue",
"EECE": "green",
"EEEC": "orange",
"EECC": "red",
}
def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
plt.fill_between(
[x - 0.2, x + 0.2],
[y + yerr, y + yerr],
[y - yerr, y - yerr],
color=color_d[mem],
alpha=0.5,
)
plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
xlim = [0.5, 3.5]
head_length = 0.02
decrease_arrow = 0.01
ax = plt.axes()
ecmwf = 0.411
# ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')
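    # trend contributions (K) and uncertainties for the W, RH and RH+W mechanisms; cmip6 is the CMIP6 multi-model mean trend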
wind = 0.07
wind_error = 0.01
rh = 0.15
rh_error = 0.02
cmip6 = 0.772
rh_and_wind = 0.29
rh_and_wind_error = 0.04
ax.arrow(
1,
ecmwf,
0,
wind - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(1, ecmwf + wind, wind_error, "EECE")
ax.arrow(
2,
ecmwf,
0,
rh - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(2, ecmwf + rh, rh_error, "EEEC")
ax.arrow(
3,
ecmwf,
0,
rh_and_wind - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(3, ecmwf + rh_and_wind, rh_and_wind_error, "EECC")
plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
plt.plot(
xlim,
[cmip6, cmip6],
color="red",
label="CMIP6 MMM $= 0.772$ K",
)
# plt.xticks([0, 1, 2, 3], ["ECMWF", "W", "RH", "RH+W"])
plt.xticks(
[1, 2, 3],
[
"W\n"
+ r"$+ $"
+ str(wind)
+ r" $\pm$ "
+ r"$"
+ str(wind_error)
+ r"$"
+ " K ",
"RH\n " + r"$+ $ $0.15$ $\pm$ $0.02$ K",
"RH+W\n " + r"$+ $ $0.29$ $\pm$ $0.04$ K",
],
)
plt.xlim(xlim)
plt.ylabel("1958-2017, Trend in nino3.4 [K]")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1, 0.102),
loc="lower left",
mode="expand",
ncol=2,
)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
if show_plots:
plt.show()
else:
plt.clf()
if __name__ == "__main__":
# python src/visualisation.arrow()
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.pdf"))
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.png"))
| 25.161702
| 88
| 0.498393
|
import os
from src.plot_utils import ps_defaults
from src.constants import FIGURE_PATH
from typing import Optional
import matplotlib.pyplot as plt
def plot_arrow_plot(save_path: Optional[str] = None, show_plots: bool = False) -> None:
ps_defaults(use_tex=False)
color_d = {
"EEEE": "blue",
"EECE": "green",
"EEEC": "orange",
"EECC": "red",
}
def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
plt.fill_between(
[x - 0.2, x + 0.2],
[y + yerr, y + yerr],
[y - yerr, y - yerr],
color=color_d[mem],
alpha=0.5,
)
plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
xlim = [0.5, 3.5]
head_length = 0.02
decrease_arrow = 0.01
ax = plt.axes()
ecmwf = 0.411
ax.arrow(
1,
ecmwf,
0,
0.054 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(1, ecmwf + 0.054, 0.005, "EECE")
ax.arrow(
2,
ecmwf,
0,
0.31 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(2, ecmwf + 0.31, 0.03, "EEEC")
ax.arrow(
3,
ecmwf,
0,
0.47 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(3, ecmwf + 0.47, 0.04, "EECC")
plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
plt.plot(
xlim, [ecmwf + 0.478, ecmwf + 0.478], color="red", label="CMIP5 MMM $= 0.889$ K"
)
plt.xticks(
[1, 2, 3],
[
"W\n" + r"$+ 0.054 \pm 0.005$ K ",
"RH\n " + r"$+ 0.31 \pm 0.03$ K",
"RH+W\n " + r"$+ 0.47 \pm 0.04$ K",
],
)
plt.xlim(xlim)
plt.ylabel("1958-2017, Trend in nino3.4 [K]")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1, 0.102),
loc="lower left",
mode="expand",
ncol=2,
)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
if show_plots:
plt.show()
else:
plt.clf()
def plot_arrow_plot_6(
save_path: Optional[str] = None, show_plots: bool = False
) -> None:
ps_defaults(use_tex=False)
color_d = {
"EEEE": "blue",
"EECE": "green",
"EEEC": "orange",
"EECC": "red",
}
def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
plt.fill_between(
[x - 0.2, x + 0.2],
[y + yerr, y + yerr],
[y - yerr, y - yerr],
color=color_d[mem],
alpha=0.5,
)
plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
xlim = [0.5, 3.5]
head_length = 0.02
decrease_arrow = 0.01
ax = plt.axes()
ecmwf = 0.411
wind = 0.07
wind_error = 0.01
rh = 0.15
rh_error = 0.02
cmip6 = 0.772
rh_and_wind = 0.29
rh_and_wind_error = 0.04
ax.arrow(
1,
ecmwf,
0,
wind - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(1, ecmwf + wind, wind_error, "EECE")
ax.arrow(
2,
ecmwf,
0,
rh - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(2, ecmwf + rh, rh_error, "EEEC")
ax.arrow(
3,
ecmwf,
0,
rh_and_wind - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(3, ecmwf + rh_and_wind, rh_and_wind_error, "EECC")
plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
plt.plot(
xlim,
[cmip6, cmip6],
color="red",
label="CMIP6 MMM $= 0.772$ K",
)
plt.xticks(
[1, 2, 3],
[
"W\n"
+ r"$+ $"
+ str(wind)
+ r" $\pm$ "
+ r"$"
+ str(wind_error)
+ r"$"
+ " K ",
"RH\n " + r"$+ $ $0.15$ $\pm$ $0.02$ K",
"RH+W\n " + r"$+ $ $0.29$ $\pm$ $0.04$ K",
],
)
plt.xlim(xlim)
plt.ylabel("1958-2017, Trend in nino3.4 [K]")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1, 0.102),
loc="lower left",
mode="expand",
ncol=2,
)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
if show_plots:
plt.show()
else:
plt.clf()
if __name__ == "__main__":
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.pdf"))
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.png"))
| true
| true
|
f715c25e8a6baf9dd30e5de343c4575f046db6a9
| 11,450
|
py
|
Python
|
elite/route.py
|
mEDI-S/mEDI_s-Elite-Tools
|
c6927c79358a3781bdf9da0db82c8c7d46f70dc6
|
[
"BSD-3-Clause"
] | 15
|
2015-08-30T01:53:10.000Z
|
2021-02-19T21:35:07.000Z
|
elite/route.py
|
mEDI-S/mEDI_s-Elite-Tools
|
c6927c79358a3781bdf9da0db82c8c7d46f70dc6
|
[
"BSD-3-Clause"
] | 2
|
2018-02-21T22:13:37.000Z
|
2021-03-06T16:48:26.000Z
|
elite/route.py
|
mEDI-S/mEDI_s-Elite-Tools
|
c6927c79358a3781bdf9da0db82c8c7d46f70dc6
|
[
"BSD-3-Clause"
] | 7
|
2015-11-22T15:25:07.000Z
|
2020-05-23T01:29:40.000Z
|
# -*- coding: UTF8
'''
Created on 13.07.2015
@author: mEDI
'''
from elite.system import system as elitesystem
#from elite.rares import rares as eliterares
# from elite.route import route as eliteroute
class route(object):
'''
classdocs
'''
#__slots__ = ["bla"]
#bla =1
maxHops = None
maxJumpDistance = None
maxDeep = None
    # the shortest route is not the best one because of the optimal prices at > 150 ly
systemID = None
_before = None
    # _initSystem = None # start system
    initSystem = None  # start system
possibleSystems = []
_raresInSystem = None
_availableSystemList = None
_sellDone = None
starDist = None
deep = 1
_hopsFromBefore = None
_dist = None # distance to before system
mydb = None
rares = None
system = None
def __init__(self, mydb, before=None, maxDeep=None, maxJumpDistance=None, maxHops=None):
# super(route, self).__init__()
self.mydb = mydb
self.possibleSystems = []
self._before = before
if before:
self.initSystem = before.initSystem
self.system = self.initSystem.system
self.maxHops = self.initSystem.maxHops
self.maxDeep = self.initSystem.maxDeep
self.maxJumpDistance = self.initSystem.maxJumpDistance
# self.rares = before.rares
else:
self.system = elitesystem(self.mydb)
self.maxDeep = maxDeep
self.maxHops = maxHops
self.maxJumpDistance = maxJumpDistance
# self.rares = eliterares(con)
def addPossibleSystems(self, systemID, dist, startdist, systemList):
# def addPossibleSystems(self, system, dist, rarelist):
newroute = route(self.mydb, self)
newroute._availableSystemList = systemList
newroute._dist = dist
newroute.systemID = systemID
newroute.starDist = startdist
newroute.deep = self.deep + 1
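        # estimated number of jumps from the previous system: dist / maxJumpDistance, rounded up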
newroute._hopsFromBefore = int(round((dist / self.maxJumpDistance) + 0.5))
# new = {"System":system, "dist":dist, "rareslist":rarelist, "nextroute":route(self.con)}
self.possibleSystems.append(newroute)
def setMaxHops(self, hops):
self.maxHops = hops
def setmaxJumpDistance(self, dist):
self.maxJumpDistance = dist
def calcRoutingDeep(self):
MaxDeep = self.deep
for nextsystem in self.possibleSystems:
nMaxDeep = nextsystem.calcRoutingDeep()
if nMaxDeep > MaxDeep:
MaxDeep = nMaxDeep
return MaxDeep
def getLongRouting(self, maxdeep, dist, totalStartDist, totalHops, systems=[]):
systems.append(self.systemID)
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
print("system:%s -> %s deep: %d dist:%d totalStarDist:%d hops:%d" % (systems, self.systemID, nextsystem.deep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops))
nextsystem.getLongRouting(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, systems)
systems.pop()
def getMinHops(self, maxdeep, totalHops=0):
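        # recursively find the smallest accumulated hop count among branches that reach at least maxdeep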
minHops = None
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
if minHops is None or minHops > nextsystem._hopsFromBefore + totalHops:
minHops = nextsystem._hopsFromBefore + totalHops
ret = nextsystem.getMinHops(maxdeep, nextsystem._hopsFromBefore + totalHops)
if ret and (minHops is None or minHops > ret):
minHops = ret
return minHops
def calcRouteSum(self):
totalSum = 1
for nextsystem in self.possibleSystems:
totalSum += nextsystem.calcRouteSum()
return totalSum
def getMinStarDist(self, maxdeep, starDist=0):
minStartDist = None
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
if minStartDist is None or minStartDist > nextsystem.starDist + starDist:
minStartDist = nextsystem.starDist + starDist
ret = nextsystem.getMinStarDist(maxdeep, nextsystem.starDist + starDist)
if ret and (minStartDist is None or minStartDist > ret):
minStartDist = ret
return minStartDist
def getMinDistFromBest(self, maxdeep, dist=0, totalStartDist=0, totalHops=0, minHops=None, minStardist=None):
        # on the first call, calculate the optimal reference data
if minHops is None:
minHops = self.getMinHops(maxdeep)
if minStardist is None:
minStardist = self.getMinStarDist(maxdeep)
minDist = None
for nextsystem in self.possibleSystems:
if nextsystem.deep == maxdeep and nextsystem._hopsFromBefore + totalHops == minHops and nextsystem.starDist + totalStartDist == minStardist:
if minDist is None or minDist > nextsystem._dist + dist:
minDist = nextsystem._dist + dist
ret = nextsystem.getMinDistFromBest(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, minHops, minStardist)
if ret and (minDist is None or minDist > ret):
minDist = ret
return minDist
def getBestRoute(self, maxdeep, dist=0, totalStartDist=0, totalHops=0, minHops=None, minStardist=None, minDist=None):
        # on the first call, calculate the optimal reference data
if minHops is None:
minHops = self.getMinHops(maxdeep)
if minStardist is None:
minStardist = self.getMinStarDist(maxdeep)
if minDist is None:
minDist = self.getMinDistFromBest(maxdeep, 0, 0, 0, minHops, minStardist)
# systems.append(self)
for nextsystem in self.possibleSystems:
if nextsystem and nextsystem.deep == maxdeep and nextsystem._hopsFromBefore + totalHops == minHops and nextsystem.starDist + totalStartDist == minStardist and minDist == nextsystem._dist + dist:
#print("best system: %s deep: %d dist:%d totalStarDist:%d hops:%d" % ( self.systemID, nextsystem.deep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops))
before = nextsystem
systems = []
while before:
systems.append(before)
before = before._before
systems.reverse()
return systems
break
res = nextsystem.getBestRoute(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, minHops, minStardist, minDist)
if res :
#print(res)
return res
def getAllRoutes(self, maxdeep):
routesList = []
def listWorker(curSystem):
if curSystem.deep == maxdeep:
routesList.append(curSystem)
return
for nextsys in curSystem.possibleSystems:
listWorker(nextsys)
listWorker(self.initSystem)
return routesList
def getSystemsFromRoute(self):
before = self
systems = []
while before:
systems.append(before)
before = before._before
systems.reverse()
return systems
def getStardistanceFromRoute(self):
before = self
distance = 0
while before:
if before.starDist:
distance += before.starDist
before = before._before
return distance
def calcRoutenRecrusion(self, slowMode):
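        # stop if the next level would reach maxDeep, otherwise let every candidate system expand its own routes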
# self.queueLock.acquire()
if self.deep+1 >= self.maxDeep:
return
for nextsystem in self.possibleSystems:
nextsystem.calcAllRoutesFromSystem( slowMode)
def testExistRoute(self, system, currentRoute):
        # test whether all systems of the route have already been used in another combination
# recursive rareroute
count = len(currentRoute)+1
#if count == 1: return
def listWorker(curSystem, count):
if curSystem.systemID in currentRoute:
count -= 1
elif curSystem.systemID == system:
count -= 1
#print(count)
if count == 0:
# print(system, curSystem.systemID ,currentRoute)
                # allow other routes to me; drop only routes that end exactly at me
if curSystem.systemID == system:
return True
return
for nextsys in curSystem.possibleSystems:
if listWorker(nextsys, count) == True:
return True
# print(self.initSystem)
return listWorker(self.initSystem, count)
def calcAllRoutesFromSystem(self, slowMode=False):
if len(self._availableSystemList) == 0: return
maxDistance = self.maxHops * self.maxJumpDistance
#print(len(self._availableSystemList), self._availableSystemList)
systems = self.system.getSystemsInDistance(self.systemID, maxDistance, self._availableSystemList)
#=======================================================================
        # reverse=True: long routes first, sells more items
        # reverse=False: short routes first, does not sell all items
        # in slow mode only, there is no difference
#=====================================================================
currentRoute = []
if slowMode != True:
systems = sorted(systems, key=lambda system: system["dist"], reverse=True)
# build current routelist
currentRoute.append(self.systemID)
before = self._before
while before:
currentRoute.append(before.systemID)
before = before._before
for system in systems:
# print(system)
nextSystemlist = self._availableSystemList[:]
#nextSystemlist = []
for listitem in nextSystemlist:
if listitem[0] == system["System"]:
stardist = listitem[1]
nextSystemlist.remove(listitem)
break
if stardist == None:
stardist = 0
if slowMode == True:
self.addPossibleSystems(system["System"], system["dist"], stardist, nextSystemlist)
else:
if self.testExistRoute(system["System"], currentRoute) != True:
#if True:
self.addPossibleSystems(system["System"], system["dist"], stardist, nextSystemlist)
# self.addPossibleSystems(system["System"], system["dist"], newrareslist)
currentRoute = []
self._availableSystemList = []
nextSystemlist = []
systems = []
self.calcRoutenRecrusion(slowMode)
# return myRaresRoute
| 38.294314
| 230
| 0.585415
|
from elite.system import system as elitesystem
class route(object):
maxHops = None
maxJumpDistance = None
maxDeep = None
systemID = None
_before = None
    initSystem = None
possibleSystems = []
_raresInSystem = None
_availableSystemList = None
_sellDone = None
starDist = None
deep = 1
_hopsFromBefore = None
_dist = None
mydb = None
rares = None
system = None
def __init__(self, mydb, before=None, maxDeep=None, maxJumpDistance=None, maxHops=None):
self.mydb = mydb
self.possibleSystems = []
self._before = before
if before:
self.initSystem = before.initSystem
self.system = self.initSystem.system
self.maxHops = self.initSystem.maxHops
self.maxDeep = self.initSystem.maxDeep
self.maxJumpDistance = self.initSystem.maxJumpDistance
else:
self.system = elitesystem(self.mydb)
self.maxDeep = maxDeep
self.maxHops = maxHops
self.maxJumpDistance = maxJumpDistance
def addPossibleSystems(self, systemID, dist, startdist, systemList):
newroute = route(self.mydb, self)
newroute._availableSystemList = systemList
newroute._dist = dist
newroute.systemID = systemID
newroute.starDist = startdist
newroute.deep = self.deep + 1
newroute._hopsFromBefore = int(round((dist / self.maxJumpDistance) + 0.5))
self.possibleSystems.append(newroute)
def setMaxHops(self, hops):
self.maxHops = hops
def setmaxJumpDistance(self, dist):
self.maxJumpDistance = dist
def calcRoutingDeep(self):
MaxDeep = self.deep
for nextsystem in self.possibleSystems:
nMaxDeep = nextsystem.calcRoutingDeep()
if nMaxDeep > MaxDeep:
MaxDeep = nMaxDeep
return MaxDeep
def getLongRouting(self, maxdeep, dist, totalStartDist, totalHops, systems=[]):
systems.append(self.systemID)
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
print("system:%s -> %s deep: %d dist:%d totalStarDist:%d hops:%d" % (systems, self.systemID, nextsystem.deep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops))
nextsystem.getLongRouting(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, systems)
systems.pop()
def getMinHops(self, maxdeep, totalHops=0):
minHops = None
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
if minHops is None or minHops > nextsystem._hopsFromBefore + totalHops:
minHops = nextsystem._hopsFromBefore + totalHops
ret = nextsystem.getMinHops(maxdeep, nextsystem._hopsFromBefore + totalHops)
if ret and (minHops is None or minHops > ret):
minHops = ret
return minHops
def calcRouteSum(self):
totalSum = 1
for nextsystem in self.possibleSystems:
totalSum += nextsystem.calcRouteSum()
return totalSum
def getMinStarDist(self, maxdeep, starDist=0):
minStartDist = None
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
if minStartDist is None or minStartDist > nextsystem.starDist + starDist:
minStartDist = nextsystem.starDist + starDist
ret = nextsystem.getMinStarDist(maxdeep, nextsystem.starDist + starDist)
if ret and (minStartDist is None or minStartDist > ret):
minStartDist = ret
return minStartDist
def getMinDistFromBest(self, maxdeep, dist=0, totalStartDist=0, totalHops=0, minHops=None, minStardist=None):
if minHops is None:
minHops = self.getMinHops(maxdeep)
if minStardist is None:
minStardist = self.getMinStarDist(maxdeep)
minDist = None
for nextsystem in self.possibleSystems:
if nextsystem.deep == maxdeep and nextsystem._hopsFromBefore + totalHops == minHops and nextsystem.starDist + totalStartDist == minStardist:
if minDist is None or minDist > nextsystem._dist + dist:
minDist = nextsystem._dist + dist
ret = nextsystem.getMinDistFromBest(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, minHops, minStardist)
if ret and (minDist is None or minDist > ret):
minDist = ret
return minDist
def getBestRoute(self, maxdeep, dist=0, totalStartDist=0, totalHops=0, minHops=None, minStardist=None, minDist=None):
if minHops is None:
minHops = self.getMinHops(maxdeep)
if minStardist is None:
minStardist = self.getMinStarDist(maxdeep)
if minDist is None:
minDist = self.getMinDistFromBest(maxdeep, 0, 0, 0, minHops, minStardist)
for nextsystem in self.possibleSystems:
if nextsystem and nextsystem.deep == maxdeep and nextsystem._hopsFromBefore + totalHops == minHops and nextsystem.starDist + totalStartDist == minStardist and minDist == nextsystem._dist + dist:
before = nextsystem
systems = []
while before:
systems.append(before)
before = before._before
systems.reverse()
return systems
break
res = nextsystem.getBestRoute(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, minHops, minStardist, minDist)
if res :
return res
def getAllRoutes(self, maxdeep):
routesList = []
def listWorker(curSystem):
if curSystem.deep == maxdeep:
routesList.append(curSystem)
return
for nextsys in curSystem.possibleSystems:
listWorker(nextsys)
listWorker(self.initSystem)
return routesList
def getSystemsFromRoute(self):
before = self
systems = []
while before:
systems.append(before)
before = before._before
systems.reverse()
return systems
def getStardistanceFromRoute(self):
before = self
distance = 0
while before:
if before.starDist:
distance += before.starDist
before = before._before
return distance
def calcRoutenRecrusion(self, slowMode):
if self.deep+1 >= self.maxDeep:
return
for nextsystem in self.possibleSystems:
nextsystem.calcAllRoutesFromSystem( slowMode)
def testExistRoute(self, system, currentRoute):
count = len(currentRoute)+1
def listWorker(curSystem, count):
if curSystem.systemID in currentRoute:
count -= 1
elif curSystem.systemID == system:
count -= 1
if count == 0:
if curSystem.systemID == system:
return True
return
for nextsys in curSystem.possibleSystems:
if listWorker(nextsys, count) == True:
return True
return listWorker(self.initSystem, count)
def calcAllRoutesFromSystem(self, slowMode=False):
if len(self._availableSystemList) == 0: return
maxDistance = self.maxHops * self.maxJumpDistance
systems = self.system.getSystemsInDistance(self.systemID, maxDistance, self._availableSystemList)
currentRoute = []
if slowMode != True:
systems = sorted(systems, key=lambda system: system["dist"], reverse=True)
currentRoute.append(self.systemID)
before = self._before
while before:
currentRoute.append(before.systemID)
before = before._before
for system in systems:
nextSystemlist = self._availableSystemList[:]
for listitem in nextSystemlist:
if listitem[0] == system["System"]:
stardist = listitem[1]
nextSystemlist.remove(listitem)
break
if stardist == None:
stardist = 0
if slowMode == True:
self.addPossibleSystems(system["System"], system["dist"], stardist, nextSystemlist)
else:
if self.testExistRoute(system["System"], currentRoute) != True:
self.addPossibleSystems(system["System"], system["dist"], stardist, nextSystemlist)
currentRoute = []
self._availableSystemList = []
nextSystemlist = []
systems = []
self.calcRoutenRecrusion(slowMode)
| true
| true
|
f715c277513bc3a3aa82c20df1b2e8276d462a27
| 9,895
|
py
|
Python
|
pcdsdevices/tests/test_ccm.py
|
vespos/pcdsdevices
|
7c4728df62ea58b6491d1cb36bb39d27d6dd9fca
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-06-17T20:08:54.000Z
|
2022-01-11T17:55:21.000Z
|
pcdsdevices/tests/test_ccm.py
|
vespos/pcdsdevices
|
7c4728df62ea58b6491d1cb36bb39d27d6dd9fca
|
[
"BSD-3-Clause-LBNL"
] | 757
|
2017-12-21T23:16:41.000Z
|
2022-03-31T22:56:06.000Z
|
pcdsdevices/tests/test_ccm.py
|
vespos/pcdsdevices
|
7c4728df62ea58b6491d1cb36bb39d27d6dd9fca
|
[
"BSD-3-Clause-LBNL"
] | 38
|
2018-01-26T00:01:35.000Z
|
2022-02-17T00:48:55.000Z
|
import logging
import time
import numpy as np
import pytest
from ophyd.sim import fake_device_cache, make_fake_device
from .. import ccm
from ..sim import FastMotor
logger = logging.getLogger(__name__)
SAMPLE_ALIO = 4.575 # Current value as of writing this file
SAMPLE_THETA = 1.2 # Modest angle
SAMPLE_WAVELENGTH = 1.5 # hard xray
# Make sure the calcs are properly inverted
def test_theta_alio_inversion():
logger.debug('test_theta_alio_inversion')
theta = ccm.alio_to_theta(SAMPLE_ALIO, ccm.default_theta0, ccm.default_gr,
ccm.default_gd)
alio_calc = ccm.theta_to_alio(theta, ccm.default_theta0, ccm.default_gr,
ccm.default_gd)
# Unlike the other inversions, this is just an approximation
assert np.isclose(alio_calc, SAMPLE_ALIO)
def test_wavelength_theta_inversion():
logger.debug('test_wavelength_theta_inversion')
wavelength = ccm.theta_to_wavelength(SAMPLE_THETA, ccm.default_dspacing)
theta = ccm.wavelength_to_theta(wavelength, ccm.default_dspacing)
logger.debug('%s, %s', wavelength, theta)
assert np.isclose(theta, SAMPLE_THETA)
theta = ccm.wavelength_to_theta(SAMPLE_WAVELENGTH, ccm.default_dspacing)
wavelength = ccm.theta_to_wavelength(theta, ccm.default_dspacing)
logger.debug('%s, %s', wavelength, theta)
assert np.isclose(wavelength, SAMPLE_WAVELENGTH)
def test_energy_wavelength_inversion():
logger.debug('test_energy_wavelength_inversion')
energy = ccm.wavelength_to_energy(SAMPLE_WAVELENGTH)
wavelength_calc = ccm.energy_to_wavelength(energy)
assert wavelength_calc == SAMPLE_WAVELENGTH
@pytest.fixture(scope='function')
def fake_ccm():
return make_fake_ccm()
class FakeAlio(FastMotor):
kill = None
home = None
def make_fake_ccm():
fake_device_cache[ccm.CCMMotor] = FastMotor
fake_device_cache[ccm.CCMAlio] = FakeAlio
FakeCCM = make_fake_device(ccm.CCM)
fake_ccm = FakeCCM(alio_prefix='ALIO', theta2fine_prefix='THETA',
theta2coarse_prefix='THTA', chi2_prefix='CHI',
x_down_prefix='X:DOWN', x_up_prefix='X:UP',
y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',
y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,
name='fake_ccm')
def init_pos(mot, pos=0):
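        # give each fake motor a consistent starting state: zeroed readback/setpoint, (0, 0) limits, SPG and part number populated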
mot.user_readback.sim_put(0)
mot.user_setpoint.sim_put(0)
mot.user_setpoint.sim_set_limits((0, 0))
mot.motor_spg.sim_put(2)
mot.part_number.sim_put('tasdf')
init_pos(fake_ccm.x.down)
init_pos(fake_ccm.x.up)
init_pos(fake_ccm.y.down)
init_pos(fake_ccm.y.up_north)
init_pos(fake_ccm.y.up_south)
fake_ccm.alio.set(SAMPLE_ALIO)
fake_ccm.energy.alio.set(SAMPLE_ALIO)
fake_ccm.energy_with_vernier.alio.set(SAMPLE_ALIO)
fake_ccm.energy_with_vernier.vernier.setpoint.sim_put(0)
return fake_ccm
def test_fake_ccm(fake_ccm):
logger.debug('test_fake_ccm')
fake_ccm.get()
# Make sure we set up the forward/inverse to use the right methods
def test_ccm_calc(fake_ccm):
logger.debug('test_ccm_calc')
calc = fake_ccm.energy
logger.debug('physics pos is %s', calc.position)
logger.debug('real pos is %s', calc.real_position)
logger.debug('sample alio is %s', SAMPLE_ALIO)
theta_func = ccm.alio_to_theta(
SAMPLE_ALIO,
calc.theta0_rad_val,
calc.gr_val,
calc.gd_val,
)
wavelength_func = ccm.theta_to_wavelength(theta_func, calc.dspacing_val)
energy_func = ccm.wavelength_to_energy(wavelength_func)
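    # the calc pseudo-motor's reported energy should match chaining the standalone conversion functions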
energy = calc.energy.position
assert energy == energy_func
calc.alio.move(0)
calc.move(energy, wait=False)
assert np.isclose(calc.alio.position, SAMPLE_ALIO)
calc.alio.move(calc.alio.position)
calc.move(energy=calc.energy.position, wait=False)
assert np.isclose(calc.alio.position, SAMPLE_ALIO)
# Make sure sync'd axes work and that unk/in/out states work
@pytest.mark.timeout(5)
def test_ccm_main(fake_ccm):
logger.debug('test_ccm_main')
fake_ccm.y.move(5, wait=False)
assert fake_ccm.y.down.user_setpoint.get() == 5
assert fake_ccm.y.up_north.user_setpoint.get() == 5
assert fake_ccm.y.up_south.user_setpoint.get() == 5
assert fake_ccm.removed
assert not fake_ccm.inserted
fake_ccm.x.down.user_readback.sim_put(8)
fake_ccm.x.up.user_readback.sim_put(8)
assert not fake_ccm.removed
assert fake_ccm.inserted
fake_ccm.x.down.user_readback.sim_put(4)
fake_ccm.x.up.user_readback.sim_put(4)
assert not fake_ccm.removed
assert not fake_ccm.inserted
fake_ccm.insert(wait=False)
assert fake_ccm.x.down.user_setpoint.get() == 8
assert fake_ccm.x.up.user_setpoint.get() == 8
fake_ccm.remove(wait=False)
assert fake_ccm.x.down.user_setpoint.get() == 0
assert fake_ccm.x.up.user_setpoint.get() == 0
@pytest.mark.timeout(5)
def test_vernier(fake_ccm):
logger.debug('test_vernier')
pseudopos = fake_ccm.energy_with_vernier
# Moving with vernier should move the energy request motor too
pseudopos.move(7, wait=False)
assert np.isclose(pseudopos.energy.position, 7)
assert pseudopos.vernier.position == 7000
pseudopos.move(8, wait=False)
assert np.isclose(pseudopos.energy.position, 8)
assert pseudopos.vernier.position == 8000
pseudopos.move(9, wait=False)
assert np.isclose(pseudopos.energy.position, 9)
assert pseudopos.vernier.position == 9000
# Small moves (less than 30eV) should be skipped on the energy request
pseudopos.move(9.001, wait=False)
assert np.isclose(pseudopos.energy.position, 9.001)
assert pseudopos.vernier.position == 9000
# Unless we set the option for not skipping them
pseudopos.vernier.skip_small_moves = False
pseudopos.move(9.002, wait=False)
assert np.isclose(pseudopos.energy.position, 9.002)
assert pseudopos.vernier.position == 9002
@pytest.mark.timeout(5)
def test_set_current_position(fake_ccm):
logger.debug('test_set_current_position')
mot = fake_ccm.energy.energy
for energy in range(6, 14):
mot.set_current_position(energy)
assert np.isclose(mot.position, energy)
@pytest.mark.timeout(5)
def test_check_valid_constant(fake_ccm):
logger.debug('test_check_valid_constant')
# First call to make_valid sends the first monitor update
def make_valid(sig, valid):
if valid:
sig.put(1)
else:
sig.put(0)
def make_conn(sig, conn):
sig._metadata['connected'] = conn
def output(sig):
return fake_ccm._check_valid_constant(sig, sig.get())
test_sig = fake_ccm.dspacing
# Can we get to all the enum values?
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.ALWAYS_DISCONNECT
make_conn(test_sig, True)
make_valid(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.INVALID_CONNECT
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.INVALID_DISCONNECT
make_conn(test_sig, True)
make_valid(test_sig, True)
assert output(test_sig) == ccm.CCMConstantWarning.NO_WARNING
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.VALID_DISCONNECT
# theta0_deg is allowed to be zero, unlike the others
test_sig2 = fake_ccm.theta0_deg
make_conn(test_sig2, True)
make_valid(test_sig2, False)
assert output(test_sig2) == ccm.CCMConstantWarning.NO_WARNING
@pytest.mark.timeout(5)
def test_show_constant_warning(fake_ccm, caplog):
logger.debug('test_show_constant_warning')
for warning in (
ccm.CCMConstantWarning.NO_WARNING,
ccm.CCMConstantWarning.ALWAYS_DISCONNECT,
ccm.CCMConstantWarning.VALID_DISCONNECT,
ccm.CCMConstantWarning.INVALID_DISCONNECT,
ccm.CCMConstantWarning.INVALID_CONNECT,
):
caplog.clear()
with caplog.at_level(logging.WARNING):
fake_ccm._show_constant_warning(
warning,
fake_ccm.dspacing,
0.111111,
0.222222,
)
if warning == ccm.CCMConstantWarning.NO_WARNING:
assert len(caplog.records) == 0
else:
assert len(caplog.records) == 1
@pytest.mark.timeout(5)
def test_warn_invalid_constants(fake_ccm, caplog):
logger.debug('test_warn_invalid_constants')
    # Trick the warning into thinking we've been initialized for a while
fake_ccm._init_time = time.monotonic() - 1000
fake_ccm.theta0_deg.put(0)
fake_ccm.dspacing.put(0)
fake_ccm.gr.put(0)
fake_ccm.gd.put(0)
# We expect three warnings from the fake PVs that start at 0
caplog.clear()
with caplog.at_level(logging.WARNING):
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 3
# We expect the warnings to not repeat
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=True)
assert len(caplog.records) == 0
# Unless we ask them to
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 3
# Let's fix the issue and make sure no warnings are shown
fake_ccm.reset_calc_constant_defaults(confirm=False)
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 0
@pytest.mark.timeout(5)
def test_disconnected_ccm():
ccm.CCM(alio_prefix='ALIO', theta2fine_prefix='THETA',
theta2coarse_prefix='THTA', chi2_prefix='CHI',
x_down_prefix='X:DOWN', x_up_prefix='X:UP',
y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',
y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,
name='ccm')
| 33.316498
| 78
| 0.700455
|
import logging
import time
import numpy as np
import pytest
from ophyd.sim import fake_device_cache, make_fake_device
from .. import ccm
from ..sim import FastMotor
logger = logging.getLogger(__name__)
SAMPLE_ALIO = 4.575
SAMPLE_THETA = 1.2
SAMPLE_WAVELENGTH = 1.5
def test_theta_alio_inversion():
logger.debug('test_theta_alio_inversion')
theta = ccm.alio_to_theta(SAMPLE_ALIO, ccm.default_theta0, ccm.default_gr,
ccm.default_gd)
alio_calc = ccm.theta_to_alio(theta, ccm.default_theta0, ccm.default_gr,
ccm.default_gd)
assert np.isclose(alio_calc, SAMPLE_ALIO)
def test_wavelength_theta_inversion():
logger.debug('test_wavelength_theta_inversion')
wavelength = ccm.theta_to_wavelength(SAMPLE_THETA, ccm.default_dspacing)
theta = ccm.wavelength_to_theta(wavelength, ccm.default_dspacing)
logger.debug('%s, %s', wavelength, theta)
assert np.isclose(theta, SAMPLE_THETA)
theta = ccm.wavelength_to_theta(SAMPLE_WAVELENGTH, ccm.default_dspacing)
wavelength = ccm.theta_to_wavelength(theta, ccm.default_dspacing)
logger.debug('%s, %s', wavelength, theta)
assert np.isclose(wavelength, SAMPLE_WAVELENGTH)
def test_energy_wavelength_inversion():
logger.debug('test_energy_wavelength_inversion')
energy = ccm.wavelength_to_energy(SAMPLE_WAVELENGTH)
wavelength_calc = ccm.energy_to_wavelength(energy)
assert wavelength_calc == SAMPLE_WAVELENGTH
@pytest.fixture(scope='function')
def fake_ccm():
return make_fake_ccm()
class FakeAlio(FastMotor):
kill = None
home = None
def make_fake_ccm():
fake_device_cache[ccm.CCMMotor] = FastMotor
fake_device_cache[ccm.CCMAlio] = FakeAlio
FakeCCM = make_fake_device(ccm.CCM)
fake_ccm = FakeCCM(alio_prefix='ALIO', theta2fine_prefix='THETA',
theta2coarse_prefix='THTA', chi2_prefix='CHI',
x_down_prefix='X:DOWN', x_up_prefix='X:UP',
y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',
y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,
name='fake_ccm')
def init_pos(mot, pos=0):
mot.user_readback.sim_put(0)
mot.user_setpoint.sim_put(0)
mot.user_setpoint.sim_set_limits((0, 0))
mot.motor_spg.sim_put(2)
mot.part_number.sim_put('tasdf')
init_pos(fake_ccm.x.down)
init_pos(fake_ccm.x.up)
init_pos(fake_ccm.y.down)
init_pos(fake_ccm.y.up_north)
init_pos(fake_ccm.y.up_south)
fake_ccm.alio.set(SAMPLE_ALIO)
fake_ccm.energy.alio.set(SAMPLE_ALIO)
fake_ccm.energy_with_vernier.alio.set(SAMPLE_ALIO)
fake_ccm.energy_with_vernier.vernier.setpoint.sim_put(0)
return fake_ccm
def test_fake_ccm(fake_ccm):
logger.debug('test_fake_ccm')
fake_ccm.get()
def test_ccm_calc(fake_ccm):
logger.debug('test_ccm_calc')
calc = fake_ccm.energy
logger.debug('physics pos is %s', calc.position)
logger.debug('real pos is %s', calc.real_position)
logger.debug('sample alio is %s', SAMPLE_ALIO)
theta_func = ccm.alio_to_theta(
SAMPLE_ALIO,
calc.theta0_rad_val,
calc.gr_val,
calc.gd_val,
)
wavelength_func = ccm.theta_to_wavelength(theta_func, calc.dspacing_val)
energy_func = ccm.wavelength_to_energy(wavelength_func)
energy = calc.energy.position
assert energy == energy_func
calc.alio.move(0)
calc.move(energy, wait=False)
assert np.isclose(calc.alio.position, SAMPLE_ALIO)
calc.alio.move(calc.alio.position)
calc.move(energy=calc.energy.position, wait=False)
assert np.isclose(calc.alio.position, SAMPLE_ALIO)
@pytest.mark.timeout(5)
def test_ccm_main(fake_ccm):
logger.debug('test_ccm_main')
fake_ccm.y.move(5, wait=False)
assert fake_ccm.y.down.user_setpoint.get() == 5
assert fake_ccm.y.up_north.user_setpoint.get() == 5
assert fake_ccm.y.up_south.user_setpoint.get() == 5
assert fake_ccm.removed
assert not fake_ccm.inserted
fake_ccm.x.down.user_readback.sim_put(8)
fake_ccm.x.up.user_readback.sim_put(8)
assert not fake_ccm.removed
assert fake_ccm.inserted
fake_ccm.x.down.user_readback.sim_put(4)
fake_ccm.x.up.user_readback.sim_put(4)
assert not fake_ccm.removed
assert not fake_ccm.inserted
fake_ccm.insert(wait=False)
assert fake_ccm.x.down.user_setpoint.get() == 8
assert fake_ccm.x.up.user_setpoint.get() == 8
fake_ccm.remove(wait=False)
assert fake_ccm.x.down.user_setpoint.get() == 0
assert fake_ccm.x.up.user_setpoint.get() == 0
@pytest.mark.timeout(5)
def test_vernier(fake_ccm):
logger.debug('test_vernier')
pseudopos = fake_ccm.energy_with_vernier
# Moving with vernier should move the energy request motor too
pseudopos.move(7, wait=False)
assert np.isclose(pseudopos.energy.position, 7)
assert pseudopos.vernier.position == 7000
pseudopos.move(8, wait=False)
assert np.isclose(pseudopos.energy.position, 8)
assert pseudopos.vernier.position == 8000
pseudopos.move(9, wait=False)
assert np.isclose(pseudopos.energy.position, 9)
assert pseudopos.vernier.position == 9000
# Small moves (less than 30eV) should be skipped on the energy request
pseudopos.move(9.001, wait=False)
assert np.isclose(pseudopos.energy.position, 9.001)
assert pseudopos.vernier.position == 9000
# Unless we set the option for not skipping them
pseudopos.vernier.skip_small_moves = False
pseudopos.move(9.002, wait=False)
assert np.isclose(pseudopos.energy.position, 9.002)
assert pseudopos.vernier.position == 9002
@pytest.mark.timeout(5)
def test_set_current_position(fake_ccm):
logger.debug('test_set_current_position')
mot = fake_ccm.energy.energy
for energy in range(6, 14):
mot.set_current_position(energy)
assert np.isclose(mot.position, energy)
@pytest.mark.timeout(5)
def test_check_valid_constant(fake_ccm):
logger.debug('test_check_valid_constant')
# First call to make_valid sends the first monitor update
def make_valid(sig, valid):
if valid:
sig.put(1)
else:
sig.put(0)
def make_conn(sig, conn):
sig._metadata['connected'] = conn
def output(sig):
return fake_ccm._check_valid_constant(sig, sig.get())
test_sig = fake_ccm.dspacing
# Can we get to all the enum values?
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.ALWAYS_DISCONNECT
make_conn(test_sig, True)
make_valid(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.INVALID_CONNECT
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.INVALID_DISCONNECT
make_conn(test_sig, True)
make_valid(test_sig, True)
assert output(test_sig) == ccm.CCMConstantWarning.NO_WARNING
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.VALID_DISCONNECT
# theta0_deg is allowed to be zero, unlike the others
test_sig2 = fake_ccm.theta0_deg
make_conn(test_sig2, True)
make_valid(test_sig2, False)
assert output(test_sig2) == ccm.CCMConstantWarning.NO_WARNING
@pytest.mark.timeout(5)
def test_show_constant_warning(fake_ccm, caplog):
logger.debug('test_show_constant_warning')
for warning in (
ccm.CCMConstantWarning.NO_WARNING,
ccm.CCMConstantWarning.ALWAYS_DISCONNECT,
ccm.CCMConstantWarning.VALID_DISCONNECT,
ccm.CCMConstantWarning.INVALID_DISCONNECT,
ccm.CCMConstantWarning.INVALID_CONNECT,
):
caplog.clear()
with caplog.at_level(logging.WARNING):
fake_ccm._show_constant_warning(
warning,
fake_ccm.dspacing,
0.111111,
0.222222,
)
if warning == ccm.CCMConstantWarning.NO_WARNING:
assert len(caplog.records) == 0
else:
assert len(caplog.records) == 1
@pytest.mark.timeout(5)
def test_warn_invalid_constants(fake_ccm, caplog):
logger.debug('test_warn_invalid_constants')
    # Trick the warning into thinking we've been initialized for a while
fake_ccm._init_time = time.monotonic() - 1000
fake_ccm.theta0_deg.put(0)
fake_ccm.dspacing.put(0)
fake_ccm.gr.put(0)
fake_ccm.gd.put(0)
caplog.clear()
with caplog.at_level(logging.WARNING):
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 3
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=True)
assert len(caplog.records) == 0
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 3
fake_ccm.reset_calc_constant_defaults(confirm=False)
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 0
@pytest.mark.timeout(5)
def test_disconnected_ccm():
ccm.CCM(alio_prefix='ALIO', theta2fine_prefix='THETA',
theta2coarse_prefix='THTA', chi2_prefix='CHI',
x_down_prefix='X:DOWN', x_up_prefix='X:UP',
y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',
y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,
name='ccm')
| true
| true
|
f715c27add3916da3a4ba06ed5e3227bf392db8f
| 4,303
|
py
|
Python
|
spectree/models.py
|
loonateam/spectree
|
71b2d34993e01b36a8de18c2a3d6856d0c9e45c3
|
[
"Apache-2.0"
] | 183
|
2019-12-29T00:37:09.000Z
|
2022-03-15T20:37:53.000Z
|
spectree/models.py
|
0b01001001/spectree
|
35f17fe9694031a335223111d7fb38175d7e6e25
|
[
"Apache-2.0"
] | 102
|
2019-12-13T09:10:53.000Z
|
2022-03-15T06:21:29.000Z
|
spectree/models.py
|
loonateam/spectree
|
71b2d34993e01b36a8de18c2a3d6856d0c9e45c3
|
[
"Apache-2.0"
] | 51
|
2020-01-06T21:06:07.000Z
|
2022-03-19T16:10:58.000Z
|
import re
from enum import Enum
from typing import Any, Dict, Sequence
from pydantic import BaseModel, Field, root_validator, validator
# OpenAPI names validation regexp
OpenAPI_NAME_RE = re.compile(r"^[A-Za-z0-9-._]+")
class ExternalDocs(BaseModel):
description: str = ""
url: str
class Tag(BaseModel):
"""OpenAPI tag object"""
name: str
description: str = ""
externalDocs: ExternalDocs = None
def __str__(self):
return self.name
class UnprocessableEntityElement(BaseModel):
"""Model of missing field description."""
loc: Sequence[str] = Field(
...,
title="Missing field name",
)
msg: str = Field(
...,
title="Error message",
)
type: str = Field( # noqa: WPS125
...,
title="Error type",
)
ctx: Dict[str, Any] = Field(
None,
title="Error context",
)
class UnprocessableEntity(BaseModel):
"""Model of 422 Unprocessable Entity error."""
__root__: Sequence[UnprocessableEntityElement]
class SecureType(str, Enum):
HTTP = "http"
API_KEY = "apiKey"
OAUTH_TWO = "oauth2"
OPEN_ID_CONNECT = "openIdConnect"
class InType(str, Enum):
HEADER = "header"
QUERY = "query"
COOKIE = "cookie"
type_req_fields = {
SecureType.HTTP: ["scheme"],
SecureType.API_KEY: ["name", "field_in"],
SecureType.OAUTH_TWO: ["flows"],
SecureType.OPEN_ID_CONNECT: ["openIdConnectUrl"],
}
class SecuritySchemeData(BaseModel):
"""
Security scheme data
https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.3.md#securitySchemeObject
"""
type: SecureType = Field(..., description="Secure scheme type")
description: str = Field(
None,
description="A short description for security scheme.",
)
name: str = Field(
None,
description="The name of the header, query or cookie parameter to be used.",
)
field_in: InType = Field(
None, alias="in", description="The location of the API key."
)
scheme: str = Field(None, description="The name of the HTTP Authorization scheme.")
bearerFormat: str = Field(
None,
description=(
"A hint to the client to identify how the bearer token is formatted."
),
)
flows: dict = Field(
None,
description=(
"Containing configuration information for the flow types supported."
),
)
openIdConnectUrl: str = Field(
None, description="OpenId Connect URL to discover OAuth2 configuration values."
)
@root_validator()
def check_type_required_fields(cls, values: dict):
exist_fields = {key for key in values.keys() if values[key]}
if not values.get("type"):
raise ValueError("Type field is required")
if not set(type_req_fields[values["type"]]).issubset(exist_fields):
raise ValueError(
f"For `{values['type']}` type "
f"`{', '.join(type_req_fields[values['type']])}` field(s) is required."
)
return values
class Config:
validate_assignment = True
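# Usage sketch (illustrative values, not part of the library): because of the
# root validator above, an "apiKey" scheme must also supply `name` and `in`
# (mapped to field_in), e.g.
#   SecuritySchemeData(type="apiKey", name="X-API-Key", **{"in": "header"})
# Omitting either of them raises the "field(s) is required." ValueError.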
class SecurityScheme(BaseModel):
"""
Named security scheme
"""
name: str = Field(
...,
description="Custom security scheme name. Can only contain - [A-Za-z0-9-._]",
)
data: SecuritySchemeData = Field(..., description="Security scheme data")
@validator("name")
def check_name(cls, value: str):
if not OpenAPI_NAME_RE.fullmatch(value):
raise ValueError("Name not match OpenAPI rules")
return value
class Config:
validate_assignment = True
class Server(BaseModel):
"""
Servers section of OAS
"""
url: str = Field(
...,
description="""URL or path of API server
        (may be parameterized using the \"variables\" section - for more information,
see: https://swagger.io/docs/specification/api-host-and-base-path/ )""",
)
description: str = Field(
None,
description="Custom server description for server URL",
)
variables: dict = Field(
None,
description="Variables for customizing server URL",
)
class Config:
validate_assignment = True
| 25.163743
| 97
| 0.614455
|
import re
from enum import Enum
from typing import Any, Dict, Sequence
from pydantic import BaseModel, Field, root_validator, validator
OpenAPI_NAME_RE = re.compile(r"^[A-Za-z0-9-._]+")
class ExternalDocs(BaseModel):
description: str = ""
url: str
class Tag(BaseModel):
name: str
description: str = ""
externalDocs: ExternalDocs = None
def __str__(self):
return self.name
class UnprocessableEntityElement(BaseModel):
loc: Sequence[str] = Field(
...,
title="Missing field name",
)
msg: str = Field(
...,
title="Error message",
)
type: str = Field(
...,
title="Error type",
)
ctx: Dict[str, Any] = Field(
None,
title="Error context",
)
class UnprocessableEntity(BaseModel):
__root__: Sequence[UnprocessableEntityElement]
class SecureType(str, Enum):
HTTP = "http"
API_KEY = "apiKey"
OAUTH_TWO = "oauth2"
OPEN_ID_CONNECT = "openIdConnect"
class InType(str, Enum):
HEADER = "header"
QUERY = "query"
COOKIE = "cookie"
type_req_fields = {
SecureType.HTTP: ["scheme"],
SecureType.API_KEY: ["name", "field_in"],
SecureType.OAUTH_TWO: ["flows"],
SecureType.OPEN_ID_CONNECT: ["openIdConnectUrl"],
}
class SecuritySchemeData(BaseModel):
type: SecureType = Field(..., description="Secure scheme type")
description: str = Field(
None,
description="A short description for security scheme.",
)
name: str = Field(
None,
description="The name of the header, query or cookie parameter to be used.",
)
field_in: InType = Field(
None, alias="in", description="The location of the API key."
)
scheme: str = Field(None, description="The name of the HTTP Authorization scheme.")
bearerFormat: str = Field(
None,
description=(
"A hint to the client to identify how the bearer token is formatted."
),
)
flows: dict = Field(
None,
description=(
"Containing configuration information for the flow types supported."
),
)
openIdConnectUrl: str = Field(
None, description="OpenId Connect URL to discover OAuth2 configuration values."
)
@root_validator()
def check_type_required_fields(cls, values: dict):
exist_fields = {key for key in values.keys() if values[key]}
if not values.get("type"):
raise ValueError("Type field is required")
if not set(type_req_fields[values["type"]]).issubset(exist_fields):
raise ValueError(
f"For `{values['type']}` type "
f"`{', '.join(type_req_fields[values['type']])}` field(s) is required."
)
return values
class Config:
validate_assignment = True
class SecurityScheme(BaseModel):
name: str = Field(
...,
description="Custom security scheme name. Can only contain - [A-Za-z0-9-._]",
)
data: SecuritySchemeData = Field(..., description="Security scheme data")
@validator("name")
def check_name(cls, value: str):
if not OpenAPI_NAME_RE.fullmatch(value):
raise ValueError("Name not match OpenAPI rules")
return value
class Config:
validate_assignment = True
class Server(BaseModel):
url: str = Field(
...,
description="""URL or path of API server
        (may be parameterized using the \"variables\" section - for more information,
see: https://swagger.io/docs/specification/api-host-and-base-path/ )""",
)
description: str = Field(
None,
description="Custom server description for server URL",
)
variables: dict = Field(
None,
description="Variables for customizing server URL",
)
class Config:
validate_assignment = True
| true
| true
|
f715c2f9b4de2c046a801fa47e5dbf73f975953d
| 820
|
py
|
Python
|
phone_iso3166/network.py
|
foxkirov/phone-iso3166
|
8419091e906c439f9362690d7d2d02186098e5c4
|
[
"MIT"
] | 19
|
2017-03-28T10:35:22.000Z
|
2022-03-14T04:39:03.000Z
|
phone_iso3166/network.py
|
foxkirov/phone-iso3166
|
8419091e906c439f9362690d7d2d02186098e5c4
|
[
"MIT"
] | 17
|
2016-11-11T11:50:57.000Z
|
2021-06-22T09:32:17.000Z
|
phone_iso3166/network.py
|
foxkirov/phone-iso3166
|
8419091e906c439f9362690d7d2d02186098e5c4
|
[
"MIT"
] | 5
|
2015-09-28T18:25:38.000Z
|
2021-07-05T11:57:58.000Z
|
from .e212_names import operators, countries
from .errors import InvalidNetwork, InvalidCountry
def network(mcc, mnc):
'''
Returns a tuple (country, network_name), with country specified as
ISO-3166-1 alpha-2 code.
'''
mcc = int(mcc)
mnc = int(mnc)
try:
return operators[mcc][mnc]
    except KeyError:
raise InvalidNetwork('Invalid MCC {} MNC {}'.format(mcc, mnc))
def country_networks(country):
'''
Returns a list of tuples (mcc, mnc, network_name) with all the networks
belonging to the specified country.
The country must be specified as an ISO-3166-1 alpha-2 code.
'''
try:
return [(m[0], m[1], operators[m[0]][m[1]][1])
for m in countries[country]]
    except KeyError:
raise InvalidCountry('Invalid country {}'.format(country))
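# Usage sketch (illustrative; the exact operator names depend on the bundled
# e212 tables):
#   network(262, 2)        -> ('de', '<operator name>')
#   country_networks('de') -> [(262, 1, '<name>'), (262, 2, '<name>'), ...]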
| 28.275862
| 75
| 0.636585
|
from .e212_names import operators, countries
from .errors import InvalidNetwork, InvalidCountry
def network(mcc, mnc):
mcc = int(mcc)
mnc = int(mnc)
try:
return operators[mcc][mnc]
    except KeyError:
raise InvalidNetwork('Invalid MCC {} MNC {}'.format(mcc, mnc))
def country_networks(country):
try:
return [(m[0], m[1], operators[m[0]][m[1]][1])
for m in countries[country]]
    except KeyError:
raise InvalidCountry('Invalid country {}'.format(country))
| true
| true
|
f715c364022f5d19e2b6087341499850fd3d9b4c
| 501
|
py
|
Python
|
polidoro_terminal/__init__.py
|
heitorpolidoro/py-terminal
|
3ef04d12aa48ef6d214598df34ddf932518f4614
|
[
"MIT"
] | null | null | null |
polidoro_terminal/__init__.py
|
heitorpolidoro/py-terminal
|
3ef04d12aa48ef6d214598df34ddf932518f4614
|
[
"MIT"
] | null | null | null |
polidoro_terminal/__init__.py
|
heitorpolidoro/py-terminal
|
3ef04d12aa48ef6d214598df34ddf932518f4614
|
[
"MIT"
] | null | null | null |
from polidoro_terminal.size import size, columns, rows
from polidoro_terminal.manipulation import erase_lines, up_lines, clear_to_end_of_line
from polidoro_terminal import cursor
from polidoro_terminal.color import Color
from polidoro_terminal.format import Format
from polidoro_terminal.question import question
NAME = 'polidoro_terminal'
VERSION = '0.0.2'
__all__ = ['size', 'columns', 'rows', 'erase_lines', 'up_lines', 'clear_to_end_of_line', 'cursor', 'Color',
'Format', 'question']
| 38.538462
| 107
| 0.784431
|
from polidoro_terminal.size import size, columns, rows
from polidoro_terminal.manipulation import erase_lines, up_lines, clear_to_end_of_line
from polidoro_terminal import cursor
from polidoro_terminal.color import Color
from polidoro_terminal.format import Format
from polidoro_terminal.question import question
NAME = 'polidoro_terminal'
VERSION = '0.0.2'
__all__ = ['size', 'columns', 'rows', 'erase_lines', 'up_lines', 'clear_to_end_of_line', 'cursor', 'Color',
'Format', 'question']
| true
| true
|
f715c4281d345e69ba8d6ddbc7b628af0ce8c87e
| 2,792
|
py
|
Python
|
plugins/stream.py
|
Morgawr/test-masterlinker-repo
|
0666403b7092f5ba434e1b8dbf3484ccb0cbe773
|
[
"MIT"
] | null | null | null |
plugins/stream.py
|
Morgawr/test-masterlinker-repo
|
0666403b7092f5ba434e1b8dbf3484ccb0cbe773
|
[
"MIT"
] | null | null | null |
plugins/stream.py
|
Morgawr/test-masterlinker-repo
|
0666403b7092f5ba434e1b8dbf3484ccb0cbe773
|
[
"MIT"
] | null | null | null |
# Example of how a plugin is structured: this one periodically polls the
# Twitch/Justin.tv APIs and announces in the channel when a tracked streamer goes live.
import time
import threading
import simplejson
import urllib2
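# Repeatedly calls `func` every `sec` seconds by re-arming a threading.Timer
# from inside the wrapper.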
def set_interval(func, sec):
def func_wrapper():
set_interval(func, sec)
func()
t = threading.Timer(sec, func_wrapper)
t.start()
return t
class streamer():
def __init__(self, api_url, name, link):
self.api_url = api_url
self.name = name
self.online = False
self.link = link
class stream():
def __init__(self):
set_interval(self.check_streams, 60)
self.channel = "#vidyadev"
self.streamer_list = [streamer("https://api.twitch.tv/kraken/streams/argoneus", "argoneus", "http://www.twitch.tv/argoneus"), streamer("https://api.twitch.tv/kraken/streams/satatami", "Plesioth", "http://www.twitch.tv/satatami"), streamer("http://api.justin.tv/api/stream/list.json?channel=streamingstrandberg", "sstrandberg", "http://www.justin.tv/streamingstrandberg"), streamer("https://api.twitch.tv/kraken/streams/mechacrash", "MechaCrash", "http://www.twitch.tv/mechacrash"), streamer("https://api.twitch.tv/kraken/streams/mortvert_", "Mortvert", "http://twitch.tv/mortvert_")]
def check_streams(self):
for streamer in self.streamer_list:
self.check_stream(streamer)
def check_stream(self, streamer):
try:
result = simplejson.load(urllib2.urlopen(streamer.api_url))
except:
return None
if "twitch.tv" in streamer.api_url:
if result["stream"] and streamer.online is False:
streamer.online = True
title = ""
if (result["stream"])["game"]:
title = (result["stream"])["game"]
string = "\x033|STREAM| " + streamer.name + " is streaming " + title + " at " + streamer.link
if string:
self.main_ref.send_msg(self.channel, string[0:450])
else:
print "some error with stream: on user: " + streamer.name
elif streamer.online is True and not result["stream"]:
streamer.online = False
elif "justin.tv" in streamer.api_url:
if result and streamer.online is False:
streamer.online = True
string = "\x033|STREAM| " + streamer.name + " is streaming " + (result[0])["title"] + " at " + streamer.link
self.main_ref.send_msg(self.channel, string[0:450])
elif streamer.online is True and not result:
streamer.online = False
def stream(self, main_ref, msg_info):
self.main_ref = main_ref
return None
| 42.30303
| 591
| 0.606017
|
import time
import threading
import simplejson
import urllib2
def set_interval(func, sec):
def func_wrapper():
set_interval(func, sec)
func()
t = threading.Timer(sec, func_wrapper)
t.start()
return t
class streamer():
def __init__(self, api_url, name, link):
self.api_url = api_url
self.name = name
self.online = False
self.link = link
class stream():
def __init__(self):
set_interval(self.check_streams, 60)
self.channel = "#vidyadev"
self.streamer_list = [streamer("https://api.twitch.tv/kraken/streams/argoneus", "argoneus", "http://www.twitch.tv/argoneus"), streamer("https://api.twitch.tv/kraken/streams/satatami", "Plesioth", "http://www.twitch.tv/satatami"), streamer("http://api.justin.tv/api/stream/list.json?channel=streamingstrandberg", "sstrandberg", "http://www.justin.tv/streamingstrandberg"), streamer("https://api.twitch.tv/kraken/streams/mechacrash", "MechaCrash", "http://www.twitch.tv/mechacrash"), streamer("https://api.twitch.tv/kraken/streams/mortvert_", "Mortvert", "http://twitch.tv/mortvert_")]
def check_streams(self):
for streamer in self.streamer_list:
self.check_stream(streamer)
def check_stream(self, streamer):
try:
result = simplejson.load(urllib2.urlopen(streamer.api_url))
except:
return None
if "twitch.tv" in streamer.api_url:
if result["stream"] and streamer.online is False:
streamer.online = True
title = ""
if (result["stream"])["game"]:
title = (result["stream"])["game"]
string = "\x033|STREAM| " + streamer.name + " is streaming " + title + " at " + streamer.link
if string:
self.main_ref.send_msg(self.channel, string[0:450])
else:
print "some error with stream: on user: " + streamer.name
elif streamer.online is True and not result["stream"]:
streamer.online = False
elif "justin.tv" in streamer.api_url:
if result and streamer.online is False:
streamer.online = True
string = "\x033|STREAM| " + streamer.name + " is streaming " + (result[0])["title"] + " at " + streamer.link
self.main_ref.send_msg(self.channel, string[0:450])
elif streamer.online is True and not result:
streamer.online = False
def stream(self, main_ref, msg_info):
self.main_ref = main_ref
return None
| false
| true
|
f715c430f48dcf933c9fde5179a7cfbfd6339883
| 8,381
|
py
|
Python
|
hubspot/crm/deals/models/batch_response_simple_public_object.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/deals/models/batch_response_simple_public_object.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/deals/models/batch_response_simple_public_object.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Deals
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.deals.configuration import Configuration
class BatchResponseSimplePublicObject(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'status': 'str',
'results': 'list[SimplePublicObject]',
'requested_at': 'datetime',
'started_at': 'datetime',
'completed_at': 'datetime',
'links': 'dict(str, str)'
}
attribute_map = {
'status': 'status',
'results': 'results',
'requested_at': 'requestedAt',
'started_at': 'startedAt',
'completed_at': 'completedAt',
'links': 'links'
}
def __init__(self, status=None, results=None, requested_at=None, started_at=None, completed_at=None, links=None, local_vars_configuration=None): # noqa: E501
"""BatchResponseSimplePublicObject - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._status = None
self._results = None
self._requested_at = None
self._started_at = None
self._completed_at = None
self._links = None
self.discriminator = None
self.status = status
self.results = results
if requested_at is not None:
self.requested_at = requested_at
self.started_at = started_at
self.completed_at = completed_at
if links is not None:
self.links = links
@property
def status(self):
"""Gets the status of this BatchResponseSimplePublicObject. # noqa: E501
:return: The status of this BatchResponseSimplePublicObject. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this BatchResponseSimplePublicObject.
:param status: The status of this BatchResponseSimplePublicObject. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
allowed_values = ["PENDING", "PROCESSING", "CANCELED", "COMPLETE"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def results(self):
"""Gets the results of this BatchResponseSimplePublicObject. # noqa: E501
:return: The results of this BatchResponseSimplePublicObject. # noqa: E501
:rtype: list[SimplePublicObject]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this BatchResponseSimplePublicObject.
:param results: The results of this BatchResponseSimplePublicObject. # noqa: E501
:type: list[SimplePublicObject]
"""
if self.local_vars_configuration.client_side_validation and results is None: # noqa: E501
raise ValueError("Invalid value for `results`, must not be `None`") # noqa: E501
self._results = results
@property
def requested_at(self):
"""Gets the requested_at of this BatchResponseSimplePublicObject. # noqa: E501
:return: The requested_at of this BatchResponseSimplePublicObject. # noqa: E501
:rtype: datetime
"""
return self._requested_at
@requested_at.setter
def requested_at(self, requested_at):
"""Sets the requested_at of this BatchResponseSimplePublicObject.
:param requested_at: The requested_at of this BatchResponseSimplePublicObject. # noqa: E501
:type: datetime
"""
self._requested_at = requested_at
@property
def started_at(self):
"""Gets the started_at of this BatchResponseSimplePublicObject. # noqa: E501
:return: The started_at of this BatchResponseSimplePublicObject. # noqa: E501
:rtype: datetime
"""
return self._started_at
@started_at.setter
def started_at(self, started_at):
"""Sets the started_at of this BatchResponseSimplePublicObject.
:param started_at: The started_at of this BatchResponseSimplePublicObject. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and started_at is None: # noqa: E501
raise ValueError("Invalid value for `started_at`, must not be `None`") # noqa: E501
self._started_at = started_at
@property
def completed_at(self):
"""Gets the completed_at of this BatchResponseSimplePublicObject. # noqa: E501
:return: The completed_at of this BatchResponseSimplePublicObject. # noqa: E501
:rtype: datetime
"""
return self._completed_at
@completed_at.setter
def completed_at(self, completed_at):
"""Sets the completed_at of this BatchResponseSimplePublicObject.
:param completed_at: The completed_at of this BatchResponseSimplePublicObject. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and completed_at is None: # noqa: E501
raise ValueError("Invalid value for `completed_at`, must not be `None`") # noqa: E501
self._completed_at = completed_at
@property
def links(self):
"""Gets the links of this BatchResponseSimplePublicObject. # noqa: E501
:return: The links of this BatchResponseSimplePublicObject. # noqa: E501
:rtype: dict(str, str)
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this BatchResponseSimplePublicObject.
:param links: The links of this BatchResponseSimplePublicObject. # noqa: E501
:type: dict(str, str)
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchResponseSimplePublicObject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, BatchResponseSimplePublicObject):
return True
return self.to_dict() != other.to_dict()
| 32.111111
| 162
| 0.624866
|
import pprint
import re
import six
from hubspot.crm.deals.configuration import Configuration
class BatchResponseSimplePublicObject(object):
openapi_types = {
'status': 'str',
'results': 'list[SimplePublicObject]',
'requested_at': 'datetime',
'started_at': 'datetime',
'completed_at': 'datetime',
'links': 'dict(str, str)'
}
attribute_map = {
'status': 'status',
'results': 'results',
'requested_at': 'requestedAt',
'started_at': 'startedAt',
'completed_at': 'completedAt',
'links': 'links'
}
def __init__(self, status=None, results=None, requested_at=None, started_at=None, completed_at=None, links=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._status = None
self._results = None
self._requested_at = None
self._started_at = None
self._completed_at = None
self._links = None
self.discriminator = None
self.status = status
self.results = results
if requested_at is not None:
self.requested_at = requested_at
self.started_at = started_at
self.completed_at = completed_at
if links is not None:
self.links = links
@property
def status(self):
return self._status
@status.setter
def status(self, status):
if self.local_vars_configuration.client_side_validation and status is None:
raise ValueError("Invalid value for `status`, must not be `None`")
allowed_values = ["PENDING", "PROCESSING", "CANCELED", "COMPLETE"]
if self.local_vars_configuration.client_side_validation and status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}"
.format(status, allowed_values)
)
self._status = status
@property
def results(self):
return self._results
@results.setter
def results(self, results):
if self.local_vars_configuration.client_side_validation and results is None:
raise ValueError("Invalid value for `results`, must not be `None`")
self._results = results
@property
def requested_at(self):
return self._requested_at
@requested_at.setter
def requested_at(self, requested_at):
self._requested_at = requested_at
@property
def started_at(self):
return self._started_at
@started_at.setter
def started_at(self, started_at):
if self.local_vars_configuration.client_side_validation and started_at is None:
raise ValueError("Invalid value for `started_at`, must not be `None`")
self._started_at = started_at
@property
def completed_at(self):
return self._completed_at
@completed_at.setter
def completed_at(self, completed_at):
if self.local_vars_configuration.client_side_validation and completed_at is None:
raise ValueError("Invalid value for `completed_at`, must not be `None`")
self._completed_at = completed_at
@property
def links(self):
return self._links
@links.setter
def links(self, links):
self._links = links
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, BatchResponseSimplePublicObject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, BatchResponseSimplePublicObject):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
f715c44f3f9b2781b37fa1bf5d47e32a81a7c1be
| 8,332
|
py
|
Python
|
SimPEG/electromagnetics/analytics/FDEM.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | 1
|
2022-02-18T16:31:27.000Z
|
2022-02-18T16:31:27.000Z
|
SimPEG/electromagnetics/analytics/FDEM.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | null | null | null |
SimPEG/electromagnetics/analytics/FDEM.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from scipy.special import erf
from SimPEG import utils
import warnings
def hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0):
"""
The analytical expression is given in Equation 4.56 in Ward and Hohmann,
1988, and the example reproduces their Figure 4.2.
.. plot::
import numpy as np
import matplotlib.pyplot as plt
from SimPEG import electromagnetics as EM
freq = np.logspace(-1, 5, 301)
test = EM.analytics.hzAnalyticDipoleF(
100, freq, 0.01, secondary=False)
plt.loglog(freq, test.real, 'C0-', label='Real')
plt.loglog(freq, -test.real, 'C0--')
plt.loglog(freq, test.imag, 'C1-', label='Imaginary')
plt.loglog(freq, -test.imag, 'C1--')
plt.title('Response at $r=100$ m')
plt.xlim([1e-1, 1e5])
plt.ylim([1e-12, 1e-6])
plt.xlabel('Frequency (Hz)')
plt.ylabel('$H_z$ (A/m)')
plt.legend(loc=6)
plt.show()
**Reference**
- Ward, S. H., and G. W. Hohmann, 1988, Electromagnetic theory for
geophysical applications, Chapter 4 of Electromagnetic Methods in Applied
Geophysics: SEG, Investigations in Geophysics No. 3, 130--311; DOI:
`10.1190/1.9781560802631.ch4
<https://doi.org/10.1190/1.9781560802631.ch4>`_.
"""
r = np.abs(r)
k = np.sqrt(-1j * 2.0 * np.pi * freq * mu * sigma)
m = 1
front = m / (2.0 * np.pi * (k**2) * (r**5))
back = 9 - (
9 + 9j * k * r - 4 * (k**2) * (r**2) - 1j * (k**3) * (r**3)
) * np.exp(-1j * k * r)
hz = front * back
if secondary:
hp = -1 / (4 * np.pi * r**3)
hz = hz - hp
if hz.ndim == 1:
hz = utils.mkvc(hz, 2)
return hz
def MagneticDipoleWholeSpace(
XYZ, srcLoc, sig, f, moment, fieldType="b", mu_r=1, eps_r=1, **kwargs
):
"""
Analytical solution for a dipole in a whole-space.
The analytical expression is given in Equation 2.57 in Ward and Hohmann,
1988, and the example reproduces their Figure 2.2.
TODOs:
- set it up to instead take a mesh & survey
- add divide by zero safety
.. plot::
import numpy as np
from SimPEG import electromagnetics as EM
import matplotlib.pyplot as plt
from scipy.constants import mu_0
freqs = np.logspace(-2, 5, 301)
Bx, By, Bz = EM.analytics.FDEM.MagneticDipoleWholeSpace(
[0, 100, 0], [0, 0, 0], 1e-2, freqs, moment='Z')
plt.figure()
plt.loglog(freqs, Bz.real/mu_0, 'C0', label='Real')
plt.loglog(freqs, -Bz.real/mu_0, 'C0--')
plt.loglog(freqs, Bz.imag/mu_0, 'C1', label='Imaginary')
plt.loglog(freqs, -Bz.imag/mu_0, 'C1--')
plt.legend()
plt.xlim([1e-2, 1e5])
plt.ylim([1e-13, 1e-6])
plt.show()
**Reference**
- Ward, S. H., and G. W. Hohmann, 1988, Electromagnetic theory for
geophysical applications, Chapter 4 of Electromagnetic Methods in Applied
Geophysics: SEG, Investigations in Geophysics No. 3, 130--311; DOI:
`10.1190/1.9781560802631.ch4
<https://doi.org/10.1190/1.9781560802631.ch4>`_.
"""
orient = kwargs.pop("orientation", None)
if orient is not None:
raise TypeError(
"orientation kwarg has been removed, please use the moment argument",
)
magnitude = moment
moment = orient
else:
magnitude = 1
mu = kwargs.pop("mu", None)
if mu is not None:
raise TypeError("mu kwarg has been removed, please use the mu_r argument.")
mu_r = mu / mu_0
mu = mu_0 * mu_r
eps = epsilon_0 * eps_r
w = 2 * np.pi * f
if isinstance(moment, str):
if moment == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.asArray_N_x_Dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
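    # complex wavenumber: k**2 = w**2 * mu * eps - 1j * w * mu * sig
    # (displacement-current plus conduction terms)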
k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)
kr = k * r
if fieldType in ["h", "b"]:
front = 1 / (4.0 * pi * r**3.0) * np.exp(-1j * kr)
mid = -(kr**2.0) + 3.0 * 1j * kr + 3.0
Fx = front * (
mx * ((dx / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
+ my * ((dy * dx / r**2.0) * mid)
+ mz * ((dx * dz / r**2.0) * mid)
)
Fy = front * (
mx * ((dx * dy / r**2.0) * mid)
+ my * ((dy / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
+ mz * ((dy * dz / r**2.0) * mid)
)
Fz = front * (
mx * ((dx * dz / r**2.0) * mid)
+ my * ((dy * dz / r**2.0) * mid)
+ mz * ((dz / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
)
if fieldType == "b":
Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
elif fieldType == "e":
front = 1j * w * mu * (1 + 1j * kr) / (4.0 * pi * r**3.0) * np.exp(-1j * kr)
Fx = front * (my * (dz / r) + mz * (-dy / r))
Fy = front * (mx * (-dz / r) + mz * (dx / r))
Fz = front * (mx * (dy / r) + my * (-dx / r))
return Fx, Fy, Fz
def ElectricDipoleWholeSpace(
XYZ, srcLoc, sig, f, moment="X", fieldType="e", mu_r=1, eps_r=1, **kwargs
):
orient = kwargs.pop("orientation", None)
if orient is not None:
raise TypeError(
"orientation kwarg has been removed, please use the moment argument."
)
mu = kwargs.pop("mu", None)
if mu is not None:
raise TypeError("mu kwarg has been removed, please use the mu_r argument.")
cur = kwargs.pop("current", None)
if cur is not None:
raise TypeError(
"current kwarg has been removed, please use the moment argument.",
)
else:
magnitude = 1
length = kwargs.pop("length", None)
if length is not None:
raise TypeError(
"length kwarg has been removed, please use the moment argument."
)
mu = mu_0 * mu_r
eps = epsilon_0 * eps_r
w = 2 * np.pi * f
if isinstance(moment, str):
if moment.upper() == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment.upper() == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment.upper() == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.asArray_N_x_Dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)
kr = k * r
if fieldType == "e":
front = 1 / (4.0 * np.pi * sig * r**3) * np.exp(-1j * k * r)
mid = -(k**2) * r**2 + 3 * 1j * k * r + 3
Fx = front * (
mx * ((dx**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
+ my * (dy * dx / r**2) * mid
+ mz * (dz * dx / r**2) * mid
)
Fy = front * (
mx * (dx * dy / r**2) * mid
+ my * ((dy**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
+ mz * (dz * dy / r**2) * mid
)
Fz = front * (
mx * (dx * dz / r**2) * mid
+ my * (dy * dz / r**2) * mid
+ mz * ((dz**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
)
elif fieldType in ["h", "b"]:
front = (1 + 1j * kr) / (4.0 * np.pi * r**2) * np.exp(-1j * k * r)
Fx = front * (my * (dz / r) + mz * (-dy / r))
Fy = front * (mx * (-dz / r) + mz * (dx / r))
Fz = front * (mx * (dy / r) + my * (-dx / r))
if fieldType == "b":
Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
return Fx, Fy, Fz
| 30.079422
| 84
| 0.491959
|
from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from scipy.special import erf
from SimPEG import utils
import warnings
def hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0):
r = np.abs(r)
k = np.sqrt(-1j * 2.0 * np.pi * freq * mu * sigma)
m = 1
front = m / (2.0 * np.pi * (k**2) * (r**5))
back = 9 - (
9 + 9j * k * r - 4 * (k**2) * (r**2) - 1j * (k**3) * (r**3)
) * np.exp(-1j * k * r)
hz = front * back
if secondary:
hp = -1 / (4 * np.pi * r**3)
hz = hz - hp
if hz.ndim == 1:
hz = utils.mkvc(hz, 2)
return hz
def MagneticDipoleWholeSpace(
XYZ, srcLoc, sig, f, moment, fieldType="b", mu_r=1, eps_r=1, **kwargs
):
orient = kwargs.pop("orientation", None)
if orient is not None:
raise TypeError(
"orientation kwarg has been removed, please use the moment argument",
)
magnitude = moment
moment = orient
else:
magnitude = 1
mu = kwargs.pop("mu", None)
if mu is not None:
raise TypeError("mu kwarg has been removed, please use the mu_r argument.")
mu_r = mu / mu_0
mu = mu_0 * mu_r
eps = epsilon_0 * eps_r
w = 2 * np.pi * f
if isinstance(moment, str):
if moment == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.asArray_N_x_Dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)
kr = k * r
if fieldType in ["h", "b"]:
front = 1 / (4.0 * pi * r**3.0) * np.exp(-1j * kr)
mid = -(kr**2.0) + 3.0 * 1j * kr + 3.0
Fx = front * (
mx * ((dx / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
+ my * ((dy * dx / r**2.0) * mid)
+ mz * ((dx * dz / r**2.0) * mid)
)
Fy = front * (
mx * ((dx * dy / r**2.0) * mid)
+ my * ((dy / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
+ mz * ((dy * dz / r**2.0) * mid)
)
Fz = front * (
mx * ((dx * dz / r**2.0) * mid)
+ my * ((dy * dz / r**2.0) * mid)
+ mz * ((dz / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
)
if fieldType == "b":
Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
elif fieldType == "e":
front = 1j * w * mu * (1 + 1j * kr) / (4.0 * pi * r**3.0) * np.exp(-1j * kr)
Fx = front * (my * (dz / r) + mz * (-dy / r))
Fy = front * (mx * (-dz / r) + mz * (dx / r))
Fz = front * (mx * (dy / r) + my * (-dx / r))
return Fx, Fy, Fz
def ElectricDipoleWholeSpace(
XYZ, srcLoc, sig, f, moment="X", fieldType="e", mu_r=1, eps_r=1, **kwargs
):
orient = kwargs.pop("orientation", None)
if orient is not None:
raise TypeError(
"orientation kwarg has been removed, please use the moment argument."
)
mu = kwargs.pop("mu", None)
if mu is not None:
raise TypeError("mu kwarg has been removed, please use the mu_r argument.")
cur = kwargs.pop("current", None)
if cur is not None:
raise TypeError(
"current kwarg has been removed, please use the moment argument.",
)
else:
magnitude = 1
length = kwargs.pop("length", None)
if length is not None:
raise TypeError(
"length kwarg has been removed, please use the moment argument."
)
mu = mu_0 * mu_r
eps = epsilon_0 * eps_r
w = 2 * np.pi * f
if isinstance(moment, str):
if moment.upper() == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment.upper() == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment.upper() == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.asArray_N_x_Dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)
kr = k * r
if fieldType == "e":
front = 1 / (4.0 * np.pi * sig * r**3) * np.exp(-1j * k * r)
mid = -(k**2) * r**2 + 3 * 1j * k * r + 3
Fx = front * (
mx * ((dx**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
+ my * (dy * dx / r**2) * mid
+ mz * (dz * dx / r**2) * mid
)
Fy = front * (
mx * (dx * dy / r**2) * mid
+ my * ((dy**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
+ mz * (dz * dy / r**2) * mid
)
Fz = front * (
mx * (dx * dz / r**2) * mid
+ my * (dy * dz / r**2) * mid
+ mz * ((dz**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
)
elif fieldType in ["h", "b"]:
front = (1 + 1j * kr) / (4.0 * np.pi * r**2) * np.exp(-1j * k * r)
Fx = front * (my * (dz / r) + mz * (-dy / r))
Fy = front * (mx * (-dz / r) + mz * (dx / r))
Fz = front * (mx * (dy / r) + my * (-dx / r))
if fieldType == "b":
Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
return Fx, Fy, Fz
| true
| true
|
f715c4faf3c9fe1f421e85c3edcd776dc7e1569d
| 5,188
|
py
|
Python
|
test/functional/nulldummy.py
|
chx381/platopia
|
563c616db768f813aa4482d39d8ed1d8aacaad4f
|
[
"MIT"
] | 5
|
2018-07-21T15:58:30.000Z
|
2019-04-25T01:45:36.000Z
|
test/functional/nulldummy.py
|
chx381/platopia
|
563c616db768f813aa4482d39d8ed1d8aacaad4f
|
[
"MIT"
] | null | null | null |
test/functional/nulldummy.py
|
chx381/platopia
|
563c616db768f813aa4482d39d8ed1d8aacaad4f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
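# Replaces the zero dummy element consumed by CHECKMULTISIG in the first input's
# scriptSig with OP_1 (0x51), so the transaction violates the NULLDUMMY rule.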
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
'''
This test is meant to exercise NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
'''
class NULLDUMMYTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])
NetworkThread().start() # Start up network handling in another thread
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info(
"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(
self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.tx_submit(self.nodes[0], test1txs[0])
test1txs.append(self.create_transaction(
self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.tx_submit(self.nodes[0], test1txs[1])
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info(
"Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(
self.nodes[0], txid2, self.ms_address, 48)
trueDummy(test2tx)
txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)
self.log.info(
"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{"txid": txid, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID")
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def tx_submit(self, node, tx, msg=""):
tx.rehash()
try:
node.sendrawtransaction(
bytes_to_hex_str(tx.serialize_with_witness()), True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], msg)
else:
assert_equal('', msg)
return tx.hash
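    # Mines a block containing `txs` on top of the stored tip and asserts whether
    # the node accepted or rejected it.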
def block_submit(self, node, txs, witness=False, accept=False):
block = create_block(self.tip, create_coinbase(
self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| 40.53125
| 125
| 0.644372
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])
NetworkThread().start()
self.coinbase_blocks = self.nodes[0].generate(2)
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427)
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info(
"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(
self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.tx_submit(self.nodes[0], test1txs[0])
test1txs.append(self.create_transaction(
self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.tx_submit(self.nodes[0], test1txs[1])
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info(
"Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(
self.nodes[0], txid2, self.ms_address, 48)
trueDummy(test2tx)
txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)
self.log.info(
"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{"txid": txid, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID")
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def tx_submit(self, node, tx, msg=""):
tx.rehash()
try:
node.sendrawtransaction(
bytes_to_hex_str(tx.serialize_with_witness()), True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], msg)
else:
assert_equal('', msg)
return tx.hash
def block_submit(self, node, txs, witness=False, accept=False):
block = create_block(self.tip, create_coinbase(
self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| true
| true
|
f715c501c1f5d7c019455fce3fc6397536a093ce
| 288
|
py
|
Python
|
universal/items.py
|
universalscraper/universal-spider
|
0b6d82ee0c749cf32dcf501e6d84f518ee2e8437
|
[
"MIT"
] | 2
|
2017-01-14T20:09:24.000Z
|
2019-09-23T09:26:23.000Z
|
universal/items.py
|
scraperize/universal-spider
|
0b6d82ee0c749cf32dcf501e6d84f518ee2e8437
|
[
"MIT"
] | null | null | null |
universal/items.py
|
scraperize/universal-spider
|
0b6d82ee0c749cf32dcf501e6d84f518ee2e8437
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class UniversalItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| 19.2
| 51
| 0.6875
|
import scrapy
class UniversalItem(scrapy.Item):
pass
| true
| true
|
f715c602904311d44b8bf950698fcd77ad53a6a8
| 3,800
|
py
|
Python
|
img2pose/utils/renderer.py
|
jiacheng1gujiaxin/poseface
|
316924e224477f881240712a13a925bdd27adf4c
|
[
"MIT"
] | null | null | null |
img2pose/utils/renderer.py
|
jiacheng1gujiaxin/poseface
|
316924e224477f881240712a13a925bdd27adf4c
|
[
"MIT"
] | null | null | null |
img2pose/utils/renderer.py
|
jiacheng1gujiaxin/poseface
|
316924e224477f881240712a13a925bdd27adf4c
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from Sim3DR import RenderPipeline
from .pose_operations import plot_3d_landmark
def _to_ctype(arr):
if not arr.flags.c_contiguous:
return arr.copy(order="C")
return arr
def get_colors(img, ver):
h, w, _ = img.shape
ver[0, :] = np.minimum(np.maximum(ver[0, :], 0), w - 1) # x
ver[1, :] = np.minimum(np.maximum(ver[1, :], 0), h - 1) # y
ind = np.round(ver).astype(np.int32)
colors = img[ind[1, :], ind[0, :], :] / 255.0 # n x 3
return colors.copy()
class Renderer:
def __init__(
self,
vertices_path="../pose_references/vertices_trans.npy",
triangles_path="../pose_references/triangles.npy",
):
self.vertices = np.load(vertices_path)
self.triangles = _to_ctype(np.load(triangles_path).T)
self.vertices[:, 0] *= -1
self.cfg = {
"intensity_ambient": 0.3,
"color_ambient": (1, 1, 1),
"intensity_directional": 0.6,
"color_directional": (1, 1, 1),
"intensity_specular": 0.1,
"specular_exp": 5,
"light_pos": (0, 0, 5),
"view_pos": (0, 0, 5),
}
self.render_app = RenderPipeline(**self.cfg)
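    # Projects the reference face mesh into image coordinates for each pose,
    # then rescales and offsets the projected depth used by the renderer.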
def transform_vertices(self, img, poses, global_intrinsics=None):
(w, h) = img.size
if global_intrinsics is None:
global_intrinsics = np.array(
[[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]
)
transformed_vertices = []
for pose in poses:
projected_lms = np.zeros_like(self.vertices)
projected_lms[:, :2], lms_3d_trans_proj = plot_3d_landmark(
self.vertices, pose, global_intrinsics
)
projected_lms[:, 2] = lms_3d_trans_proj[:, 2] * -1
range_x = np.max(projected_lms[:, 0]) - np.min(projected_lms[:, 0])
range_y = np.max(projected_lms[:, 1]) - np.min(projected_lms[:, 1])
s = (h + w) / pose[5]
projected_lms[:, 2] *= s
projected_lms[:, 2] += (range_x + range_y) * 3
transformed_vertices.append(projected_lms)
return transformed_vertices
def render(self, img, transformed_vertices, alpha=0.9, save_path=None):
img = np.asarray(img)
overlap = img.copy()
for vertices in transformed_vertices:
            vertices = _to_ctype(vertices)  # ensure C-contiguous memory for the renderer
overlap = self.render_app(vertices, self.triangles, overlap)
res = cv2.addWeighted(img, 1 - alpha, overlap, alpha, 0)
if save_path is not None:
cv2.imwrite(save_path, res)
print(f"Save visualization result to {save_path}")
return res
def save_to_obj(self, img, ver_lst, height, save_path):
n_obj = len(ver_lst) # count obj
if n_obj <= 0:
return
n_vertex = ver_lst[0].T.shape[1]
n_face = self.triangles.shape[0]
with open(save_path, "w") as f:
for i in range(n_obj):
ver = ver_lst[i].T
colors = get_colors(img, ver)
for j in range(n_vertex):
x, y, z = ver[:, j]
f.write(
f"v {x:.2f} {height - y:.2f} {z:.2f} {colors[j, 2]:.2f} "
f"{colors[j, 1]:.2f} {colors[j, 0]:.2f}\n"
)
for i in range(n_obj):
offset = i * n_vertex
for j in range(n_face):
idx1, idx2, idx3 = self.triangles[j] # m x 3
f.write(
f"f {idx3 + 1 + offset} {idx2 + 1 + offset} "
f"{idx1 + 1 + offset}\n"
)
print(f"Dump tp {save_path}")
| 31.666667
| 81
| 0.517105
|
import cv2
import numpy as np
from Sim3DR import RenderPipeline
from .pose_operations import plot_3d_landmark
def _to_ctype(arr):
if not arr.flags.c_contiguous:
return arr.copy(order="C")
return arr
def get_colors(img, ver):
h, w, _ = img.shape
ver[0, :] = np.minimum(np.maximum(ver[0, :], 0), w - 1)
ver[1, :] = np.minimum(np.maximum(ver[1, :], 0), h - 1)
ind = np.round(ver).astype(np.int32)
colors = img[ind[1, :], ind[0, :], :] / 255.0
return colors.copy()
class Renderer:
def __init__(
self,
vertices_path="../pose_references/vertices_trans.npy",
triangles_path="../pose_references/triangles.npy",
):
self.vertices = np.load(vertices_path)
self.triangles = _to_ctype(np.load(triangles_path).T)
self.vertices[:, 0] *= -1
self.cfg = {
"intensity_ambient": 0.3,
"color_ambient": (1, 1, 1),
"intensity_directional": 0.6,
"color_directional": (1, 1, 1),
"intensity_specular": 0.1,
"specular_exp": 5,
"light_pos": (0, 0, 5),
"view_pos": (0, 0, 5),
}
self.render_app = RenderPipeline(**self.cfg)
def transform_vertices(self, img, poses, global_intrinsics=None):
(w, h) = img.size
if global_intrinsics is None:
global_intrinsics = np.array(
[[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]
)
transformed_vertices = []
for pose in poses:
projected_lms = np.zeros_like(self.vertices)
projected_lms[:, :2], lms_3d_trans_proj = plot_3d_landmark(
self.vertices, pose, global_intrinsics
)
projected_lms[:, 2] = lms_3d_trans_proj[:, 2] * -1
range_x = np.max(projected_lms[:, 0]) - np.min(projected_lms[:, 0])
range_y = np.max(projected_lms[:, 1]) - np.min(projected_lms[:, 1])
s = (h + w) / pose[5]
projected_lms[:, 2] *= s
projected_lms[:, 2] += (range_x + range_y) * 3
transformed_vertices.append(projected_lms)
return transformed_vertices
def render(self, img, transformed_vertices, alpha=0.9, save_path=None):
img = np.asarray(img)
overlap = img.copy()
for vertices in transformed_vertices:
vertices = _to_ctype(vertices)
overlap = self.render_app(vertices, self.triangles, overlap)
res = cv2.addWeighted(img, 1 - alpha, overlap, alpha, 0)
if save_path is not None:
cv2.imwrite(save_path, res)
print(f"Save visualization result to {save_path}")
return res
def save_to_obj(self, img, ver_lst, height, save_path):
n_obj = len(ver_lst)
if n_obj <= 0:
return
n_vertex = ver_lst[0].T.shape[1]
n_face = self.triangles.shape[0]
with open(save_path, "w") as f:
for i in range(n_obj):
ver = ver_lst[i].T
colors = get_colors(img, ver)
for j in range(n_vertex):
x, y, z = ver[:, j]
f.write(
f"v {x:.2f} {height - y:.2f} {z:.2f} {colors[j, 2]:.2f} "
f"{colors[j, 1]:.2f} {colors[j, 0]:.2f}\n"
)
for i in range(n_obj):
offset = i * n_vertex
for j in range(n_face):
idx1, idx2, idx3 = self.triangles[j]
f.write(
f"f {idx3 + 1 + offset} {idx2 + 1 + offset} "
f"{idx1 + 1 + offset}\n"
)
print(f"Dump tp {save_path}")
| true
| true
|
f715c633d888342e2bcb33e9b3f302a45f208031
| 6,067
|
py
|
Python
|
servers/Thot/schema.py
|
DiegoCorrea/bottleOfMessages
|
1281d3f82ce4d44a31e426aa8862c3c9b294cf03
|
[
"MIT"
] | null | null | null |
servers/Thot/schema.py
|
DiegoCorrea/bottleOfMessages
|
1281d3f82ce4d44a31e426aa8862c3c9b294cf03
|
[
"MIT"
] | null | null | null |
servers/Thot/schema.py
|
DiegoCorrea/bottleOfMessages
|
1281d3f82ce4d44a31e426aa8862c3c9b294cf03
|
[
"MIT"
] | null | null | null |
import sqlite3
import sys
import os
import inspect
from time import gmtime, strftime
from config.server import APP_DB_PATH, SERVER_DB_PATH, WHO_AM_I
sys.path.append('..')
# connecting...
conn = sqlite3.connect(
os.path.dirname(
os.path.abspath(
inspect.getfile(
inspect.currentframe()
)
)
) + APP_DB_PATH[1:]
)
# creating a cursor
cursor = conn.cursor()
print(' -'*30)
print(' + name: ', WHO_AM_I['name'])
print(' + db-name: ', WHO_AM_I['db-name'])
print(' + ip: ', WHO_AM_I['ip'])
print(' + port: ', WHO_AM_I['port'])
print(' + position: ', WHO_AM_I['position'])
print(' + succession_order: ', WHO_AM_I['succession_order'])
print(' -'*30)
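# Based on the keys accessed above, WHO_AM_I (imported from config.server) is assumed
# to be a dict of roughly this shape (values here are illustrative, taken from the
# default_servers_list entries below where possible):
#
#   WHO_AM_I = {
#       'name': 'Thot',
#       'db-name': 'thot.db',
#       'ip': '192.168.0.17',
#       'port': 27002,
#       'position': 'worker',
#       'succession_order': 2,
#   }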
print('Deletando Tabelas se Existe')
cursor.execute("""
DROP TABLE IF EXISTS users;
""")
cursor.execute("""
DROP TABLE IF EXISTS contacts;
""")
cursor.execute("""
DROP TABLE IF EXISTS chats;
""")
cursor.execute("""
DROP TABLE IF EXISTS chat_messages;
""")
cursor.execute("""
DROP TABLE IF EXISTS groups;
""")
cursor.execute("""
DROP TABLE IF EXISTS user_groups;
""")
cursor.execute("""
DROP TABLE IF EXISTS group_messages;
""")
print('...Ok!')
# ######################################################### #
# creating the tables (schema)
print('Users')
cursor.execute("""
CREATE TABLE IF NOT EXISTS users (
email CHAR(64) NOT NULL PRIMARY KEY,
name VARCHAR(45) NOT NULL,
created_at TEXT NOT NULL
);
""")
print('...Ok!')
print('Contacts')
cursor.execute("""
CREATE TABLE IF NOT EXISTS contacts (
id CHAR(32) NOT NULL PRIMARY KEY,
user_id CHAR(64) NOT NULL,
contact_id CHAR(32) NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY(user_id) REFERENCES users(email),
FOREIGN KEY(contact_id) REFERENCES users(email)
);
""")
print('...Ok!')
print('Chats')
cursor.execute("""
CREATE TABLE IF NOT EXISTS chats (
id CHAR(32) NOT NULL PRIMARY KEY,
user_id CHAR(64) NOT NULL,
contact_id CHAR(64) NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY(user_id) REFERENCES users(email),
FOREIGN KEY(contact_id) REFERENCES users(email)
);
""")
print('...Ok!')
print('Chat Message')
cursor.execute("""
CREATE TABLE IF NOT EXISTS chat_messages (
id CHAR(32) NOT NULL PRIMARY KEY,
chat_id CHAR(32) NOT NULL,
sender_id CHAR(64) NOT NULL,
message TEXT NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY(sender_id) REFERENCES users(email),
FOREIGN KEY(chat_id) REFERENCES chats(id)
);
""")
print('...Ok!')
print('Groups ')
cursor.execute("""
CREATE TABLE IF NOT EXISTS groups (
id CHAR(32) NOT NULL PRIMARY KEY,
name CHAR(32) NOT NULL,
created_at TEXT NOT NULL
);
""")
print('...Ok!')
print('Users Groups ')
cursor.execute("""
CREATE TABLE IF NOT EXISTS user_groups (
id CHAR(32) NOT NULL PRIMARY KEY,
            user_id CHAR(64) NOT NULL,
group_id CHAR(32) NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY(user_id) REFERENCES users(id),
FOREIGN KEY(group_id) REFERENCES groups(id)
);
""")
print('...OK!')
print('Group Messages')
cursor.execute("""
CREATE TABLE IF NOT EXISTS group_messages (
id CHAR(32) NOT NULL PRIMARY KEY,
sender_id CHAR(64) NOT NULL,
group_id CHAR(32) NOT NULL,
created_at TEXT NOT NULL,
message TEXT NOT NULL,
FOREIGN KEY(sender_id) REFERENCES users(id),
FOREIGN KEY(group_id) REFERENCES groups(id)
);
""")
print('...OK!')
print('Tabelas criadas com sucesso.')
# disconnecting...
conn.close()
# ##################################################################### #
print ('\n\n')
# connecting...
conn = sqlite3.connect(
os.path.dirname(
os.path.abspath(
inspect.getfile(
inspect.currentframe()
)
)
) + SERVER_DB_PATH[1:]
)
# creating a cursor
cursor = conn.cursor()
print('Deletando Tabelas de Servers se Existe')
cursor.execute("""
DROP TABLE IF EXISTS default_servers_list;
""")
cursor.execute("""
        DROP TABLE IF EXISTS workers_servers_list;
""")
cursor.execute("""
        DROP TABLE IF EXISTS suspects_servers_list;
""")
cursor.execute("""
DROP TABLE IF EXISTS round_times;
""")
print('...Ok!')
print('Default Server List')
cursor.execute("""
CREATE TABLE IF NOT EXISTS default_servers_list (
name CHAR(64) NOT NULL,
ip VARCHAR(32) NOT NULL,
port INTEGER NOT NULL,
succession_order INTEGER NOT NULL
);
""")
conn.commit()
cursor.execute("""
INSERT INTO default_servers_list
(ip, name, port, succession_order)
VALUES ('192.168.0.16', 'Hermes', 27001, 1);
""")
conn.commit()
cursor.execute("""
INSERT INTO default_servers_list
(ip, name, port, succession_order)
VALUES ('192.168.0.17', 'Thot', 27002, 2);
""")
conn.commit()
cursor.execute("""
INSERT INTO default_servers_list
(ip, name, port, succession_order)
VALUES ('192.168.0.10', 'Exu', 27000, 0);
""")
conn.commit()
print('...OK!')
print('Worker Server List')
cursor.execute("""
CREATE TABLE IF NOT EXISTS workers_servers_list (
name CHAR(64) NOT NULL,
ip VARCHAR(32) NOT NULL,
port INTEGER NOT NULL,
succession_order INTEGER NOT NULL
);
""")
conn.commit()
print('...OK!')
print('Suspect Server List')
cursor.execute("""
CREATE TABLE IF NOT EXISTS suspects_servers_list (
name CHAR(64) NOT NULL,
ip VARCHAR(32) NOT NULL,
port INTEGER NOT NULL
);
""")
conn.commit()
print('...OK!')
print('Round Times')
cursor.execute("""
CREATE TABLE IF NOT EXISTS round_times (
_round INTEGER NOT NULL PRIMARY KEY,
created_at TEXT NOT NULL
);
""")
conn.commit()
cursor.execute("""
INSERT INTO round_times
(_round, created_at)
VALUES (?, ?);
""", (
0,
strftime(
"%Y-%m-%d %H:%M:%S",
gmtime()
)
)
)
conn.commit()
print('...OK!')
# disconnecting...
conn.close()
| 23.885827
| 73
| 0.606725
|
import sqlite3
import sys
import os
import inspect
from time import gmtime, strftime
from config.server import APP_DB_PATH, SERVER_DB_PATH, WHO_AM_I
sys.path.append('..')
conn = sqlite3.connect(
os.path.dirname(
os.path.abspath(
inspect.getfile(
inspect.currentframe()
)
)
) + APP_DB_PATH[1:]
)
cursor = conn.cursor()
print(' -'*30)
print(' + name: ', WHO_AM_I['name'])
print(' + db-name: ', WHO_AM_I['db-name'])
print(' + ip: ', WHO_AM_I['ip'])
print(' + port: ', WHO_AM_I['port'])
print(' + position: ', WHO_AM_I['position'])
print(' + succession_order: ', WHO_AM_I['succession_order'])
print(' -'*30)
print('Deletando Tabelas se Existe')
cursor.execute("""
DROP TABLE IF EXISTS users;
""")
cursor.execute("""
DROP TABLE IF EXISTS contacts;
""")
cursor.execute("""
DROP TABLE IF EXISTS chats;
""")
cursor.execute("""
DROP TABLE IF EXISTS chat_messages;
""")
cursor.execute("""
DROP TABLE IF EXISTS groups;
""")
cursor.execute("""
DROP TABLE IF EXISTS user_groups;
""")
cursor.execute("""
DROP TABLE IF EXISTS group_messages;
""")
print('...Ok!')
REFERENCES groups(id)
);
""")
print('...OK!')
print('Group Messages')
cursor.execute("""
CREATE TABLE IF NOT EXISTS group_messages (
id CHAR(32) NOT NULL PRIMARY KEY,
sender_id CHAR(64) NOT NULL,
group_id CHAR(32) NOT NULL,
created_at TEXT NOT NULL,
message TEXT NOT NULL,
FOREIGN KEY(sender_id) REFERENCES users(id),
FOREIGN KEY(group_id) REFERENCES groups(id)
);
""")
print('...OK!')
print('Tabelas criadas com sucesso.')
conn.close()
| true
| true
|
f715c70e4981ec385e1f2070cf75f75007655155
| 293
|
py
|
Python
|
sololearn/NewDriverLicense/DL.py
|
SneakyWizards/HackerRankSolutions
|
daf494e7775bb0de5afcfdcfd45aa73e6a950e0e
|
[
"RSA-MD"
] | 3
|
2020-01-08T18:33:11.000Z
|
2022-02-08T00:38:26.000Z
|
sololearn/NewDriverLicense/DL.py
|
SneakyWizards/HackerRankSolutions
|
daf494e7775bb0de5afcfdcfd45aa73e6a950e0e
|
[
"RSA-MD"
] | null | null | null |
sololearn/NewDriverLicense/DL.py
|
SneakyWizards/HackerRankSolutions
|
daf494e7775bb0de5afcfdcfd45aa73e6a950e0e
|
[
"RSA-MD"
] | 4
|
2020-08-08T22:02:23.000Z
|
2022-02-07T17:40:15.000Z
|
#!/usr/bin/python
name = input()
num_agents = int(input())
drivers = input().split()
drivers.append(name)
drivers.sort()
index = drivers.index(name) + 1
if num_agents > index:
num_agents = index
rem = index % num_agents
div = index // num_agents
time = (rem + div) * 20
print(time)
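# Worked example of the timing rule above: with 2 agents and our name sorting into
# position 5 (index = 5), num_agents stays 2, rem = 5 % 2 = 1, div = 5 // 2 = 2,
# so the printed wait time is (1 + 2) * 20 = 60.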
| 14.65
| 31
| 0.665529
|
name = input()
num_agents = int(input())
drivers = input().split()
drivers.append(name)
drivers.sort()
index = drivers.index(name) + 1
if num_agents > index:
num_agents = index
rem = index % num_agents
div = index // num_agents
time = (rem + div) * 20
print(time)
| true
| true
|
f715c76c0e7bc0f285f65f27afbb7bee42da3afb
| 805
|
py
|
Python
|
server/urls.py
|
w769076810/myhome
|
38e39b15c84f8c60fe3f02b46053a8971e081b9a
|
[
"MIT"
] | null | null | null |
server/urls.py
|
w769076810/myhome
|
38e39b15c84f8c60fe3f02b46053a8971e081b9a
|
[
"MIT"
] | null | null | null |
server/urls.py
|
w769076810/myhome
|
38e39b15c84f8c60fe3f02b46053a8971e081b9a
|
[
"MIT"
] | null | null | null |
"""server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from server import views
urlpatterns = [
# path('admin/', admin.site.urls),
path('test/', views.test)
]
| 33.541667
| 77
| 0.70559
|
from django.contrib import admin
from django.urls import path
from server import views
urlpatterns = [
path('test/', views.test)
]
| true
| true
|
f715c84088f7c8d2c89e008f545880f78639ed19
| 17,970
|
py
|
Python
|
ansible/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_volume_copy
short_description: Create volume copy pairs
description:
- Create and delete snapshots images on volume groups for NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
source_volume_id:
description:
- The the id of the volume copy source.
- If used, must be paired with destination_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
destination_volume_id:
description:
- The the id of the volume copy destination.
- If used, must be paired with source_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
volume_copy_pair_id:
description:
- The the id of a given volume copy pair
- Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
- Can use to delete or check presence of volume pairs
- Must specify this or (destination_volume_id and source_volume_id)
    status:
description:
- Whether the specified volume copy pair should exist or not.
required: True
choices: ['present', 'absent']
create_copy_pair_if_does_not_exist:
description:
- Defines if a copy pair will be created if it does not exist.
- If set to True destination_volume_id and source_volume_id are required.
choices: [True, False]
default: True
start_stop_copy:
description:
- starts a re-copy or stops a copy in progress
- "Note: If you stop the initial file copy before it it done the copy pair will be destroyed"
- Requires volume_copy_pair_id
search_volume_id:
description:
- Searches for all valid potential target and source volumes that could be used in a copy_pair
- Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
"""
RESULTS = """
"""
EXAMPLES = """
---
msg:
description: Success message
returned: success
type: string
sample: Json facts for the volume copy that was created.
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: Created Volume Copy Pair with ID
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
(rc, resp) = request(url, method='GET', url_username=params['api_username'],
url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
volume_copy_pair_id = None
for potential_copy_pair in resp:
if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
            # the destination side presumably needs to match as well; 'targetVolume' is
            # assumed to be the corresponding field in the copy-job listing (the original
            # repeated the source check here)
            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
volume_copy_pair_id = potential_copy_pair['id']
return volume_copy_pair_id
def create_copy_pair(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
rData = {
"sourceId": params['source_volume_id'],
"targetId": params['destination_volume_id']
}
(rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def delete_copy_pair_by_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='DELETE',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 204:
return False, (rc, resp)
else:
return True, (rc, resp)
def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
    # a plain GET is assumed here; the original reused the DELETE endpoint and
    # method to "find" a pair, which would destroy it
    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
        params['ssid'], params['volume_copy_pair_id'])
    url = params['api_url'] + get_status
    (rc, resp) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def start_stop_copy(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
return True, response_data[0]['percentComplete']
else:
return False, response_data
def check_copy_status(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
if response_data['percentComplete'] != -1:
return True, response_data['percentComplete']
else:
return False, response_data['percentComplete']
else:
return False, response_data
def find_valid_copy_pair_targets_and_sources(params):
get_status = 'storage-systems/%s/volumes' % params['ssid']
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
source_capacity = None
candidates = []
for volume in response_data:
if volume['id'] == params['search_volume_id']:
source_capacity = volume['capacity']
else:
candidates.append(volume)
potential_sources = []
potential_targets = []
for volume in candidates:
if volume['capacity'] > source_capacity:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_targets.append(volume['id'])
else:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_sources.append(volume['id'])
return potential_targets, potential_sources
else:
raise Exception("Response [%s]" % response_code)
def main():
module = AnsibleModule(argument_spec=dict(
source_volume_id=dict(type='str'),
destination_volume_id=dict(type='str'),
copy_priority=dict(required=False, default=0, type='int'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, default=True),
targetWriteProtected=dict(required=False, default=True, type='bool'),
onlineCopy=dict(required=False, default=False, type='bool'),
volume_copy_pair_id=dict(type='str'),
status=dict(required=True, choices=['present', 'absent'], type='str'),
create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
search_volume_id=dict(type='str'),
),
mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
['volume_copy_pair_id', 'source_volume_id'],
['volume_copy_pair_id', 'search_volume_id'],
['search_volume_id', 'destination_volume_id'],
['search_volume_id', 'source_volume_id'],
],
required_together=[['source_volume_id', 'destination_volume_id'],
],
required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
]
)
params = module.params
if not params['api_url'].endswith('/'):
params['api_url'] += '/'
# Check if we want to search
if params['search_volume_id'] is not None:
try:
potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
except:
e = get_exception()
module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % str(e))
module.exit_json(changed=False,
msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
search_volume_id=params['search_volume_id'],
valid_targets=potential_targets,
valid_sources=potential_sources)
# Check if we want to start or stop a copy operation
if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
# Get the current status info
currenty_running, status_info = check_copy_status(params)
# If we want to start
if params['start_stop_copy'] == 'start':
# If we have already started
if currenty_running is True:
module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
# If we need to start
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
else:
module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
# If we want to stop
else:
# If it has already stopped
if currenty_running is False:
module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
# If we need to stop it
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
# If we want the copy pair to exist we do this stuff
if params['status'] == 'present':
# We need to check if it exists first
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
# If no volume copy pair is found we need need to make it.
if params['volume_copy_pair_id'] is None:
# In order to create we can not do so with just a volume_copy_pair_id
copy_began_status, (rc, resp) = create_copy_pair(params)
if copy_began_status is True:
module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
else:
module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
# If it does exist we do nothing
else:
# We verify that it exists
exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
params)
if exist_status:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
else:
if exist_status_code == 404:
module.fail_json(
msg=' Volume Copy Pair with ID: %s does not exist. Can not create without source_volume_id and destination_volume_id' %
params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
exist_status_code, exist_status_data))
module.fail_json(msg="Done")
# If we want it to not exist we do this
else:
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
# We delete it by the volume_copy_pair_id
delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
if delete_status is True:
module.exit_json(changed=True,
msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
else:
if delete_status_code == 404:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
delete_status_code, delete_status_data))
if __name__ == '__main__':
main()
| 40.840909
| 145
| 0.617641
|
DOCUMENTATION = """
---
module: netapp_e_volume_copy
short_description: Create volume copy pairs
description:
- Create and delete snapshots images on volume groups for NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
source_volume_id:
description:
- The the id of the volume copy source.
- If used, must be paired with destination_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
destination_volume_id:
description:
- The the id of the volume copy destination.
- If used, must be paired with source_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
volume_copy_pair_id:
description:
- The the id of a given volume copy pair
- Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
- Can use to delete or check presence of volume pairs
- Must specify this or (destination_volume_id and source_volume_id)
    status:
description:
- Whether the specified volume copy pair should exist or not.
required: True
choices: ['present', 'absent']
create_copy_pair_if_does_not_exist:
description:
- Defines if a copy pair will be created if it does not exist.
- If set to True destination_volume_id and source_volume_id are required.
choices: [True, False]
default: True
start_stop_copy:
description:
- starts a re-copy or stops a copy in progress
- "Note: If you stop the initial file copy before it it done the copy pair will be destroyed"
- Requires volume_copy_pair_id
search_volume_id:
description:
- Searches for all valid potential target and source volumes that could be used in a copy_pair
- Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
"""
RESULTS = """
"""
EXAMPLES = """
---
msg:
description: Success message
returned: success
type: string
sample: Json facts for the volume copy that was created.
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: Created Volume Copy Pair with ID
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
(rc, resp) = request(url, method='GET', url_username=params['api_username'],
url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
volume_copy_pair_id = None
for potential_copy_pair in resp:
if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
volume_copy_pair_id = potential_copy_pair['id']
return volume_copy_pair_id
def create_copy_pair(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
rData = {
"sourceId": params['source_volume_id'],
"targetId": params['destination_volume_id']
}
(rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def delete_copy_pair_by_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='DELETE',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 204:
return False, (rc, resp)
else:
return True, (rc, resp)
def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
        params['ssid'], params['volume_copy_pair_id'])
    url = params['api_url'] + get_status
    (rc, resp) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def start_stop_copy(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
return True, response_data[0]['percentComplete']
else:
return False, response_data
def check_copy_status(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
if response_data['percentComplete'] != -1:
return True, response_data['percentComplete']
else:
return False, response_data['percentComplete']
else:
return False, response_data
def find_valid_copy_pair_targets_and_sources(params):
get_status = 'storage-systems/%s/volumes' % params['ssid']
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
source_capacity = None
candidates = []
for volume in response_data:
if volume['id'] == params['search_volume_id']:
source_capacity = volume['capacity']
else:
candidates.append(volume)
potential_sources = []
potential_targets = []
for volume in candidates:
if volume['capacity'] > source_capacity:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_targets.append(volume['id'])
else:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_sources.append(volume['id'])
return potential_targets, potential_sources
else:
raise Exception("Response [%s]" % response_code)
def main():
module = AnsibleModule(argument_spec=dict(
source_volume_id=dict(type='str'),
destination_volume_id=dict(type='str'),
copy_priority=dict(required=False, default=0, type='int'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, default=True),
targetWriteProtected=dict(required=False, default=True, type='bool'),
onlineCopy=dict(required=False, default=False, type='bool'),
volume_copy_pair_id=dict(type='str'),
status=dict(required=True, choices=['present', 'absent'], type='str'),
create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
search_volume_id=dict(type='str'),
),
mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
['volume_copy_pair_id', 'source_volume_id'],
['volume_copy_pair_id', 'search_volume_id'],
['search_volume_id', 'destination_volume_id'],
['search_volume_id', 'source_volume_id'],
],
required_together=[['source_volume_id', 'destination_volume_id'],
],
required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
]
)
params = module.params
if not params['api_url'].endswith('/'):
params['api_url'] += '/'
if params['search_volume_id'] is not None:
try:
potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
except:
e = get_exception()
module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % str(e))
module.exit_json(changed=False,
msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
search_volume_id=params['search_volume_id'],
valid_targets=potential_targets,
valid_sources=potential_sources)
if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
currenty_running, status_info = check_copy_status(params)
if params['start_stop_copy'] == 'start':
if currenty_running is True:
module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
else:
module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
else:
if currenty_running is False:
module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
if params['status'] == 'present':
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
if params['volume_copy_pair_id'] is None:
copy_began_status, (rc, resp) = create_copy_pair(params)
if copy_began_status is True:
module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
else:
module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
else:
exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
params)
if exist_status:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
else:
if exist_status_code == 404:
module.fail_json(
msg=' Volume Copy Pair with ID: %s does not exist. Can not create without source_volume_id and destination_volume_id' %
params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
exist_status_code, exist_status_data))
module.fail_json(msg="Done")
else:
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
if delete_status is True:
module.exit_json(changed=True,
msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
else:
if delete_status_code == 404:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
delete_status_code, delete_status_data))
if __name__ == '__main__':
main()
| true
| true
|
f715ca8ab55b1a3d741b6e97e2473b7911154537
| 708
|
py
|
Python
|
scripts/basic_support/robot_patrol_test.py
|
liminglong/micros_mars_task_alloc
|
9b216e5494dbff6abd7b4c74eb72fc35eb392ca3
|
[
"BSD-3-Clause"
] | 4
|
2016-06-15T02:44:43.000Z
|
2021-12-20T15:43:32.000Z
|
scripts/basic_support/robot_patrol_test.py
|
liminglong/micros_mars_task_alloc
|
9b216e5494dbff6abd7b4c74eb72fc35eb392ca3
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/basic_support/robot_patrol_test.py
|
liminglong/micros_mars_task_alloc
|
9b216e5494dbff6abd7b4c74eb72fc35eb392ca3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
__author__ = 'Minglong Li'
#import sys
#sys.path.append("~/catkin_ws/src/multi_robot_patrol/scripts/basic_support")
from robot_patrol_area_0 import RobotPatrolArea0
from robot_patrol_area_1 import RobotPatrolArea1
from robot_patrol_area_2 import RobotPatrolArea2
from motivational_behavior import MotivationalBehavior
from switch import Switch
from std_msgs.msg import Bool
#ob1 = RobotPatrolArea0()
ob2 = RobotPatrolArea1()
ob3 = RobotPatrolArea2()
#ob1.start()
ob2.start()
ob3.start()
#ob4 = MotivationalBehavior('mb0',0,0,'switch0/activate')#nodename,robotid,behaviorid
#ob4.start()
#ob5 = Switch('switch0','topic01',Bool,'topic01s')#nodename,subtopic,type,pubtopic
#ob5.start()
| 28.32
| 85
| 0.80226
|
__author__ = 'Minglong Li'
from robot_patrol_area_0 import RobotPatrolArea0
from robot_patrol_area_1 import RobotPatrolArea1
from robot_patrol_area_2 import RobotPatrolArea2
from motivational_behavior import MotivationalBehavior
from switch import Switch
from std_msgs.msg import Bool
ob2 = RobotPatrolArea1()
ob3 = RobotPatrolArea2()
ob2.start()
ob3.start()
| true
| true
|
f715cb4c8e53b868f96f37ece94c476794cd5b19
| 17,826
|
py
|
Python
|
sphero_res_learner_1D.py
|
koro/smp_sphero
|
614f4958816b565c7950ea0c6e6249864fbf2efe
|
[
"MIT"
] | 1
|
2020-12-13T13:02:55.000Z
|
2020-12-13T13:02:55.000Z
|
sphero_res_learner_1D.py
|
koro/smp_sphero
|
614f4958816b565c7950ea0c6e6249864fbf2efe
|
[
"MIT"
] | null | null | null |
sphero_res_learner_1D.py
|
koro/smp_sphero
|
614f4958816b565c7950ea0c6e6249864fbf2efe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""self-organizing behaviour: smp / inverse model learning for sphero, 1-dimensional"""
import rospy
import signal
import time, sys, argparse, os
from std_msgs.msg import Float32, Float32MultiArray, ColorRGBA
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist, Quaternion #, Point, Pose, TwistWithCovariance, Vector3
from smp_msgs.msg import reservoir
import tf
# # additional paths
# localpaths = ["/path/to/smp/smp"]
# if localpaths[0] not in sys.path:
# sys.path.insert(0, localpaths[0])
from reservoirs import Reservoir
from learners import learnerEH
from smp_thread import smp_thread_ros
import numpy as np
import numpy.linalg as LA
class SpheroResLearner1D(learnerEH, smp_thread_ros):
"""Sphero experiments"""
modes = {"vel": 0, "res_gen": 1}
def __init__(self, args):
smp_thread_ros.__init__(self)
learnerEH.__init__(self, args)
self.name = "SpheroResLearner1D"
# print mode
self.mode = SpheroResLearner1D.modes[args.mode]
# self.loop_time = 1/100.
# self.loop_time = 1/50.
# self.loop_time = 1/40.
self.loop_time = 1/20.
# self.loop_time = 1/10.
# self.loop_time = 1.
self.isrunning = True
# rospy.init_node(self.name)
self.sub["imu"] = rospy.Subscriber("/imu",
Imu, self.cb_imu)
self.sub["odom"] = rospy.Subscriber("/odom",
Odometry, self.cb_odom)
if os.environ["ROS_DISTRO"] == "hydro":
self.pub["twist"] = rospy.Publisher("/cmd_vel", Twist)
self.pub["color"] = rospy.Publisher("/set_color", ColorRGBA)
self.pub["target"] = rospy.Publisher("/learner/target", reservoir)
self.pub["learn_zn"] = rospy.Publisher("/learner/zn", reservoir)
else:
self.pub["twist"] = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
self.pub["color"] = rospy.Publisher("/set_color", ColorRGBA, queue_size=1)
self.pub["target"] = rospy.Publisher("/learner/target", reservoir, queue_size=1)
self.pub["learn_zn"] = rospy.Publisher("/learner/zn", reservoir, queue_size=1)
# counting
self.cb_odom_cnt = 0
self.cb_imu_cnt = 0
self.cnt = 0
self.cnt_inc = 1
# save odometry state
self.odom = Odometry()
# motor output struct
self.T = Twist()
print "Twist", self.T
self.err = np.zeros((self.cfg.len_episode, self.cfg.odim))
# self.cfg.target = np.zeros_like(self.iosm.x)
self.cfg.target = np.ones((self.cfg.odim, 1)) * -0.6
# self.target_change_period = np.random.uniform(20, 100) * 10
# self.target_change_period = np.random.uniform(20, 100) * 1
self.target_change_period = 100
self.noise = 0.
# sphero color
self.color = ColorRGBA()
# learning
self.len_learning = self.cfg.len_episode * self.cfg.ratio_testing
def cb_odom(self, msg):
"""ROS odometry callback, copy incoming data into local memory"""
# print type(msg)
self.odom = msg
# self.iosm.x[0] = self.odom.pose.pose.position.x
# self.iosm.x[1] = self.odom.pose.pose.position.y
# self.iosm.x_raw[0] = self.odom.pose.pose.position.x
# self.iosm.x_raw[1] = self.odom.pose.pose.position.y
self.iosm.x_raw[0] = self.odom.twist.twist.linear.x
# self.iosm.x_raw[1] = self.odom.twist.twist.linear.x - self.cfg.target
# self.iosm.x_raw[3] = self.odom.twist.twist.linear.y
self.cb_odom_cnt += 1
return
def cb_imu(self, msg):
"""ROS IMU callback: use odometry and incoming imu data to trigger
sensorimotor loop execution"""
# print "odom", self.odom
# print "imu", msg
# return
# (r, p, y) = tf.transformations.euler_from_quaternion([msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w])
# # print r, p, y
# self.iosm.x_raw[4] = r
# self.iosm.x_raw[5] = p
# self.iosm.x_raw[6] = y
# self.iosm.x_raw[7] = msg.angular_velocity.x
# self.iosm.x_raw[8] = msg.angular_velocity.y
# self.iosm.x_raw[9] = msg.angular_velocity.z
# self.iosm.x_raw[10] = msg.linear_acceleration.x
# self.iosm.x_raw[11] = msg.linear_acceleration.y
# self.iosm.x_raw[12] = msg.linear_acceleration.z
self.cb_imu_cnt += 1
# time.sleep(0.1)
# if self.cnt > 20:
# rospy.signal_shutdown("stop")
# sys.exit(0)
def local_hooks(self):
pass
def prepare_inputs(self):
if False and self.use_icm: # always true as of learnerEH 20150304
# print (self.iosm.x_raw)
self.iosm.x = self.input_coupling_mtx * self.iosm.x_raw # component-wise
else:
self.iosm.x[1,0] = self.iosm.x_raw[0,0] - self.cfg.target[0,0]
# self.iosm.x[1,0] = self.cfg.target[0,0]
self.iosm.x[0,0] = self.iosm.x_raw[0,0]
# self.iosm.x = self.iosm.x_raw
# self.iosm.x[0:2,0] = 0.
# self.iosm.x[4:,0] = 0.
print "self.iosm.x", self.iosm.x
# pass
def prepare_output(self, z, zn):
print "z, zn", z, zn
if self.cfg.mode == "vel":
if self.cnt_main < self.len_learning:
# reassign
z_ = zn[0]
else:
# reassign
z_ = z[0]
self.T.linear.x = (0. + z_) * self.cfg.res_output_scaling
else:
self.T.linear.x = (0.0 + zn[0]) * self.cfg.res_output_scaling
# self.T.linear.y = zn[1] * self.cfg.res_output_scaling
# set sphero color
# if z >= 0.:
# self.color.r = z
# else:
# self.color.g = np.abs(z)
# print "color: error", self.iosm.e
color = np.clip(np.abs(self.iosm.e), 0., 1.)
self.color.r = color
self.pub["color"].publish(self.color)
self.pub["twist"].publish(self.T)
def controller(self):
# self.T.angular.x = np.random.uniform(0.0, 0.1)
now = self.cnt_main
print "iosm.x_raw", now, self.iosm.x_raw
# print "now", now
if self.cfg.mode == "vel":
if True: # self.cnt_main % 1 == 0:
# target_change_period = 300
# target_change_period = 100.
# target_change_period = 50.
# target_change_period = 20.
target_change_period = self.target_change_period
print "xxxxx", self.cfg.tp_target_spec
# constant target
if self.cfg.tp_target_spec["constant"]:
if self.cnt_main == 10:
# self.target[0,0] = self.cfg.target
if np.random.uniform(0, 1.) > 0.5:
target_sign = 1
else:
target_sign = -1
self.cfg.target[0,0] = target_sign * np.random.uniform(0.5, 1.2)
elif self.cfg.tp_target_spec["jumping"]:
# jumping target
print range(self.cfg.len_washout+1, self.cfg.len_episode, int(target_change_period))
if self.cnt_main in range(self.cfg.len_washout+1, self.cfg.len_episode, int(target_change_period)):
self.cfg.target[0,0] = 1. + np.random.uniform(-0.5, 0.5)
# self.cfg.target[0,0] = np.random.uniform(-1, 1)
elif self.cfg.tp_target_spec["jumping_sign"]:
# jumping target inverting sign
if self.cnt_main in range(self.cfg.len_washout+1, self.cfg.len_episode, int(target_change_period+1)):
if self.cnt_main % 2 == 0:
print "even"
self.cfg.target[0,0] = 1. + np.random.uniform(-0.3, 0.3)
else:
print "odd"
self.cfg.target[0,0] = -1. + np.random.uniform(-0.3, 0.3)
elif self.cfg.tp_target_spec["sine"]:
# sinewave target fixed amp
print "sinewave target"
# self.cfg.target[0,0] = -1.25 + 0.5 * np.cos(self.cnt_main/15.)
# self.cfg.target[0,0] = 1 + 0.3 * np.sin(self.cnt_main/target_change_period)
self.cfg.target[0,0] = 1. + 0.3 * np.sin(self.cnt_main/float(target_change_period))
print "self.cfg.target", self.cfg.target
elif self.cfg.tp_target_spec["sine_sign"]:
# sinewave target fixed amp
# self.cfg.target[0,0] = -1.25 + 0.5 * np.cos(self.cnt_main/15.)
# self.cfg.target[0,0] = 1 + 0.3 * np.sin(self.cnt_main/target_change_period)
self.cfg.target[0,0] = 0. + 1. * np.sin(self.cnt_main/target_change_period)
# # sinewave with increasing amplitude
# self.cfg.target[0,0] = 1.
# sinamp = np.clip(self.cnt_main/4000., -0.2, 0.2)
# self.cfg.target[0,0] += sinamp * np.sin(self.cnt_main/50.)
print "target", self.cfg.target
self.pub["target"].publish(self.cfg.target)
err = self.iosm.x_raw[0] - self.cfg.target #
# print "err", err
err1 = -np.sum(np.square(err))
self.err[self.cnt_main,0] = err1
if self.cnt_main > 200:
# print "mse window", self.err[(self.cnt_main-200):self.cnt_main,0]
self.iosm.mse = np.mean(np.sqrt(-self.err[(self.cnt_main-200):self.cnt_main,0])) * np.ones_like(self.cfg.target)
print "mse", self.iosm.mse
else:
self.iosm.mse = np.mean(np.sqrt(-self.err[0:self.cnt_main,0])) * np.ones_like(self.cfg.target)
# self.err[self.cnt_main,1] = err1
# print "self.rew.perf.shape", self.rew.perf.shape
# print self.err[self.cnt_main,0]
# FIXED: acceleration based reward
# FIXME: sign is inverted when running on different machines?
# FIXME: this performance measure asks for infinitely large accelerations
# self.rew.perf = -np.sign(err) * np.array([self.iosm.x_raw[2,0]]).reshape((1,1)) # eta
# self.rew.perf[0,0] = err1 # np.sign(err) * np.array([self.iosm.x_raw[2,0]]).reshape((1,1)) # rudi
# FIXME: take derivative of error
self.rew.perf[0,0] = err1 + self.iosm.e[0,0]
# self.rew.perf[1,0] = err1
self.iosm.e[0,0] = err1
self.iosm.t[0,0] = self.cfg.target[0,0]
# if np.abs(self.rew.perf) > 1:
# self.rew.perf = np.array((-1.)).reshape((1,1))
# lp history sould be decaying actually, so it's easier to be better
self.rew.perf_lp = ((1 - self.rew.coeff_a) * self.rew.perf_lp) + (self.rew.coeff_a * self.rew.perf)
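                # i.e. an exponential moving average of the recent reward; with
                # coeff_a around 0.1 (hypothetical), perf_lp tracks roughly the
                # last ~10 reward samples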
if self.cfg.use_et:
self.learnEHE()
# pass
else:
self.learnEH()
# pass
# self.learnEHpub()
# energy based reward
# learn stuff
# self.iosm.x[1] = err
# self.iosm.x[1] = self.cfg.target[0,0] - 1.
# self.iosm.x[1] = 0.
self.iosm.z = self.res.execute(self.iosm.x)
self.iosm.zn = self.res.zn
# self.iosm.z = self.res.execute(err)
# self.iosm.z[1,0] = 0.
# self.res.z[1,0] = 0.
# self.res.zn[1,0] = 0.
# reset input
# self.iosm.x_raw[0,0] = 0.
else:
self.iosm.z = self.iosm.z
# return (self.res.z, self.res.zn)
# return (self.iosm.z, self.iosm.zn)
elif self.cfg.mode == "PIDvel":
self.err = self.iosm.x_raw[2:4] - np.ones((self.cfg.odim, 1)) * 0.2
print "self.err", self.err
# self.iosm.z = 10.0 * self.err
self.iosm.z[0,0] = 0.5 * np.cos(self.cnt_main/20.)
# self.iosm.z[1,0] = 0.5 * np.sin(self.cnt_main/10.)
elif self.cfg.mode == "noise":
noise = np.random.normal(0.5, 1.)
self.noise = 0.9 * self.noise + 0.1 * noise
# self.noise = noise
# self.noise = 1. + 0.5 * np.sin(self.cnt_main/10.)
# self.noise = 0.7
self.iosm.z[0,0] = self.noise
elif self.cfg.mode == "bump":
print "bump"
# z = np.zeros((self.cfg.odim, 1))
if self.cnt_main in range(100, 200):
self.iosm.z[0,0] = 1.
elif self.cnt_main in range(300, 400):
self.iosm.z[0,0] = -1.
# elif self.cnt_main in range(260, 280):
# self.iosm.z[1,0] = 1.
# elif self.cnt_main in range(320, 340):
# self.iosm.z[1,0] = -1.
else:
self.iosm.z[0,0] = 0.
# self.iosm.z[1,0] = 0.
elif self.cfg.mode == "ramp":
if self.cnt >= 255:
self.cnt_inc = -1
elif self.cnt <= -255:
self.cnt_inc = 1
self.cnt += self.cnt_inc
self.iosm.zn[0,0] = self.cnt / 100.
print "ramp", self.iosm.zn
# self.iosm.z[0,0] = (255 + np.abs((self.cnt_main % 510) - 255)) * 0.005
# if np.random.uniform(0, 1.) > 0.9:
# print "hickup"
# time.sleep(0.5)
# store states for pushback: see above
print "publishing zn"
self.pub["learn_zn"].publish(self.iosm.zn)
return(self.iosm.z, self.iosm.zn)
def main(args):
# fix RNG seed
np.random.seed(args.seed)
# check datadir exists
if not os.path.exists(args.datadir):
print "Datadir doesn't exist, try mkdir '%s'" % (args.datadir)
if args.mode == "res_gen":
# pre-generate N reservoirs for use in experiments
for i in range(10):
s = SpheroResLearner1D(args)
timestamp = time.strftime("%Y-%m-%d-%H%M%S")
filename = "%s/reservoir-%d-%s.bin" % (args.datadir, i, timestamp)
s.res.save(filename=filename)
s.isrunning = False
sys.exit(0)
else:
s = SpheroResLearner1D(args)
# do we get passed a trained network for testing?
if args.resfile != "":
# then load it
print "loading reservoir from file %s" % (args.resfile)
s.res.load(filename="%s/%s" % (args.datadir, args.resfile))
# bend some parameters, e.g. eta and theta
print "overriding config for eta, theta"
s.cfg.eta_EH = 0.
s.cfg.res_theta = 1e-3
s.res.set_theta(s.cfg.res_theta)
def handler(signum, frame):
print 'Signal handler called with signal', signum
s.isrunning = False
sys.exit(0)
# raise IOError("Couldn't open device!")
# install interrupt handler
signal.signal(signal.SIGINT, handler)
# run
s.start()
while not rospy.is_shutdown():
# print("main loop")
time.sleep(1)
# timestamp
timestamp = time.strftime("%Y%m%d-%H%M%S")
target_str = s.cfg.tp_target_spec.keys()[s.cfg.tp_target_spec.values().index(1)]
print "target_str", target_str
filename = "%s/log-learner-%s-N-%d-eta-%f-theta-%f-g-%f-target-%s" % (args.datadir,
timestamp,
s.cfg.N, s.cfg.eta_EH, s.cfg.res_theta, s.cfg.g,
target_str)
# save logs
s.savelogs(ts=timestamp, filename=filename)
# save network
if args.resfile == "":
target_str = s.cfg.tp_target_spec.keys()[s.cfg.tp_target_spec.values().index(1)]
print "target_str", target_str
filename = "%s/reservoir-%d-%s-N-%d-eta-%f-theta-%f-g-%f-target-%s.bin" % (args.datadir,
1, timestamp,
s.cfg.N, s.cfg.eta_EH, s.cfg.res_theta, s.cfg.g,
target_str)
s.res.save(filename=filename)
else:
print "not saving network, already loaded"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="reservoir smp learner for sphero")
parser.add_argument("-c", "--config", dest="cfgfilename", help="config file to use",
default="default.cfg")
parser.add_argument("-d", "--datadir", help="Directory root for saved configurations",
default=parser.prog[0:-3])
parser.add_argument("-m", "--mode", type=str, help="select mode: " + str(SpheroResLearner1D.modes),
default="vel")
parser.add_argument("-rf", "--resfile", dest="resfile", help="pickled reservoir to load",
default="")
parser.add_argument("-s", "--seed", dest="seed", help="seed for rng", default=123, type=int)
args = parser.parse_args()
# good seeds: 4,
main(args)
| 42.954217
| 140
| 0.521542
|
"""self-organizing behaviour: smp / inverse model learning for sphero, 1-dimensional"""
import rospy
import signal
import time, sys, argparse, os
from std_msgs.msg import Float32, Float32MultiArray, ColorRGBA
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist, Quaternion
from smp_msgs.msg import reservoir
import tf
from reservoirs import Reservoir
from learners import learnerEH
from smp_thread import smp_thread_ros
import numpy as np
import numpy.linalg as LA
class SpheroResLearner1D(learnerEH, smp_thread_ros):
"""Sphero experiments"""
modes = {"vel": 0, "res_gen": 1}
def __init__(self, args):
smp_thread_ros.__init__(self)
learnerEH.__init__(self, args)
self.name = "SpheroResLearner1D"
self.mode = SpheroResLearner1D.modes[args.mode]
self.loop_time = 1/20.
self.isrunning = True
self.sub["imu"] = rospy.Subscriber("/imu",
Imu, self.cb_imu)
self.sub["odom"] = rospy.Subscriber("/odom",
Odometry, self.cb_odom)
if os.environ["ROS_DISTRO"] == "hydro":
self.pub["twist"] = rospy.Publisher("/cmd_vel", Twist)
self.pub["color"] = rospy.Publisher("/set_color", ColorRGBA)
self.pub["target"] = rospy.Publisher("/learner/target", reservoir)
self.pub["learn_zn"] = rospy.Publisher("/learner/zn", reservoir)
else:
self.pub["twist"] = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
self.pub["color"] = rospy.Publisher("/set_color", ColorRGBA, queue_size=1)
self.pub["target"] = rospy.Publisher("/learner/target", reservoir, queue_size=1)
self.pub["learn_zn"] = rospy.Publisher("/learner/zn", reservoir, queue_size=1)
self.cb_odom_cnt = 0
self.cb_imu_cnt = 0
self.cnt = 0
self.cnt_inc = 1
self.odom = Odometry()
self.T = Twist()
print "Twist", self.T
self.err = np.zeros((self.cfg.len_episode, self.cfg.odim))
self.cfg.target = np.ones((self.cfg.odim, 1)) * -0.6
self.target_change_period = 100
self.noise = 0.
self.color = ColorRGBA()
self.len_learning = self.cfg.len_episode * self.cfg.ratio_testing
def cb_odom(self, msg):
"""ROS odometry callback, copy incoming data into local memory"""
self.odom = msg
self.iosm.x_raw[0] = self.odom.twist.twist.linear.x
self.cb_odom_cnt += 1
return
def cb_imu(self, msg):
"""ROS IMU callback: use odometry and incoming imu data to trigger
sensorimotor loop execution"""
self.cb_imu_cnt += 1
def local_hooks(self):
pass
def prepare_inputs(self):
if False and self.use_icm:
self.iosm.x = self.input_coupling_mtx * self.iosm.x_raw
else:
self.iosm.x[1,0] = self.iosm.x_raw[0,0] - self.cfg.target[0,0]
self.iosm.x[0,0] = self.iosm.x_raw[0,0]
print "self.iosm.x", self.iosm.x
def prepare_output(self, z, zn):
print "z, zn", z, zn
if self.cfg.mode == "vel":
if self.cnt_main < self.len_learning:
z_ = zn[0]
else:
z_ = z[0]
self.T.linear.x = (0. + z_) * self.cfg.res_output_scaling
else:
self.T.linear.x = (0.0 + zn[0]) * self.cfg.res_output_scaling
color = np.clip(np.abs(self.iosm.e), 0., 1.)
self.color.r = color
self.pub["color"].publish(self.color)
self.pub["twist"].publish(self.T)
def controller(self):
now = self.cnt_main
print "iosm.x_raw", now, self.iosm.x_raw
if self.cfg.mode == "vel":
if True:
target_change_period = self.target_change_period
print "xxxxx", self.cfg.tp_target_spec
if self.cfg.tp_target_spec["constant"]:
if self.cnt_main == 10:
if np.random.uniform(0, 1.) > 0.5:
target_sign = 1
else:
target_sign = -1
self.cfg.target[0,0] = target_sign * np.random.uniform(0.5, 1.2)
elif self.cfg.tp_target_spec["jumping"]:
print range(self.cfg.len_washout+1, self.cfg.len_episode, int(target_change_period))
if self.cnt_main in range(self.cfg.len_washout+1, self.cfg.len_episode, int(target_change_period)):
self.cfg.target[0,0] = 1. + np.random.uniform(-0.5, 0.5)
elif self.cfg.tp_target_spec["jumping_sign"]:
if self.cnt_main in range(self.cfg.len_washout+1, self.cfg.len_episode, int(target_change_period+1)):
if self.cnt_main % 2 == 0:
print "even"
self.cfg.target[0,0] = 1. + np.random.uniform(-0.3, 0.3)
else:
print "odd"
self.cfg.target[0,0] = -1. + np.random.uniform(-0.3, 0.3)
elif self.cfg.tp_target_spec["sine"]:
print "sinewave target"
self.cfg.target[0,0] = 1. + 0.3 * np.sin(self.cnt_main/float(target_change_period))
print "self.cfg.target", self.cfg.target
elif self.cfg.tp_target_spec["sine_sign"]:
self.cfg.target[0,0] = 0. + 1. * np.sin(self.cnt_main/target_change_period)
print "target", self.cfg.target
self.pub["target"].publish(self.cfg.target)
err = self.iosm.x_raw[0] - self.cfg.target
err1 = -np.sum(np.square(err))
self.err[self.cnt_main,0] = err1
if self.cnt_main > 200:
self.iosm.mse = np.mean(np.sqrt(-self.err[(self.cnt_main-200):self.cnt_main,0])) * np.ones_like(self.cfg.target)
print "mse", self.iosm.mse
else:
self.iosm.mse = np.mean(np.sqrt(-self.err[0:self.cnt_main,0])) * np.ones_like(self.cfg.target)
self.iosm.e[0,0] = err1
self.iosm.t[0,0] = self.cfg.target[0,0]
self.rew.perf_lp = ((1 - self.rew.coeff_a) * self.rew.perf_lp) + (self.rew.coeff_a * self.rew.perf)
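            # The update above is a first-order low-pass (exponential moving average) of the
            # instantaneous performance: perf_lp <- (1 - a) * perf_lp + a * perf. With a
            # hypothetical coeff_a = 0.1, the baseline tracks roughly the last ~10 reward values.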
if self.cfg.use_et:
self.learnEHE()
# pass
else:
self.learnEH()
# pass
# self.learnEHpub()
# energy based reward
# learn stuff
# self.iosm.x[1] = err
# self.iosm.x[1] = self.cfg.target[0,0] - 1.
# self.iosm.x[1] = 0.
self.iosm.z = self.res.execute(self.iosm.x)
self.iosm.zn = self.res.zn
# self.iosm.z = self.res.execute(err)
# self.iosm.z[1,0] = 0.
# self.res.z[1,0] = 0.
# self.res.zn[1,0] = 0.
# reset input
# self.iosm.x_raw[0,0] = 0.
else:
self.iosm.z = self.iosm.z
# return (self.res.z, self.res.zn)
# return (self.iosm.z, self.iosm.zn)
elif self.cfg.mode == "PIDvel":
self.err = self.iosm.x_raw[2:4] - np.ones((self.cfg.odim, 1)) * 0.2
print "self.err", self.err
# self.iosm.z = 10.0 * self.err
self.iosm.z[0,0] = 0.5 * np.cos(self.cnt_main/20.)
# self.iosm.z[1,0] = 0.5 * np.sin(self.cnt_main/10.)
elif self.cfg.mode == "noise":
noise = np.random.normal(0.5, 1.)
self.noise = 0.9 * self.noise + 0.1 * noise
# self.noise = noise
# self.noise = 1. + 0.5 * np.sin(self.cnt_main/10.)
# self.noise = 0.7
self.iosm.z[0,0] = self.noise
elif self.cfg.mode == "bump":
print "bump"
# z = np.zeros((self.cfg.odim, 1))
if self.cnt_main in range(100, 200):
self.iosm.z[0,0] = 1.
elif self.cnt_main in range(300, 400):
self.iosm.z[0,0] = -1.
# elif self.cnt_main in range(260, 280):
# self.iosm.z[1,0] = 1.
# elif self.cnt_main in range(320, 340):
# self.iosm.z[1,0] = -1.
else:
self.iosm.z[0,0] = 0.
# self.iosm.z[1,0] = 0.
elif self.cfg.mode == "ramp":
if self.cnt >= 255:
self.cnt_inc = -1
elif self.cnt <= -255:
self.cnt_inc = 1
self.cnt += self.cnt_inc
self.iosm.zn[0,0] = self.cnt / 100.
print "ramp", self.iosm.zn
# self.iosm.z[0,0] = (255 + np.abs((self.cnt_main % 510) - 255)) * 0.005
# if np.random.uniform(0, 1.) > 0.9:
# print "hickup"
# time.sleep(0.5)
# store states for pushback: see above
print "publishing zn"
self.pub["learn_zn"].publish(self.iosm.zn)
return(self.iosm.z, self.iosm.zn)
def main(args):
# fix RNG seed
np.random.seed(args.seed)
# check datadir exists
if not os.path.exists(args.datadir):
print "Datadir doesn't exist, try mkdir '%s'" % (args.datadir)
if args.mode == "res_gen":
for i in range(10):
s = SpheroResLearner1D(args)
timestamp = time.strftime("%Y-%m-%d-%H%M%S")
filename = "%s/reservoir-%d-%s.bin" % (args.datadir, i, timestamp)
s.res.save(filename=filename)
s.isrunning = False
sys.exit(0)
else:
s = SpheroResLearner1D(args)
if args.resfile != "":
print "loading reservoir from file %s" % (args.resfile)
s.res.load(filename="%s/%s" % (args.datadir, args.resfile))
print "overriding config for eta, theta"
s.cfg.eta_EH = 0.
s.cfg.res_theta = 1e-3
s.res.set_theta(s.cfg.res_theta)
def handler(signum, frame):
print 'Signal handler called with signal', signum
s.isrunning = False
sys.exit(0)
# install interrupt handler
signal.signal(signal.SIGINT, handler)
# run
s.start()
while not rospy.is_shutdown():
# print("main loop")
time.sleep(1)
# timestamp
timestamp = time.strftime("%Y%m%d-%H%M%S")
target_str = s.cfg.tp_target_spec.keys()[s.cfg.tp_target_spec.values().index(1)]
print "target_str", target_str
filename = "%s/log-learner-%s-N-%d-eta-%f-theta-%f-g-%f-target-%s" % (args.datadir,
timestamp,
s.cfg.N, s.cfg.eta_EH, s.cfg.res_theta, s.cfg.g,
target_str)
# save logs
s.savelogs(ts=timestamp, filename=filename)
# save network
if args.resfile == "":
target_str = s.cfg.tp_target_spec.keys()[s.cfg.tp_target_spec.values().index(1)]
print "target_str", target_str
filename = "%s/reservoir-%d-%s-N-%d-eta-%f-theta-%f-g-%f-target-%s.bin" % (args.datadir,
1, timestamp,
s.cfg.N, s.cfg.eta_EH, s.cfg.res_theta, s.cfg.g,
target_str)
s.res.save(filename=filename)
else:
print "not saving network, already loaded"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="reservoir smp learner for sphero")
parser.add_argument("-c", "--config", dest="cfgfilename", help="config file to use",
default="default.cfg")
parser.add_argument("-d", "--datadir", help="Directory root for saved configurations",
default=parser.prog[0:-3])
parser.add_argument("-m", "--mode", type=str, help="select mode: " + str(SpheroResLearner1D.modes),
default="vel")
parser.add_argument("-rf", "--resfile", dest="resfile", help="pickled reservoir to load",
default="")
parser.add_argument("-s", "--seed", dest="seed", help="seed for rng", default=123, type=int)
args = parser.parse_args()
# good seeds: 4,
main(args)
| false
| true
|
f715cb6225840f9ec494e8f8b22c82e88df7a2f3
| 45,794
|
py
|
Python
|
flair.py
|
MustafaElshani/flair
|
ea058f3cc056e92b6f8a9ec7f7790dd6bed5766c
|
[
"BSD-3-Clause"
] | null | null | null |
flair.py
|
MustafaElshani/flair
|
ea058f3cc056e92b6f8a9ec7f7790dd6bed5766c
|
[
"BSD-3-Clause"
] | null | null | null |
flair.py
|
MustafaElshani/flair
|
ea058f3cc056e92b6f8a9ec7f7790dd6bed5766c
|
[
"BSD-3-Clause"
] | null | null | null |
""" ADT, CMS """
import sys, argparse, subprocess, os, tempfile, glob
def align():
parser = argparse.ArgumentParser(description='flair-align parse options', \
usage='python flair.py align -g genome.fa -r <reads.fq>|<reads.fa> [options]')
parser.add_argument('align')
required = parser.add_argument_group('required named arguments')
required.add_argument('-r', '--reads', action='store', dest='r', \
nargs='+', type=str, required=True, help='FastA/FastQ files of raw reads')
required.add_argument('-g', '--genome', action='store', dest='g', \
type=str, required=True, help='FastA of reference genome, can be minimap2 indexed')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-o', '--output', \
action='store', dest='o', default='flair.aligned', help='output file name base (default: flair.aligned)')
parser.add_argument('-t', '--threads', type=str, \
action='store', dest='t', default='4', help='minimap2 number of threads (4)')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='samtools executable path if not in $PATH')
parser.add_argument('-c', '--chromsizes', type=str, action='store', dest='c', default='', \
help='''chromosome sizes tab-separated file, used for converting sam to genome-browser
compatible psl file''')
parser.add_argument('--nvrna', action='store_true', dest='n', default=False, \
help='specify this flag to use native-RNA specific alignment parameters for minimap2')
parser.add_argument('--psl', action='store_true', dest='p', \
help='also output sam-converted psl')
parser.add_argument('-v1.3', '--version1.3', action='store_true', dest='v', \
help='specify if samtools version 1.3+')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='minimum MAPQ of read alignment to the genome (1)')
parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
help='''Suppress progress statements from being printed''')
args, unknown = parser.parse_known_args()
if unknown and not args.quiet:
sys.stderr.write('Align unrecognized arguments: {}\n'.format(' '.join(unknown)))
if args.m[-8:] != 'minimap2':
if args.m[-1] == '/':
args.m += 'minimap2'
else:
args.m += '/minimap2'
try:
mm2_command = [args.m, '-ax', 'splice', '-t', args.t, '--secondary=no', args.g]+args.r
if args.n:
mm2_command[5:5] = ['-uf', '-k14']
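			# The list-splice above inserts the native-RNA flags right after '-t <threads>' and
			# before '--secondary=no', so the call becomes roughly (paths hypothetical):
			#   minimap2 -ax splice -t 4 -uf -k14 --secondary=no genome.fa reads.fq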
if args.quiet:
if subprocess.call(mm2_command, stdout=open(args.o+'.sam', 'w'), \
stderr=open(args.o+'.mm2_stderr', 'w')):
return 1
elif subprocess.call(mm2_command, stdout=open(args.o+'.sam', 'w')):
return 1
except:
sys.stderr.write('Possible minimap2 error, specify executable path with -m\n')
return 1
if args.quality != 0:
if subprocess.call([args.sam, 'view', '-q', str(args.quality), '-h', '-S', args.o+'.sam'], \
stdout=open(args.o+'.q.sam', 'w'), stderr=open(args.o+'.samtools_stderr', 'w')):
sys.stderr.write('Possible issue with samtools, see {}\n'.format(args.o+'.samtools_stderr'))
return 1
subprocess.call(['mv', args.o+'.q.sam', args.o+'.sam'])
if args.p and subprocess.call([sys.executable, path+'bin/sam_to_psl.py', args.o+'.sam', \
args.o+'.psl', args.c]):
return 1
if subprocess.call([args.sam, 'view', '-h', '-Sb', '-@', args.t, args.o+'.sam'], \
stdout=open(args.o+'.unsorted.bam', 'w')): # calls samtools view, exit if an error code that != 0 results
sys.stderr.write('Possible issue with samtools executable\n')
return 1
if not args.v: # samtools version is < 1.3 or unspecified --> detect version
ver = subprocess.Popen([args.sam], stderr=subprocess.PIPE, universal_newlines=True)
for line in ver.stderr:
if 'Version:' in line:
v = line.rstrip()[line.find('Version:')+9:line.find('Version:')+12]
try:
if float(v) >= 1.3:
if not args.quiet: sys.stderr.write('Samtools version >= 1.3 detected\n')
args.v = True
break
except:
if not args.quiet: sys.stderr.write('Could not detect samtools version, assuming < 1.3\n')
if args.v: # samtools verison 1.3+
subprocess.call([args.sam, 'sort', '-@', args.t, args.o+'.unsorted.bam', '-o', args.o+'.bam'], \
stderr=open(args.o+'.unsorted.bam.stderr', 'w'))
elif subprocess.call([args.sam, 'sort', '-@', args.t, args.o+'.unsorted.bam', args.o], \
stderr=open(args.o+'.unsorted.bam.stderr', 'w')):
sys.stderr.write('If using samtools v1.3+, please specify -v1.3 argument\n')
return 1
subprocess.call([args.sam, 'index', args.o+'.bam'])
subprocess.call([sys.executable, path+'bin/bam2Bed12.py', '-i', args.o+'.bam'], stdout=open(args.o+'.bed', 'w'))
subprocess.call(['rm', args.o+'.unsorted.bam', args.o+'.unsorted.bam.stderr', args.o+'.samtools_stderr'])
return args.o+'.bed'
def correct(aligned_reads=''):
parser = argparse.ArgumentParser(description='flair-correct parse options', \
		usage='python flair.py correct -q query.bed12 [-f annotation.gtf] [-j introns.tab] -g genome.fa [options]')
parser.add_argument('correct')
required = parser.add_argument_group('required named arguments')
atleastone = parser.add_argument_group('at least one of the following arguments is required')
if not aligned_reads:
required.add_argument('-q', '--query', type=str, default='', required=True, \
action='store', dest='q', help='uncorrected bed12 file')
required.add_argument('-g', '--genome', action='store', dest='g', \
type=str, required=True, help='FastA of reference genome')
atleastone.add_argument('-j', '--shortread', action='store', dest='j', type=str, default='', \
help='bed format splice junctions from short-read sequencing')
atleastone.add_argument('-f', '--gtf', default='', \
action='store', dest='f', help='GTF annotation file')
parser.add_argument('-c', '--chromsizes', type=str, \
action='store', dest='c', default='', help='chromosome sizes tab-separated file')
parser.add_argument('--nvrna', action='store_true', dest='n', default=False, help='specify this flag to keep \
the strand of a read consistent after correction')
parser.add_argument('-t', '--threads', type=str, action='store', dest='t', default='4', \
help='splice site correction script number of threads (4)')
parser.add_argument('-w', '--ss_window', action='store', dest='w', default='10', \
help='window size for correcting splice sites (W=10)')
parser.add_argument('-o', '--output', \
action='store', dest='o', default='flair', help='output name base (default: flair)')
parser.add_argument('--print_check', \
action='store_true', dest='p', default=False, help='Print err.txt with step checking.')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('Correct unrecognized arguments: {}\n'.format(' '.join(unknown)))
if aligned_reads:
args.q = aligned_reads
if not args.j and not args.f:
sys.stderr.write('Please specify at least one of the -f or -j arguments for correction\n')
return 1
correction_cmd = [sys.executable, path+'bin/ssCorrect.py', '-i', args.q, \
'-w', args.w, '-p', args.t, '-o', args.o, '--progress', '-f', args.g]
if not args.n:
correction_cmd += ['--correctStrand']
if args.j:
correction_cmd += ['-j', args.j]
if args.f:
correction_cmd += ['-g', args.f]
if args.p:
correction_cmd += ['--print_check']
if subprocess.call(correction_cmd):
sys.stderr.write('Correction command did not exit with success status\n')
if args.c and subprocess.call([sys.executable, path+'bin/bed_to_psl.py', args.c, args.o+'_all_corrected.bed', \
args.o+'_all_corrected.psl']):
return 1
return args.o+'_all_corrected.bed'
def collapse_range(corrected_reads='', aligned_reads=''):
parser = argparse.ArgumentParser(description='flair-collapse parse options', \
usage='python flair.py collapse-range -g genome.fa -r reads.bam -q <query.psl>|<query.bed> [options]')
parser.add_argument('collapse')
required = parser.add_argument_group('required named arguments')
required.add_argument('-r', '--reads', action='store', dest='r', nargs='+', \
type=str, required=True, help='bam file(s) of the aligned reads')
if not corrected_reads:
required.add_argument('-q', '--query', type=str, default='', required=True, \
action='store', dest='q', help='bed or psl file of aligned/corrected reads')
required.add_argument('-g', '--genome', action='store', dest='g', \
type=str, required=True, help='FastA of reference genome')
parser.add_argument('-f', '--gtf', default='', action='store', dest='f', \
help='GTF annotation file, used for renaming FLAIR isoforms to annotated isoforms and adjusting TSS/TESs')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-t', '--threads', type=int, \
action='store', dest='t', default=4, help='minimap2 number of threads (4)')
parser.add_argument('-p', '--promoters', action='store', dest='p', default='', \
help='promoter regions bed file to identify full-length reads')
parser.add_argument('-b', '--bedtools', action='store', dest='b', default='bedtools', \
help='bedtools executable path, provide if promoter regions specified and bedtools is not in $PATH')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='samtools executable path if not in $PATH')
parser.add_argument('-w', '--end_window', default='100', action='store', dest='w', \
help='window size for comparing TSS/TES (100)')
parser.add_argument('-s', '--support', default='3', action='store', dest='s', \
help='minimum number of supporting reads for an isoform (3)')
parser.add_argument('--stringent', default=False, action='store_true', dest='stringent', \
help='''specify if all supporting reads need to be full-length \
(80%% coverage and spanning 25 bp of the first and last exons)''')
parser.add_argument('-n', '--no_redundant', default='none', action='store', dest='n', \
help='''For each unique splice junction chain, report options include:
none--best TSSs/TESs chosen for each unique set of splice junctions;
longest--single TSS/TES chosen to maximize length;
best_only--single most supported TSS/TES used in conjunction chosen (none)''')
parser.add_argument('-i', '--isoformtss', default=False, action='store_true', dest='i', \
help='when specified, TSS/TES for each isoform will be determined from supporting reads \
for individual isoforms (default: not specified, determined at the gene level)')
parser.add_argument('--max_ends', default=2, action='store', dest='max_ends', \
help='maximum number of TSS/TES picked per isoform (2)')
parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
help='specify if reads are generated from a long read method with minimal fragmentation')
parser.add_argument('--filter', default='default', action='store', dest='filter', \
help='''Report options include:
nosubset--any isoforms that are a proper set of another isoform are removed;
default--subset isoforms are removed based on support;
comprehensive--default set + all subset isoforms;
ginormous--comprehensive set + single exon subset isoforms''')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='minimum MAPQ of read assignment to an isoform (1)')
parser.add_argument('--keep_intermediate', default=False, action='store_true', dest='keep_intermediate', \
help='''specify if intermediate and temporary files are to be kept for debugging.
Intermediate files include: promoter-supported reads file,
read assignments to firstpass isoforms''')
parser.add_argument('--generate_map', default=False, action='store_true', dest='generate_map', \
help='''specify this argument to generate a txt file of which reads are assigned to each isoform.
note: only works if the quantification method is not using salmon (default: not specified)''')
parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
help='''Suppress progress statements from being printed''')
parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
default='', help='Path to salmon executable, specify if salmon quantification is desired')
parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
help='directory to put temporary files. use "./" to indicate current directory (default: python tempfile directory)')
parser.add_argument('-o', '--output', default='flair.collapse', \
action='store', dest='o', help='output file name base for FLAIR isoforms (default: flair.collapse)')
args, unknown = parser.parse_known_args()
if unknown and not args.quiet:
sys.stderr.write('Collapse-range unrecognized arguments: {}\n'.format(' '.join(unknown)))
if corrected_reads:
args.q = corrected_reads
args.r = [aligned_reads[:-3]+'bam']
if args.r[0][-3:] != 'bam':
sys.stderr.write('Must provide genome alignment BAM with -r if range is specified\n')
return 1
if args.temp_dir == '':
args.temp_dir = tempfile.NamedTemporaryFile().name+'/'
if not os.path.isdir(args.temp_dir): # make temporary directory
if subprocess.call(['mkdir', args.temp_dir]):
sys.stderr.write('Could not make temporary directory {}\n'.format(args.temp_dir))
return 1
if args.temp_dir[-1] != '/':
args.temp_dir += '/'
# convert query to bed
if args.q[-3:].lower() == 'psl':
subprocess.call([sys.executable, path+'bin/psl_to_bed.py', args.q, args.q+'.bed'])
args.q = args.q+'.bed'
# partition the bed file into independent regions
subprocess.call(['sort','-k1,1', '-k2,2n', '--parallel='+str(args.t), args.q], \
stdout=open(args.temp_dir+run_id+'.sorted.bed', 'w'))
if subprocess.call(['bedPartition', '-parallel='+str(args.t), args.temp_dir+run_id+'.sorted.bed', args.o+'.ranges.bed']):
sys.stderr.write('''Make sure bedPartition (http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/)
is an executable in your $PATH\n''')
return 1
ranges = []
for line in open(args.o+'.ranges.bed'):
line = line.rstrip().split('\t')
ranges += [line[0]+':'+line[1]+'-'+line[2]]
# index the bed file
subprocess.call(['bgzip', args.temp_dir+run_id+'.sorted.bed'])
if subprocess.call(['tabix', '-f', '--preset', 'bed', '--zero-based', args.temp_dir+run_id+'.sorted.bed.gz']):
return 1
# call collapse on all the ranges
p = Pool(args.t)
if 1 in p.map(collapse, ranges): # if a process failed
return 1
p.terminate()
# consolidate all the isoforms from all the ranges
subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.bed'), stdout=open(args.o+'.isoforms.bed', 'w'))
subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.fa'), stdout=open(args.o+'.isoforms.fa', 'w'))
if args.f:
subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.gtf'), stdout=open(args.o+'.isoforms.gtf', 'w'))
subprocess.call(['rm']+glob.glob(args.temp_dir+run_id+'*'))
return args.o+'.isoforms.bed', args.o+'.isoforms.fa'
def collapse(genomic_range='', corrected_reads=''):
parser = argparse.ArgumentParser(description='flair-collapse parse options', \
usage='python flair.py collapse -g genome.fa -q <query.psl>|<query.bed> \
-r <reads.fq>/<reads.fa> [options]')
parser.add_argument('collapse')
required = parser.add_argument_group('required named arguments')
if not corrected_reads:
required.add_argument('-q', '--query', type=str, default='', required=True, \
action='store', dest='q', help='bed or psl file of aligned/corrected reads')
required.add_argument('-g', '--genome', action='store', dest='g', \
type=str, required=True, help='FastA of reference genome')
required.add_argument('-r', '--reads', action='store', dest='r', nargs='+', \
type=str, required=True, help='FastA/FastQ files of raw reads')
parser.add_argument('-f', '--gtf', default='', action='store', dest='f', \
help='GTF annotation file, used for renaming FLAIR isoforms to annotated isoforms and adjusting TSS/TESs')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-t', '--threads', type=int, \
action='store', dest='t', default=4, help='minimap2 number of threads (4)')
parser.add_argument('-p', '--promoters', action='store', dest='p', default='', \
help='promoter regions bed file to identify full-length reads')
parser.add_argument('--3prime_regions', action='store', dest='threeprime', default='', \
help='TES regions bed file to identify full-length reads')
parser.add_argument('-b', '--bedtools', action='store', dest='b', default='bedtools', \
help='bedtools executable path, provide if TSS/TES regions specified and bedtools is not in $PATH')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='samtools executable path if not in $PATH')
parser.add_argument('-w', '--end_window', default='100', action='store', dest='w', \
help='window size for comparing TSS/TES (100)')
parser.add_argument('-s', '--support', default='3', action='store', dest='s', \
help='minimum number of supporting reads for an isoform (3)')
parser.add_argument('--stringent', default=False, action='store_true', dest='stringent', \
help='''specify if all supporting reads need to be full-length \
(80%% coverage and spanning 25 bp of the first and last exons)''')
parser.add_argument('-n', '--no_redundant', default='none', action='store', dest='n', \
help='''For each unique splice junction chain, report options include:
none--best TSSs/TESs chosen for each unique set of splice junctions;
longest--single TSS/TES chosen to maximize length;
best_only--single most supported TSS/TES used in conjunction chosen (none)''')
parser.add_argument('-i', '--isoformtss', default=False, action='store_true', dest='i', \
help='when specified, TSS/TES for each isoform will be determined from supporting reads \
for individual isoforms (default: not specified, determined at the gene level)')
parser.add_argument('--no_end_adjustment', default=False, action='store_true', dest='no_end_adjustment', \
help='''when specified, TSS/TES from the gtf provided with -f will not be used to adjust isoform
TSSs/TESs each isoform will be determined from supporting reads''')
parser.add_argument('--max_ends', default=2, action='store', dest='max_ends', \
help='maximum number of TSS/TES picked per isoform (2)')
parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
help='specify if reads are generated from a long read method with minimal fragmentation')
parser.add_argument('--filter', default='default', action='store', dest='filter', \
help='''Report options include:
nosubset--any isoforms that are a proper set of another isoform are removed;
default--subset isoforms are removed based on support;
comprehensive--default set + all subset isoforms;
ginormous--comprehensive set + single exon subset isoforms''')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='minimum MAPQ of read assignment to an isoform (1)')
parser.add_argument('--keep_intermediate', default=False, action='store_true', dest='keep_intermediate', \
help='''specify if intermediate and temporary files are to be kept for debugging.
Intermediate files include: promoter-supported reads file,
read assignments to firstpass isoforms''')
parser.add_argument('--generate_map', default=False, action='store_true', dest='generate_map', \
help='''specify this argument to generate a txt file of which reads are assigned to each isoform.
note: only works if the quantification method is not using salmon (default: not specified)''')
parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
help='''Suppress progress statements from being printed''')
parser.add_argument('--range', default='', action='store', dest='range', \
help='''interval for which to collapse isoforms for, formatted chromosome:coord1-coord2 or tab-delimited;
if a range is specified, then the aligned reads bam must be specified with -r
and the query must be a sorted, bgzip-ed bed file''')
parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
default='', help='Path to salmon executable, specify if salmon quantification is desired')
parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
help='directory to put temporary files. use "./" to indicate current directory (default: python tempfile directory)')
parser.add_argument('-o', '--output', default='flair.collapse', \
action='store', dest='o', help='output file name base for FLAIR isoforms (default: flair.collapse)')
args, unknown = parser.parse_known_args()
if unknown and not args.quiet:
sys.stderr.write('Collapse unrecognized arguments: {}\n'.format(' '.join(unknown)))
if corrected_reads:
args.q = corrected_reads
# housekeeping stuff
tempfile_dir = tempfile.NamedTemporaryFile().name
tempfile_name = tempfile_dir[tempfile_dir.rfind('/')+1:]+'.'
if args.temp_dir == '':
args.temp_dir = tempfile_dir+'/'
if not os.path.isdir(args.temp_dir): # make temporary directory
if subprocess.call(['mkdir', args.temp_dir]):
sys.stderr.write('Could not make temporary directory {}\n'.format(args.temp_dir))
return 1
if args.temp_dir[-1] != '/':
args.temp_dir += '/'
if genomic_range: # this module was called internally from collapse_range
args.range = genomic_range
args.o = args.temp_dir+run_id
args.q = args.temp_dir+run_id+'.sorted.bed.gz'
args.quiet = True
if args.m[-8:] != 'minimap2':
if args.m[-1] == '/':
args.m += 'minimap2'
else:
args.m += '/minimap2'
args.t, args.quality = str(args.t), str(args.quality) # convert from int to str
args.o += '.'
if not os.path.exists(args.q):
sys.stderr.write('Query file path does not exist\n')
return 1
if os.stat(args.q).st_size == 0:
sys.stderr.write('Query file is empty\n')
return 1
# separate out the read sequences and corrected reads corresponding to the specified range
if args.range:
if '\t' in args.range:
args.range = args.range.split('\t')
args.range = args.range[0]+':'+args.range[1]+'-'+args.range[2]
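			# e.g. a tab-delimited range 'chr1<TAB>100<TAB>200' (hypothetical coordinates) is
			# rewritten to the samtools/tabix-style string 'chr1:100-200' used below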
ext = '.bed' # query file extension will be 'bed'
args.o += args.range+'.'
if args.r[0][-3:] != 'bam':
sys.stderr.write('Must provide genome alignment BAM with -r if range is specified\n')
return 1
bams = []
for i in range(len(args.r)): # subset bam file for alignments within range
bams += [args.temp_dir+tempfile_name+args.range+str(i)+'.bam']
if subprocess.call([args.sam, 'view', '-h', args.r[i], args.range], \
stdout=open(bams[-1], 'w')):
return 1
args.r = []
for i in range(len(bams)): # read sequences of the alignments within range
args.r += [bams[i][:-3]+'fasta']
subprocess.call([args.sam, 'fasta', bams[i]], \
stdout=open(args.r[-1], 'w'), \
stderr=open(args.temp_dir+tempfile_name+'bam2fq_stderr', 'w'))
subprocess.call(['rm'] + bams)
chrom = args.range[:args.range.find(':')]
coord1 = args.range[args.range.find(':')+1:args.range.find('-')]
coord2 = args.range[args.range.find('-')+1:]
precollapse = args.temp_dir+tempfile_name+args.range+'.bed' # name of subsetted query file
coordfile = open(args.temp_dir+tempfile_name+args.range+'.range.bed', 'wt') # write range to a bed file
coordfile.write('\t'.join([chrom, coord1, coord2]))
coordfile.close()
if subprocess.call(['tabix', '-R', args.temp_dir+tempfile_name+args.range+'.range.bed', args.q], \
stdout=open(precollapse, 'w')):
sys.stderr.write('Query file needs to be a sorted, bgzip-ed, tabix-indexed bed file if range is specified\n')
return 1
else:
ext = '.'+args.q[-3:] # query file extension (bed or psl)
precollapse = args.q # query file unchanged
args.r = args.r[0].split(',') if ',' in args.r[0] else args.r # read sequences
# filter out the reads with TSSs without promoter support
intermediate = []
if args.p:
if not args.quiet: sys.stderr.write('Filtering out reads without promoter-supported TSS\n')
if subprocess.call([sys.executable, path+'bin/pull_starts.py', args.q, args.temp_dir+tempfile_name+'tss.bed']):
return 1
if subprocess.call([args.b, 'intersect', '-a', args.temp_dir+tempfile_name+'tss.bed', '-b', args.p], \
stdout=open(args.temp_dir+tempfile_name+'promoter_intersect.bed', 'w')):
return 1
precollapse = args.o+'promoter_supported'+ext # filename of promoter-supported, corrected reads
subprocess.call([sys.executable, path+'bin/psl_reads_from_bed.py', args.temp_dir+tempfile_name+'promoter_intersect.bed', \
args.q, precollapse])
intermediate += [args.temp_dir+tempfile_name+'tss.bed', precollapse]
if args.threeprime:
if not args.quiet: sys.stderr.write('Filtering out reads without TES support\n')
if subprocess.call([sys.executable, path+'bin/pull_starts.py', precollapse, args.temp_dir+tempfile_name+'tes.bed', 'reverse']):
return 1
if subprocess.call([args.b, 'intersect', '-a', args.temp_dir+tempfile_name+'tes.bed', '-b', args.threeprime], \
stdout=open(args.temp_dir+tempfile_name+'tes_intersect.bed', 'w')):
return 1
precollapse = args.o+'tes_supported'+ext # filename of 3' end-supported, corrected reads
subprocess.call([sys.executable, path+'bin/psl_reads_from_bed.py', args.temp_dir+tempfile_name+'tes_intersect.bed', \
args.q, precollapse])
intermediate += [args.temp_dir+tempfile_name+'tes.bed', precollapse]
collapse_cmd = [sys.executable, path+'bin/collapse_isoforms_precise.py', '-q', precollapse, \
'-m', str(args.max_ends), '-w', args.w, '-n', args.n, '-o', args.o+'firstpass.unfiltered'+ext]
if args.f and not args.no_end_adjustment:
collapse_cmd += ['-f', args.f]
if args.i:
collapse_cmd += ['-i']
if args.quiet:
collapse_cmd += ['--quiet']
if subprocess.call(collapse_cmd):
return 1
	# filtering out subset isoforms with insufficient support
if subprocess.call([sys.executable, path+'bin/filter_collapsed_isoforms.py', \
args.o+'firstpass.unfiltered'+ext, args.filter, args.o+'firstpass'+ext, args.w]):
return 1
intermediate += [args.o+'firstpass.unfiltered'+ext]
# rename first-pass isoforms to annotated transcript IDs if they match
if args.f:
if not args.quiet: sys.stderr.write('Renaming isoforms\n')
if subprocess.call([sys.executable, path+'bin/identify_gene_isoform.py', \
args.o+'firstpass'+ext, args.f, args.o+'firstpass.named'+ext]):
sys.exit(1)
subprocess.call(['mv', args.o+'firstpass.named'+ext, args.o+'firstpass'+ext])
if subprocess.call([sys.executable, path+'bin/psl_to_sequence.py', args.o+'firstpass'+ext, \
args.g, args.o+'firstpass.fa']):
return 1
# reassign reads to first-pass isoforms
if not args.quiet: sys.stderr.write('Aligning reads to first-pass isoform reference\n')
count_files, align_files = [], []
alignout = args.temp_dir + tempfile_name +'firstpass.'
try:
if subprocess.call([args.m, '-a', '-t', args.t, '-N', '4', args.o+'firstpass.fa'] + args.r, \
stdout=open(alignout+'sam', 'w'), stderr=open(alignout+'mm2_stderr', 'w')):
return 1
except Exception as e:
sys.stderr.write(str(e)+'\n\n\nMinimap2 error, please check that all file, directory, and executable paths exist\n')
return 1
# count the number of supporting reads for each first-pass isoform
if args.salmon: # use salmon to count
if subprocess.call([args.sam, 'view', '-F', '4', '-h', '-S', alignout+'sam'], \
stdout=open(alignout+'mapped.sam', 'w')):
return 1
subprocess.call(['mv', alignout+'mapped.sam', alignout+'sam'])
subprocess.call([args.salmon, 'quant', '-t', args.o+'firstpass.fa', '-o', alignout+'salmon', \
'-p', args.t, '-l', 'U', '-a', alignout+'sam'], stderr=open(alignout+'salmon_stderr.txt', 'w'))
count_file = alignout+'salmon/quant.sf'
align_files += [alignout+'sam', alignout+'salmon/quant.sf']
else:
args.quality = '0' if args.trust_ends else args.quality
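		# with --trust_ends the MAPQ filter is effectively disabled (quality forced to '0'),
		# so read-to-isoform assignments are not discarded for low mapping quality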
if args.quality != '0':
subprocess.call([args.sam, 'view', '-q', args.quality, '-h', '-S', alignout+'sam'], \
stdout=open(alignout+'q.sam', 'w'), stderr=open(alignout+'q.samtools_stderr', 'w'))
align_files += [alignout+'sam']
else:
subprocess.call(['mv', alignout+'sam', alignout+'q.sam'])
count_cmd = [sys.executable, path+'bin/count_sam_transcripts.py', '-s', alignout+'q.sam', \
'-o', alignout+'q.counts', '-t', args.t, '--quality', args.quality]
if args.stringent:
count_cmd += ['--stringent', '-i', args.o+'firstpass'+ext]
if args.trust_ends:
count_cmd += ['--trust_ends']
if args.generate_map:
count_cmd += ['--generate_map', args.o+'isoform.read.map.txt']
if subprocess.call(count_cmd):
sys.stderr.write('Failed at counting step for isoform read support\n')
return 1
count_file = alignout+'q.counts'
align_files += [alignout+'q.sam']
subprocess.call([sys.executable, path+'bin/combine_counts.py', count_file, args.o+'firstpass.q.counts'])
if not args.quiet: sys.stderr.write('Filtering isoforms by read coverage\n')
subprocess.call([sys.executable, path+'bin/match_counts.py', args.o+'firstpass.q.counts', \
args.o+'firstpass'+ext, args.s, args.o+'isoforms'+ext])
subprocess.call([sys.executable, path+'bin/psl_to_sequence.py', args.o+'isoforms'+ext, \
args.g, args.o+'isoforms.fa'])
if args.f:
subprocess.call([sys.executable, path+'bin/psl_to_gtf.py', args.o+'isoforms'+ext], \
stdout=open(args.o+'isoforms.gtf', 'w'))
subprocess.call(['rm', '-rf', args.o+'firstpass.fa', alignout+'q.counts'])
if not args.keep_intermediate:
subprocess.call(['rm', args.o+'firstpass.q.counts', args.o+'firstpass'+ext])
subprocess.call(['rm', '-rf'] + glob.glob(args.temp_dir+'*'+tempfile_name+'*') + align_files + intermediate)
return args.o+'isoforms.bed', args.o+'isoforms.fa'
def quantify(isoform_sequences=''):
parser = argparse.ArgumentParser(description='flair-quantify parse options', \
usage='python flair.py quantify -r reads_manifest.tsv -i isoforms.fa [options]')
parser.add_argument('quantify')
required = parser.add_argument_group('required named arguments')
if not isoform_sequences:
required.add_argument('-r', '--reads_manifest', action='store', dest='r', type=str, \
required=True, help='Tab delimited file containing sample id, condition, batch, reads.fq')
required.add_argument('-i', '--isoforms', action='store', dest='i', \
type=str, required=True, help='FastA of FLAIR collapsed isoforms')
else:
required.add_argument('--reads_manifest', action='store', dest='r', type=str, \
required=True, help='Tab delimited file containing sample id, condition, batch, reads.fq')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-t', '--threads', type=int, \
action='store', dest='t', default=4, help='minimap2 number of threads (4)')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='specify a samtools executable path if not in $PATH if --quality is also used')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='''minimum MAPQ of read assignment to an isoform. If using salmon, all alignments are
used (1)''')
parser.add_argument('-o', '--output', type=str, action='store', dest='o', \
default='counts_matrix.tsv', help='Counts matrix output file name prefix (counts_matrix.tsv)')
parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
default='', help='Path to salmon executable, specify if salmon quantification is desired')
parser.add_argument('--tpm', action='store_true', dest='tpm', default=False, \
help='specify this flag to output additional file with expression in TPM')
parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
help='specify if reads are generated from a long read method with minimal fragmentation')
parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
help='''directory to put temporary files. use "./" to indicate current directory
(default: python tempfile directory)''')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('Quantify unrecognized arguments: {}\n'.format(' '.join(unknown)))
if isoform_sequences:
args.i = isoform_sequences
args.o += '.counts_matrix.tsv'
try:
import numpy as np
import codecs
except:
sys.stderr.write('Numpy import error. Please pip install numpy. Exiting.\n')
sys.exit(1)
if args.m[-8:] != 'minimap2':
if args.m[-1] == '/':
args.m += 'minimap2'
else:
args.m += '/minimap2'
args.t, args.quality = str(args.t), str(args.quality)
samData = list()
with codecs.open(args.r, "r", encoding='utf-8', errors='ignore') as lines:
for line in lines:
cols = line.rstrip().split('\t')
if len(cols)<4:
sys.stderr.write('Expected 4 columns in manifest.tsv, got %s. Exiting.\n' % len(cols))
return 1
sample, group, batch, readFile = cols
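			# e.g. a manifest row (hypothetical values): sample1<TAB>conditionA<TAB>batch1<TAB>/path/to/reads.fq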
readFileRoot = tempfile.NamedTemporaryFile().name
if args.temp_dir != '':
if not os.path.isdir(args.temp_dir):
subprocess.call(['mkdir', args.temp_dir])
readFileRoot = args.temp_dir + '/' + readFileRoot[readFileRoot.rfind('/')+1:]
samData.append(cols + [readFileRoot + '.sam'])
for num,sample in enumerate(samData,0):
sys.stderr.write("Step 1/3. Aligning sample %s_%s: %s/%s \r" % (sample[0],sample[2],num+1,len(samData)))
mm2_command = [args.m, '-a', '-N', '4', '-t', args.t, args.i, sample[-2]]
try:
if subprocess.call(mm2_command, stdout=open(sample[-1], 'w'), \
stderr=open(sample[-1]+'.mm2_stderr.txt', 'w')):
sys.stderr.write('Check {} file\n'.format(sample[-1]+'.mm2_stderr.txt'))
return 1
except:
sys.stderr.write('''Possible minimap2 error, please check that all file, directory,
and executable paths exist\n''')
return 1
subprocess.call(['rm', sample[-1]+'.mm2_stderr.txt'])
sys.stderr.flush()
if args.quality != '0' and not args.trust_ends and not args.salmon:
if subprocess.call([args.sam, 'view', '-q', args.quality, '-h', '-S', sample[-1]], \
stdout=open(sample[-1]+'.qual.sam', 'w')):
return 1
subprocess.call(['mv', sample[-1]+'.qual.sam', sample[-1]])
countData = dict()
for num,data in enumerate(samData):
sample, group, batch, readFile, samOut = data
sys.stderr.write("Step 2/3. Quantifying isoforms for sample %s_%s: %s/%s \r" % (sample,batch,num+1,len(samData)))
if not args.salmon:
count_cmd = [sys.executable, path+'bin/count_sam_transcripts.py', '-s', samOut, \
'-o', samOut+'.counts.txt', '-t', args.t, '--quality', args.quality]
if args.trust_ends:
count_cmd += ['--trust_ends']
subprocess.call(count_cmd)
for line in open(samOut+'.counts.txt'):
line = line.rstrip().split('\t')
iso, numreads = line[0], line[1]
if iso not in countData: countData[iso] = np.zeros(len(samData))
countData[iso][num] = numreads
else:
subprocess.call([args.salmon, 'quant', '-t', args.i, '-o', samOut[:-4]+'.salmon', \
'-p', args.t, '-l', 'U', '-a', samOut], stderr=open('salmon_stderr.txt', 'w'))
salmonOut = open(samOut[:-4]+'.salmon/quant.sf')
salmonOut.readline() # header
for line in salmonOut:
line = line.rstrip().split('\t')
iso, tpm, numreads = line[0], line[3], line[4]
if iso not in countData: countData[iso] = np.zeros(len(samData))
if args.tpm:
countData[iso][num] = tpm
else:
countData[iso][num] = numreads
subprocess.call(['rm', '-r', samOut[:-4]+'.salmon/', 'salmon_stderr.txt'])
sys.stderr.flush()
subprocess.call(['rm', samOut])
sys.stderr.write("Step 3/3. Writing counts to {} \r".format(args.o))
countMatrix = open(args.o,'w')
countMatrix.write("ids\t%s\n" % "\t".join(["_".join(x[:3]) for x in samData]))
features = sorted(list(countData.keys()))
for f in features:
countMatrix.write("%s\t%s\n" % (f,"\t".join(str(x) for x in countData[f])))
countMatrix.close()
sys.stderr.flush()
sys.stderr.write("\n")
if args.tpm and not args.salmon:
subprocess.call([sys.executable, path+'bin/counts_to_tpm.py', args.o, args.o+'.tpm.tsv'])
return args.o
def diffExp(counts_matrix=''):
parser = argparse.ArgumentParser(description='flair-diffExp parse options', \
usage='python flair.py diffExp -q counts_matrix.tsv --out_dir out_dir [options]')
parser.add_argument('diffExp')
required = parser.add_argument_group('required named arguments')
if not counts_matrix:
required.add_argument('-q', '--counts_matrix', action='store', dest='q', \
type=str, required=True, help='Tab-delimited isoform count matrix from flair quantify module.')
required.add_argument('-o', '--out_dir', action='store', dest='o', \
type=str, required=True, help='Output directory for tables and plots.')
parser.add_argument('-t', '--threads', action='store', dest='t', \
type=int, required=False, default=4, help='Number of threads for parallel DRIMSeq.')
parser.add_argument('-e', '--exp_thresh', action='store', dest='e', type=int, required=False, \
default=10, help='Read count expression threshold. Isoforms in which \
both conditions contain fewer than E reads are filtered out (Default E=10)')
parser.add_argument('-of', '--out_dir_force', action='store_true', dest='of', \
required=False, help='''Specify this argument to force overwriting of files in
an existing output directory''')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('DiffExp unrecognized arguments: {}\n'.format(' '.join(unknown)))
if counts_matrix:
args.q = counts_matrix
		args.o += '.diffExp'
scriptsBin = path + "bin/"
runDE = scriptsBin + "deFLAIR.py"
DEcommand = [sys.executable, '-W ignore', runDE, '--filter', str(args.e), '--threads', \
str(args.t), '--outDir', args.o, '--matrix', args.q]
if args.of:
DEcommand += ['-of']
subprocess.call(DEcommand)
return
def diffSplice(isoforms='', counts_matrix=''):
parser = argparse.ArgumentParser(description='flair-diffSplice parse options', \
usage='python flair.py diffSplice -i isoforms.bed|isoforms.psl -q counts_matrix.tsv [options]')
parser.add_argument('diffExp')
required = parser.add_argument_group('required named arguments')
if not isoforms:
required.add_argument('-i', '--isoforms', action='store', dest='i', required=True, \
type=str, help='isoforms in bed or psl format')
required.add_argument('-q', '--counts_matrix', action='store', dest='q', \
type=str, required=True, help='tab-delimited isoform count matrix from flair quantify module')
parser.add_argument('-o', '--output', action='store', dest='o', default='flair.diffsplice', type=str, \
required=False, help='output file name base for FLAIR isoforms (default: flair.diffsplice)')
parser.add_argument('--test', action='store_true', dest='test', \
required=False, default=False, help='Run DRIMSeq statistical testing')
parser.add_argument('-t', '--threads', action='store', dest='t', \
type=int, required=False, default=1, help='Number of threads DRIMSeq (1)')
parser.add_argument('--drim1', action='store', dest='drim1', type=int, required=False, default=6, \
help='''The minimum number of samples that have coverage over an AS event inclusion/exclusion
for DRIMSeq testing; events with too few samples are filtered out and not tested (6)''')
parser.add_argument('--drim2', action='store', dest='drim2', type=int, required=False, default=3, \
help='''The minimum number of samples expressing the inclusion of an AS event;
events with too few samples are filtered out and not tested (3)''')
parser.add_argument('--drim3', action='store', dest='drim3', type=int, required=False, default=15, \
help='''The minimum number of reads covering an AS event inclusion/exclusion for DRIMSeq testing,
events with too few samples are filtered out and not tested (15)''')
parser.add_argument('--drim4', action='store', dest='drim4', type=int, required=False, default=5, \
help='''The minimum number of reads covering an AS event inclusion for DRIMSeq testing,
events with too few samples are filtered out and not tested (5)''')
parser.add_argument('--batch', action='store_true', dest='batch', required=False, default=False, \
help='''If specified with --test, DRIMSeq will perform batch correction''')
parser.add_argument('--conditionA', action='store', dest='conditionA', required=False, default='', \
help='''Specify one condition corresponding to samples in the counts_matrix to be compared against
condition2; by default, the first two unique conditions are used''')
parser.add_argument('--conditionB', action='store', dest='conditionB', required=False, default='', \
help='''Specify another condition corresponding to samples in the counts_matrix to be compared against
conditionA''')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('DiffSplice unrecognized arguments: {}\n'.format(' '.join(unknown)))
if isoforms:
args.i = isoforms
args.q = counts_matrix
if args.i[-3:].lower() == 'psl':
subprocess.call([sys.executable, path+'bin/psl_to_bed.py', args.i, args.i+'.bed'])
args.i = args.i+'.bed'
subprocess.call([sys.executable, path+'bin/call_diffsplice_events.py', args.i, args.o, args.q])
subprocess.call([sys.executable, path+'bin/es_as.py', args.i], stdout=open(args.o+'.es.events.tsv','w'))
subprocess.call([sys.executable, path+'bin/es_as_inc_excl_to_counts.py', args.q, args.o+'.es.events.tsv'], \
stdout=open(args.o+'.es.events.quant.tsv','w'))
subprocess.call(['rm', args.o+'.es.events.tsv'])
	if args.test or args.drim1 or args.drim2 or args.drim3 or args.drim4:
sys.stderr.write('DRIMSeq testing for each AS event type\n')
drim1, drim2, drim3, drim4 = [str(x) for x in [args.drim1, args.drim2, args.drim3, args.drim4]]
ds_command = [sys.executable, path+'bin/runDS.py', '--threads', str(args.t), \
'--drim1', drim1, '--drim2', drim2, '--drim3', drim3, '--drim4', drim4]
if args.batch:
ds_command += ['--batch']
if args.conditionA:
if not args.conditionB:
sys.stderr.write('Both conditionA and conditionB must be specified, or both left unspecified\n')
return 1
ds_command += ['--conditionA', args.conditionA, '--conditionB', args.conditionB]
with open(args.o+'.stderr.txt', 'w') as ds_stderr:
subprocess.call(ds_command + ['--matrix', args.o+'.es.events.quant.tsv', '--prefix', args.o+'.es'], stderr=ds_stderr)
subprocess.call(ds_command + ['--matrix', args.o+'.alt5.events.quant.tsv', '--prefix', args.o+'.alt5'], stderr=ds_stderr)
subprocess.call(ds_command + ['--matrix', args.o+'.alt3.events.quant.tsv', '--prefix', args.o+'.alt3'], stderr=ds_stderr)
subprocess.call(ds_command + ['--matrix', args.o+'.ir.events.quant.tsv', '--prefix', args.o+'.ir'], stderr=ds_stderr)
return
path = '/'.join(os.path.realpath(__file__).split("/")[:-1])+'/'
if len(sys.argv) < 2:
sys.stderr.write('usage: python flair.py <mode> --help \n')
sys.stderr.write('modes: align, correct, collapse, quantify, diffExp, diffSplice\n')
sys.stderr.write('Multiple modules can be run when specified using numbers, e.g.:\n')
sys.stderr.write('python flair.py 1234 ...')
sys.exit(1)
else:
mode = sys.argv[1].lower()
aligned_reads, corrected_reads, isoforms, isoform_sequences, counts_matrix = [0]*5
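	# modules can be chained by digit, e.g. mode '1234' runs align, correct, collapse and
	# quantify back to back, handing each module's output file(s) to the next one below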
if mode == 'align' or '1' in mode:
status = align()
if status == 1:
sys.exit(1)
else:
aligned_reads = status
if mode == 'correct' or '2' in mode:
if aligned_reads:
status = correct(aligned_reads=aligned_reads)
else:
status = correct()
if status == 1:
sys.exit(1)
else:
corrected_reads = status
if mode == 'collapse' or ('3' in mode and '3.5' not in mode):
if corrected_reads:
status = collapse(corrected_reads=corrected_reads)
else:
status = collapse()
if status == 1:
sys.exit(1)
else:
isoforms, isoform_sequences = status
if mode == 'collapse-range' or '3.5' in mode:
from multiprocessing import Pool
tempfile_name = tempfile.NamedTemporaryFile().name
run_id = tempfile_name[tempfile_name.rfind('/')+1:]
if corrected_reads and not aligned_reads:
sys.stderr.write('''Collapse 3.5 run consecutively without align module; will assume {}
to be the name of the aligned reads bam file\n'''.format(corrected_reads[:-18]+'.bam'))
status = collapse_range(corrected_reads=corrected_reads, \
aligned_reads=corrected_reads[:-18]+'.bam')
elif corrected_reads and aligned_reads:
status = collapse_range(corrected_reads=corrected_reads, aligned_reads=aligned_reads)
elif not corrected_reads and aligned_reads:
sys.stderr.write('Correct module not run...\n')
status = collapse_range(corrected_reads=aligned_reads, aligned_reads=aligned_reads)
else:
status = collapse_range()
if status == 1:
sys.exit(1)
else:
isoforms, isoform_sequences = status
mode = mode.replace('3.5', 'x')
if mode == 'quantify' or '4' in mode:
if isoform_sequences:
status = quantify(isoform_sequences=isoform_sequences)
else:
status = quantify()
if status == 1:
sys.exit(1)
else:
counts_matrix = status
if mode == 'diffexp' or '5' in mode:
if counts_matrix:
status = diffExp(counts_matrix=counts_matrix)
else:
status = diffExp()
if status == 1:
sys.exit(1)
if mode == 'diffsplice' or '6' in mode:
if counts_matrix and isoforms:
status = diffSplice(isoforms=isoforms, counts_matrix=counts_matrix)
elif not isoforms and counts_matrix:
sys.stderr.write('DiffSplice run consecutively without collapse module, exiting\n')
sys.exit(1)
else:
status = diffSplice()
if status == 1:
sys.exit(1)
if mode == '--version':
sys.stderr.write('FLAIR v1.5.1\n')
| 50.769401
| 129
| 0.694414
|
import sys, argparse, subprocess, os, tempfile, glob
def align():
parser = argparse.ArgumentParser(description='flair-align parse options', \
usage='python flair.py align -g genome.fa -r <reads.fq>|<reads.fa> [options]')
parser.add_argument('align')
required = parser.add_argument_group('required named arguments')
required.add_argument('-r', '--reads', action='store', dest='r', \
nargs='+', type=str, required=True, help='FastA/FastQ files of raw reads')
required.add_argument('-g', '--genome', action='store', dest='g', \
type=str, required=True, help='FastA of reference genome, can be minimap2 indexed')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-o', '--output', \
action='store', dest='o', default='flair.aligned', help='output file name base (default: flair.aligned)')
parser.add_argument('-t', '--threads', type=str, \
action='store', dest='t', default='4', help='minimap2 number of threads (4)')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='samtools executable path if not in $PATH')
parser.add_argument('-c', '--chromsizes', type=str, action='store', dest='c', default='', \
help='''chromosome sizes tab-separated file, used for converting sam to genome-browser
compatible psl file''')
parser.add_argument('--nvrna', action='store_true', dest='n', default=False, \
help='specify this flag to use native-RNA specific alignment parameters for minimap2')
parser.add_argument('--psl', action='store_true', dest='p', \
help='also output sam-converted psl')
parser.add_argument('-v1.3', '--version1.3', action='store_true', dest='v', \
help='specify if samtools version 1.3+')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='minimum MAPQ of read alignment to the genome (1)')
parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
help='''Suppress progress statements from being printed''')
args, unknown = parser.parse_known_args()
if unknown and not args.quiet:
sys.stderr.write('Align unrecognized arguments: {}\n'.format(' '.join(unknown)))
if args.m[-8:] != 'minimap2':
if args.m[-1] == '/':
args.m += 'minimap2'
else:
args.m += '/minimap2'
try:
mm2_command = [args.m, '-ax', 'splice', '-t', args.t, '--secondary=no', args.g]+args.r
if args.n:
mm2_command[5:5] = ['-uf', '-k14']
if args.quiet:
if subprocess.call(mm2_command, stdout=open(args.o+'.sam', 'w'), \
stderr=open(args.o+'.mm2_stderr', 'w')):
return 1
elif subprocess.call(mm2_command, stdout=open(args.o+'.sam', 'w')):
return 1
except:
sys.stderr.write('Possible minimap2 error, specify executable path with -m\n')
return 1
if args.quality != 0:
if subprocess.call([args.sam, 'view', '-q', str(args.quality), '-h', '-S', args.o+'.sam'], \
stdout=open(args.o+'.q.sam', 'w'), stderr=open(args.o+'.samtools_stderr', 'w')):
sys.stderr.write('Possible issue with samtools, see {}\n'.format(args.o+'.samtools_stderr'))
return 1
subprocess.call(['mv', args.o+'.q.sam', args.o+'.sam'])
if args.p and subprocess.call([sys.executable, path+'bin/sam_to_psl.py', args.o+'.sam', \
args.o+'.psl', args.c]):
return 1
if subprocess.call([args.sam, 'view', '-h', '-Sb', '-@', args.t, args.o+'.sam'], \
stdout=open(args.o+'.unsorted.bam', 'w')):
sys.stderr.write('Possible issue with samtools executable\n')
return 1
if not args.v:
ver = subprocess.Popen([args.sam], stderr=subprocess.PIPE, universal_newlines=True)
for line in ver.stderr:
if 'Version:' in line:
v = line.rstrip()[line.find('Version:')+9:line.find('Version:')+12]
try:
if float(v) >= 1.3:
if not args.quiet: sys.stderr.write('Samtools version >= 1.3 detected\n')
args.v = True
break
except:
if not args.quiet: sys.stderr.write('Could not detect samtools version, assuming < 1.3\n')
if args.v:
subprocess.call([args.sam, 'sort', '-@', args.t, args.o+'.unsorted.bam', '-o', args.o+'.bam'], \
stderr=open(args.o+'.unsorted.bam.stderr', 'w'))
elif subprocess.call([args.sam, 'sort', '-@', args.t, args.o+'.unsorted.bam', args.o], \
stderr=open(args.o+'.unsorted.bam.stderr', 'w')):
sys.stderr.write('If using samtools v1.3+, please specify -v1.3 argument\n')
return 1
subprocess.call([args.sam, 'index', args.o+'.bam'])
subprocess.call([sys.executable, path+'bin/bam2Bed12.py', '-i', args.o+'.bam'], stdout=open(args.o+'.bed', 'w'))
subprocess.call(['rm', args.o+'.unsorted.bam', args.o+'.unsorted.bam.stderr', args.o+'.samtools_stderr'])
return args.o+'.bed'
def correct(aligned_reads=''):
parser = argparse.ArgumentParser(description='flair-correct parse options', \
		usage='python flair.py correct -q query.bed12 [-f annotation.gtf] [-j introns.tab] -g genome.fa [options]')
parser.add_argument('correct')
required = parser.add_argument_group('required named arguments')
atleastone = parser.add_argument_group('at least one of the following arguments is required')
if not aligned_reads:
required.add_argument('-q', '--query', type=str, default='', required=True, \
action='store', dest='q', help='uncorrected bed12 file')
required.add_argument('-g', '--genome', action='store', dest='g', \
type=str, required=True, help='FastA of reference genome')
atleastone.add_argument('-j', '--shortread', action='store', dest='j', type=str, default='', \
help='bed format splice junctions from short-read sequencing')
atleastone.add_argument('-f', '--gtf', default='', \
action='store', dest='f', help='GTF annotation file')
parser.add_argument('-c', '--chromsizes', type=str, \
action='store', dest='c', default='', help='chromosome sizes tab-separated file')
parser.add_argument('--nvrna', action='store_true', dest='n', default=False, help='specify this flag to keep \
the strand of a read consistent after correction')
parser.add_argument('-t', '--threads', type=str, action='store', dest='t', default='4', \
help='splice site correction script number of threads (4)')
parser.add_argument('-w', '--ss_window', action='store', dest='w', default='10', \
help='window size for correcting splice sites (W=10)')
parser.add_argument('-o', '--output', \
action='store', dest='o', default='flair', help='output name base (default: flair)')
parser.add_argument('--print_check', \
action='store_true', dest='p', default=False, help='Print err.txt with step checking.')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('Correct unrecognized arguments: {}\n'.format(' '.join(unknown)))
if aligned_reads:
args.q = aligned_reads
if not args.j and not args.f:
sys.stderr.write('Please specify at least one of the -f or -j arguments for correction\n')
return 1
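	# build the splice-site correction command: bin/ssCorrect.py adjusts read splice sites
	# using the short-read junctions (-j) and/or annotation (-f), searching within a -w bp window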
correction_cmd = [sys.executable, path+'bin/ssCorrect.py', '-i', args.q, \
'-w', args.w, '-p', args.t, '-o', args.o, '--progress', '-f', args.g]
if not args.n:
correction_cmd += ['--correctStrand']
if args.j:
correction_cmd += ['-j', args.j]
if args.f:
correction_cmd += ['-g', args.f]
if args.p:
correction_cmd += ['--print_check']
if subprocess.call(correction_cmd):
sys.stderr.write('Correction command did not exit with success status\n')
if args.c and subprocess.call([sys.executable, path+'bin/bed_to_psl.py', args.c, args.o+'_all_corrected.bed', \
args.o+'_all_corrected.psl']):
return 1
return args.o+'_all_corrected.bed'
def collapse_range(corrected_reads='', aligned_reads=''):
parser = argparse.ArgumentParser(description='flair-collapse parse options', \
usage='python flair.py collapse-range -g genome.fa -r reads.bam -q <query.psl>|<query.bed> [options]')
parser.add_argument('collapse')
required = parser.add_argument_group('required named arguments')
required.add_argument('-r', '--reads', action='store', dest='r', nargs='+', \
type=str, required=True, help='bam file(s) of the aligned reads')
if not corrected_reads:
required.add_argument('-q', '--query', type=str, default='', required=True, \
action='store', dest='q', help='bed or psl file of aligned/corrected reads')
required.add_argument('-g', '--genome', action='store', dest='g', \
type=str, required=True, help='FastA of reference genome')
parser.add_argument('-f', '--gtf', default='', action='store', dest='f', \
help='GTF annotation file, used for renaming FLAIR isoforms to annotated isoforms and adjusting TSS/TESs')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-t', '--threads', type=int, \
action='store', dest='t', default=4, help='minimap2 number of threads (4)')
parser.add_argument('-p', '--promoters', action='store', dest='p', default='', \
help='promoter regions bed file to identify full-length reads')
parser.add_argument('-b', '--bedtools', action='store', dest='b', default='bedtools', \
help='bedtools executable path, provide if promoter regions specified and bedtools is not in $PATH')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='samtools executable path if not in $PATH')
parser.add_argument('-w', '--end_window', default='100', action='store', dest='w', \
help='window size for comparing TSS/TES (100)')
parser.add_argument('-s', '--support', default='3', action='store', dest='s', \
help='minimum number of supporting reads for an isoform (3)')
parser.add_argument('--stringent', default=False, action='store_true', dest='stringent', \
help='''specify if all supporting reads need to be full-length \
(80%% coverage and spanning 25 bp of the first and last exons)''')
parser.add_argument('-n', '--no_redundant', default='none', action='store', dest='n', \
help='''For each unique splice junction chain, report options include:
none--best TSSs/TESs chosen for each unique set of splice junctions;
longest--single TSS/TES chosen to maximize length;
best_only--single most supported TSS/TES used in conjunction chosen (none)''')
parser.add_argument('-i', '--isoformtss', default=False, action='store_true', dest='i', \
help='when specified, TSS/TES for each isoform will be determined from supporting reads \
for individual isoforms (default: not specified, determined at the gene level)')
parser.add_argument('--max_ends', default=2, action='store', dest='max_ends', \
help='maximum number of TSS/TES picked per isoform (2)')
parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
help='specify if reads are generated from a long read method with minimal fragmentation')
parser.add_argument('--filter', default='default', action='store', dest='filter', \
help='''Report options include:
nosubset--any isoforms that are a proper set of another isoform are removed;
default--subset isoforms are removed based on support;
comprehensive--default set + all subset isoforms;
ginormous--comprehensive set + single exon subset isoforms''')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='minimum MAPQ of read assignment to an isoform (1)')
parser.add_argument('--keep_intermediate', default=False, action='store_true', dest='keep_intermediate', \
help='''specify if intermediate and temporary files are to be kept for debugging.
Intermediate files include: promoter-supported reads file,
read assignments to firstpass isoforms''')
parser.add_argument('--generate_map', default=False, action='store_true', dest='generate_map', \
help='''specify this argument to generate a txt file of which reads are assigned to each isoform.
note: only works if the quantification method is not using salmon (default: not specified)''')
parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
help='''Suppress progress statements from being printed''')
parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
default='', help='Path to salmon executable, specify if salmon quantification is desired')
parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
help='directory to put temporary files. use "./" to indicate current directory (default: python tempfile directory)')
parser.add_argument('-o', '--output', default='flair.collapse', \
action='store', dest='o', help='output file name base for FLAIR isoforms (default: flair.collapse)')
args, unknown = parser.parse_known_args()
if unknown and not args.quiet:
sys.stderr.write('Collapse-range unrecognized arguments: {}\n'.format(' '.join(unknown)))
if corrected_reads:
args.q = corrected_reads
args.r = [aligned_reads[:-3]+'bam']
if args.r[0][-3:] != 'bam':
sys.stderr.write('Must provide genome alignment BAM with -r if range is specified\n')
return 1
if args.temp_dir == '':
args.temp_dir = tempfile.NamedTemporaryFile().name+'/'
if not os.path.isdir(args.temp_dir):
if subprocess.call(['mkdir', args.temp_dir]):
sys.stderr.write('Could not make temporary directory {}\n'.format(args.temp_dir))
return 1
if args.temp_dir[-1] != '/':
args.temp_dir += '/'
if args.q[-3:].lower() == 'psl':
subprocess.call([sys.executable, path+'bin/psl_to_bed.py', args.q, args.q+'.bed'])
args.q = args.q+'.bed'
subprocess.call(['sort','-k1,1', '-k2,2n', '--parallel='+str(args.t), args.q], \
stdout=open(args.temp_dir+run_id+'.sorted.bed', 'w'))
if subprocess.call(['bedPartition', '-parallel='+str(args.t), args.temp_dir+run_id+'.sorted.bed', args.o+'.ranges.bed']):
sys.stderr.write('''Make sure bedPartition (http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/)
is an executable in your $PATH\n''')
return 1
ranges = []
for line in open(args.o+'.ranges.bed'):
line = line.rstrip().split('\t')
ranges += [line[0]+':'+line[1]+'-'+line[2]]
subprocess.call(['bgzip', args.temp_dir+run_id+'.sorted.bed'])
if subprocess.call(['tabix', '-f', '--preset', 'bed', '--zero-based', args.temp_dir+run_id+'.sorted.bed.gz']):
return 1
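	# collapse each genomic range in parallel, then concatenate the per-range isoform outputs below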
p = Pool(args.t)
if 1 in p.map(collapse, ranges):
return 1
p.terminate()
subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.bed'), stdout=open(args.o+'.isoforms.bed', 'w'))
subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.fa'), stdout=open(args.o+'.isoforms.fa', 'w'))
if args.f:
subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.gtf'), stdout=open(args.o+'.isoforms.gtf', 'w'))
subprocess.call(['rm']+glob.glob(args.temp_dir+run_id+'*'))
return args.o+'.isoforms.bed', args.o+'.isoforms.fa'
def collapse(genomic_range='', corrected_reads=''):
parser = argparse.ArgumentParser(description='flair-collapse parse options', \
usage='python flair.py collapse -g genome.fa -q <query.psl>|<query.bed> \
-r <reads.fq>/<reads.fa> [options]')
parser.add_argument('collapse')
required = parser.add_argument_group('required named arguments')
if not corrected_reads:
required.add_argument('-q', '--query', type=str, default='', required=True, \
action='store', dest='q', help='bed or psl file of aligned/corrected reads')
required.add_argument('-g', '--genome', action='store', dest='g', \
type=str, required=True, help='FastA of reference genome')
required.add_argument('-r', '--reads', action='store', dest='r', nargs='+', \
type=str, required=True, help='FastA/FastQ files of raw reads')
parser.add_argument('-f', '--gtf', default='', action='store', dest='f', \
help='GTF annotation file, used for renaming FLAIR isoforms to annotated isoforms and adjusting TSS/TESs')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-t', '--threads', type=int, \
action='store', dest='t', default=4, help='minimap2 number of threads (4)')
parser.add_argument('-p', '--promoters', action='store', dest='p', default='', \
help='promoter regions bed file to identify full-length reads')
parser.add_argument('--3prime_regions', action='store', dest='threeprime', default='', \
help='TES regions bed file to identify full-length reads')
parser.add_argument('-b', '--bedtools', action='store', dest='b', default='bedtools', \
help='bedtools executable path, provide if TSS/TES regions specified and bedtools is not in $PATH')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='samtools executable path if not in $PATH')
parser.add_argument('-w', '--end_window', default='100', action='store', dest='w', \
help='window size for comparing TSS/TES (100)')
parser.add_argument('-s', '--support', default='3', action='store', dest='s', \
help='minimum number of supporting reads for an isoform (3)')
parser.add_argument('--stringent', default=False, action='store_true', dest='stringent', \
help='''specify if all supporting reads need to be full-length \
(80%% coverage and spanning 25 bp of the first and last exons)''')
parser.add_argument('-n', '--no_redundant', default='none', action='store', dest='n', \
help='''For each unique splice junction chain, report options include:
none--best TSSs/TESs chosen for each unique set of splice junctions;
longest--single TSS/TES chosen to maximize length;
best_only--single most supported TSS/TES used in conjunction chosen (none)''')
parser.add_argument('-i', '--isoformtss', default=False, action='store_true', dest='i', \
help='when specified, TSS/TES for each isoform will be determined from supporting reads \
for individual isoforms (default: not specified, determined at the gene level)')
parser.add_argument('--no_end_adjustment', default=False, action='store_true', dest='no_end_adjustment', \
help='''when specified, TSS/TES from the gtf provided with -f will not be used to adjust isoform
TSSs/TESs each isoform will be determined from supporting reads''')
parser.add_argument('--max_ends', default=2, action='store', dest='max_ends', \
help='maximum number of TSS/TES picked per isoform (2)')
parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
help='specify if reads are generated from a long read method with minimal fragmentation')
parser.add_argument('--filter', default='default', action='store', dest='filter', \
help='''Report options include:
nosubset--any isoforms that are a proper set of another isoform are removed;
default--subset isoforms are removed based on support;
comprehensive--default set + all subset isoforms;
ginormous--comprehensive set + single exon subset isoforms''')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='minimum MAPQ of read assignment to an isoform (1)')
parser.add_argument('--keep_intermediate', default=False, action='store_true', dest='keep_intermediate', \
help='''specify if intermediate and temporary files are to be kept for debugging.
Intermediate files include: promoter-supported reads file,
read assignments to firstpass isoforms''')
parser.add_argument('--generate_map', default=False, action='store_true', dest='generate_map', \
help='''specify this argument to generate a txt file of which reads are assigned to each isoform.
note: only works if the quantification method is not using salmon (default: not specified)''')
parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
help='''Suppress progress statements from being printed''')
parser.add_argument('--range', default='', action='store', dest='range', \
help='''interval for which to collapse isoforms for, formatted chromosome:coord1-coord2 or tab-delimited;
if a range is specified, then the aligned reads bam must be specified with -r
and the query must be a sorted, bgzip-ed bed file''')
parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
default='', help='Path to salmon executable, specify if salmon quantification is desired')
parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
help='directory to put temporary files. use "./" to indicate current directory (default: python tempfile directory)')
parser.add_argument('-o', '--output', default='flair.collapse', \
action='store', dest='o', help='output file name base for FLAIR isoforms (default: flair.collapse)')
args, unknown = parser.parse_known_args()
if unknown and not args.quiet:
sys.stderr.write('Collapse unrecognized arguments: {}\n'.format(' '.join(unknown)))
if corrected_reads:
args.q = corrected_reads
tempfile_dir = tempfile.NamedTemporaryFile().name
tempfile_name = tempfile_dir[tempfile_dir.rfind('/')+1:]+'.'
if args.temp_dir == '':
args.temp_dir = tempfile_dir+'/'
if not os.path.isdir(args.temp_dir):
if subprocess.call(['mkdir', args.temp_dir]):
sys.stderr.write('Could not make temporary directory {}\n'.format(args.temp_dir))
return 1
if args.temp_dir[-1] != '/':
args.temp_dir += '/'
if genomic_range:
args.range = genomic_range
args.o = args.temp_dir+run_id
args.q = args.temp_dir+run_id+'.sorted.bed.gz'
args.quiet = True
if args.m[-8:] != 'minimap2':
if args.m[-1] == '/':
args.m += 'minimap2'
else:
args.m += '/minimap2'
args.t, args.quality = str(args.t), str(args.quality)
args.o += '.'
if not os.path.exists(args.q):
sys.stderr.write('Query file path does not exist\n')
return 1
if os.stat(args.q).st_size == 0:
sys.stderr.write('Query file is empty\n')
return 1
if args.range:
if '\t' in args.range:
args.range = args.range.split('\t')
args.range = args.range[0]+':'+args.range[1]+'-'+args.range[2]
ext = '.bed'
args.o += args.range+'.'
if args.r[0][-3:] != 'bam':
sys.stderr.write('Must provide genome alignment BAM with -r if range is specified\n')
return 1
bams = []
for i in range(len(args.r)):
bams += [args.temp_dir+tempfile_name+args.range+str(i)+'.bam']
if subprocess.call([args.sam, 'view', '-h', args.r[i], args.range], \
stdout=open(bams[-1], 'w')):
return 1
args.r = []
for i in range(len(bams)):
args.r += [bams[i][:-3]+'fasta']
subprocess.call([args.sam, 'fasta', bams[i]], \
stdout=open(args.r[-1], 'w'), \
stderr=open(args.temp_dir+tempfile_name+'bam2fq_stderr', 'w'))
subprocess.call(['rm'] + bams)
chrom = args.range[:args.range.find(':')]
coord1 = args.range[args.range.find(':')+1:args.range.find('-')]
coord2 = args.range[args.range.find('-')+1:]
precollapse = args.temp_dir+tempfile_name+args.range+'.bed'
coordfile = open(args.temp_dir+tempfile_name+args.range+'.range.bed', 'wt')
coordfile.write('\t'.join([chrom, coord1, coord2]))
coordfile.close()
if subprocess.call(['tabix', '-R', args.temp_dir+tempfile_name+args.range+'.range.bed', args.q], \
stdout=open(precollapse, 'w')):
sys.stderr.write('Query file needs to be a sorted, bgzip-ed, tabix-indexed bed file if range is specified\n')
return 1
else:
ext = '.'+args.q[-3:]
precollapse = args.q
args.r = args.r[0].split(',') if ',' in args.r[0] else args.r
intermediate = []
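	# optional promoter filter: pull each read's TSS into a bed file, intersect it with the
	# supplied promoter regions, and keep only reads whose TSS overlaps a promoter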
if args.p:
if not args.quiet: sys.stderr.write('Filtering out reads without promoter-supported TSS\n')
if subprocess.call([sys.executable, path+'bin/pull_starts.py', args.q, args.temp_dir+tempfile_name+'tss.bed']):
return 1
if subprocess.call([args.b, 'intersect', '-a', args.temp_dir+tempfile_name+'tss.bed', '-b', args.p], \
stdout=open(args.temp_dir+tempfile_name+'promoter_intersect.bed', 'w')):
return 1
precollapse = args.o+'promoter_supported'+ext
subprocess.call([sys.executable, path+'bin/psl_reads_from_bed.py', args.temp_dir+tempfile_name+'promoter_intersect.bed', \
args.q, precollapse])
intermediate += [args.temp_dir+tempfile_name+'tss.bed', precollapse]
if args.threeprime:
if not args.quiet: sys.stderr.write('Filtering out reads without TES support\n')
if subprocess.call([sys.executable, path+'bin/pull_starts.py', precollapse, args.temp_dir+tempfile_name+'tes.bed', 'reverse']):
return 1
if subprocess.call([args.b, 'intersect', '-a', args.temp_dir+tempfile_name+'tes.bed', '-b', args.threeprime], \
stdout=open(args.temp_dir+tempfile_name+'tes_intersect.bed', 'w')):
return 1
precollapse = args.o+'tes_supported'+ext
subprocess.call([sys.executable, path+'bin/psl_reads_from_bed.py', args.temp_dir+tempfile_name+'tes_intersect.bed', \
args.q, precollapse])
intermediate += [args.temp_dir+tempfile_name+'tes.bed', precollapse]
collapse_cmd = [sys.executable, path+'bin/collapse_isoforms_precise.py', '-q', precollapse, \
'-m', str(args.max_ends), '-w', args.w, '-n', args.n, '-o', args.o+'firstpass.unfiltered'+ext]
if args.f and not args.no_end_adjustment:
collapse_cmd += ['-f', args.f]
if args.i:
collapse_cmd += ['-i']
if args.quiet:
collapse_cmd += ['--quiet']
if subprocess.call(collapse_cmd):
return 1
	# filtering out subset isoforms with insufficient support
if subprocess.call([sys.executable, path+'bin/filter_collapsed_isoforms.py', \
args.o+'firstpass.unfiltered'+ext, args.filter, args.o+'firstpass'+ext, args.w]):
return 1
intermediate += [args.o+'firstpass.unfiltered'+ext]
# rename first-pass isoforms to annotated transcript IDs if they match
if args.f:
if not args.quiet: sys.stderr.write('Renaming isoforms\n')
if subprocess.call([sys.executable, path+'bin/identify_gene_isoform.py', \
args.o+'firstpass'+ext, args.f, args.o+'firstpass.named'+ext]):
sys.exit(1)
subprocess.call(['mv', args.o+'firstpass.named'+ext, args.o+'firstpass'+ext])
if subprocess.call([sys.executable, path+'bin/psl_to_sequence.py', args.o+'firstpass'+ext, \
args.g, args.o+'firstpass.fa']):
return 1
# reassign reads to first-pass isoforms
if not args.quiet: sys.stderr.write('Aligning reads to first-pass isoform reference\n')
count_files, align_files = [], []
alignout = args.temp_dir + tempfile_name +'firstpass.'
try:
if subprocess.call([args.m, '-a', '-t', args.t, '-N', '4', args.o+'firstpass.fa'] + args.r, \
stdout=open(alignout+'sam', 'w'), stderr=open(alignout+'mm2_stderr', 'w')):
return 1
except Exception as e:
sys.stderr.write(str(e)+'\n\n\nMinimap2 error, please check that all file, directory, and executable paths exist\n')
return 1
# count the number of supporting reads for each first-pass isoform
if args.salmon: # use salmon to count
if subprocess.call([args.sam, 'view', '-F', '4', '-h', '-S', alignout+'sam'], \
stdout=open(alignout+'mapped.sam', 'w')):
return 1
subprocess.call(['mv', alignout+'mapped.sam', alignout+'sam'])
subprocess.call([args.salmon, 'quant', '-t', args.o+'firstpass.fa', '-o', alignout+'salmon', \
'-p', args.t, '-l', 'U', '-a', alignout+'sam'], stderr=open(alignout+'salmon_stderr.txt', 'w'))
count_file = alignout+'salmon/quant.sf'
align_files += [alignout+'sam', alignout+'salmon/quant.sf']
else:
args.quality = '0' if args.trust_ends else args.quality
if args.quality != '0':
subprocess.call([args.sam, 'view', '-q', args.quality, '-h', '-S', alignout+'sam'], \
stdout=open(alignout+'q.sam', 'w'), stderr=open(alignout+'q.samtools_stderr', 'w'))
align_files += [alignout+'sam']
else:
subprocess.call(['mv', alignout+'sam', alignout+'q.sam'])
count_cmd = [sys.executable, path+'bin/count_sam_transcripts.py', '-s', alignout+'q.sam', \
'-o', alignout+'q.counts', '-t', args.t, '--quality', args.quality]
if args.stringent:
count_cmd += ['--stringent', '-i', args.o+'firstpass'+ext]
if args.trust_ends:
count_cmd += ['--trust_ends']
if args.generate_map:
count_cmd += ['--generate_map', args.o+'isoform.read.map.txt']
if subprocess.call(count_cmd):
sys.stderr.write('Failed at counting step for isoform read support\n')
return 1
count_file = alignout+'q.counts'
align_files += [alignout+'q.sam']
subprocess.call([sys.executable, path+'bin/combine_counts.py', count_file, args.o+'firstpass.q.counts'])
if not args.quiet: sys.stderr.write('Filtering isoforms by read coverage\n')
subprocess.call([sys.executable, path+'bin/match_counts.py', args.o+'firstpass.q.counts', \
args.o+'firstpass'+ext, args.s, args.o+'isoforms'+ext])
subprocess.call([sys.executable, path+'bin/psl_to_sequence.py', args.o+'isoforms'+ext, \
args.g, args.o+'isoforms.fa'])
if args.f:
subprocess.call([sys.executable, path+'bin/psl_to_gtf.py', args.o+'isoforms'+ext], \
stdout=open(args.o+'isoforms.gtf', 'w'))
subprocess.call(['rm', '-rf', args.o+'firstpass.fa', alignout+'q.counts'])
if not args.keep_intermediate:
subprocess.call(['rm', args.o+'firstpass.q.counts', args.o+'firstpass'+ext])
subprocess.call(['rm', '-rf'] + glob.glob(args.temp_dir+'*'+tempfile_name+'*') + align_files + intermediate)
return args.o+'isoforms.bed', args.o+'isoforms.fa'
def quantify(isoform_sequences=''):
parser = argparse.ArgumentParser(description='flair-quantify parse options', \
usage='python flair.py quantify -r reads_manifest.tsv -i isoforms.fa [options]')
parser.add_argument('quantify')
required = parser.add_argument_group('required named arguments')
if not isoform_sequences:
required.add_argument('-r', '--reads_manifest', action='store', dest='r', type=str, \
required=True, help='Tab delimited file containing sample id, condition, batch, reads.fq')
required.add_argument('-i', '--isoforms', action='store', dest='i', \
type=str, required=True, help='FastA of FLAIR collapsed isoforms')
else:
required.add_argument('--reads_manifest', action='store', dest='r', type=str, \
required=True, help='Tab delimited file containing sample id, condition, batch, reads.fq')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-t', '--threads', type=int, \
action='store', dest='t', default=4, help='minimap2 number of threads (4)')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='specify a samtools executable path if not in $PATH if --quality is also used')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='''minimum MAPQ of read assignment to an isoform. If using salmon, all alignments are
used (1)''')
parser.add_argument('-o', '--output', type=str, action='store', dest='o', \
default='counts_matrix.tsv', help='Counts matrix output file name prefix (counts_matrix.tsv)')
parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
default='', help='Path to salmon executable, specify if salmon quantification is desired')
parser.add_argument('--tpm', action='store_true', dest='tpm', default=False, \
help='specify this flag to output additional file with expression in TPM')
parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
help='specify if reads are generated from a long read method with minimal fragmentation')
parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
help='''directory to put temporary files. use "./" to indicate current directory
(default: python tempfile directory)''')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('Quantify unrecognized arguments: {}\n'.format(' '.join(unknown)))
if isoform_sequences:
args.i = isoform_sequences
args.o += '.counts_matrix.tsv'
try:
import numpy as np
import codecs
except:
sys.stderr.write('Numpy import error. Please pip install numpy. Exiting.\n')
sys.exit(1)
if args.m[-8:] != 'minimap2':
if args.m[-1] == '/':
args.m += 'minimap2'
else:
args.m += '/minimap2'
args.t, args.quality = str(args.t), str(args.quality)
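	# the reads manifest has one sample per line with four tab-separated columns, e.g.
	# (illustrative values):  sample1  conditionA  batch1  /path/to/sample1_reads.fq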
samData = list()
with codecs.open(args.r, "r", encoding='utf-8', errors='ignore') as lines:
for line in lines:
cols = line.rstrip().split('\t')
if len(cols)<4:
sys.stderr.write('Expected 4 columns in manifest.tsv, got %s. Exiting.\n' % len(cols))
return 1
sample, group, batch, readFile = cols
readFileRoot = tempfile.NamedTemporaryFile().name
if args.temp_dir != '':
if not os.path.isdir(args.temp_dir):
subprocess.call(['mkdir', args.temp_dir])
readFileRoot = args.temp_dir + '/' + readFileRoot[readFileRoot.rfind('/')+1:]
samData.append(cols + [readFileRoot + '.sam'])
for num,sample in enumerate(samData,0):
sys.stderr.write("Step 1/3. Aligning sample %s_%s: %s/%s \r" % (sample[0],sample[2],num+1,len(samData)))
mm2_command = [args.m, '-a', '-N', '4', '-t', args.t, args.i, sample[-2]]
try:
if subprocess.call(mm2_command, stdout=open(sample[-1], 'w'), \
stderr=open(sample[-1]+'.mm2_stderr.txt', 'w')):
sys.stderr.write('Check {} file\n'.format(sample[-1]+'.mm2_stderr.txt'))
return 1
except:
sys.stderr.write('''Possible minimap2 error, please check that all file, directory,
and executable paths exist\n''')
return 1
subprocess.call(['rm', sample[-1]+'.mm2_stderr.txt'])
sys.stderr.flush()
if args.quality != '0' and not args.trust_ends and not args.salmon:
if subprocess.call([args.sam, 'view', '-q', args.quality, '-h', '-S', sample[-1]], \
stdout=open(sample[-1]+'.qual.sam', 'w')):
return 1
subprocess.call(['mv', sample[-1]+'.qual.sam', sample[-1]])
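	# countData maps isoform id -> numpy array of read counts, one entry per sample in manifest order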
countData = dict()
for num,data in enumerate(samData):
sample, group, batch, readFile, samOut = data
sys.stderr.write("Step 2/3. Quantifying isoforms for sample %s_%s: %s/%s \r" % (sample,batch,num+1,len(samData)))
if not args.salmon:
count_cmd = [sys.executable, path+'bin/count_sam_transcripts.py', '-s', samOut, \
'-o', samOut+'.counts.txt', '-t', args.t, '--quality', args.quality]
if args.trust_ends:
count_cmd += ['--trust_ends']
subprocess.call(count_cmd)
for line in open(samOut+'.counts.txt'):
line = line.rstrip().split('\t')
iso, numreads = line[0], line[1]
if iso not in countData: countData[iso] = np.zeros(len(samData))
countData[iso][num] = numreads
else:
subprocess.call([args.salmon, 'quant', '-t', args.i, '-o', samOut[:-4]+'.salmon', \
'-p', args.t, '-l', 'U', '-a', samOut], stderr=open('salmon_stderr.txt', 'w'))
salmonOut = open(samOut[:-4]+'.salmon/quant.sf')
salmonOut.readline() # header
for line in salmonOut:
line = line.rstrip().split('\t')
iso, tpm, numreads = line[0], line[3], line[4]
if iso not in countData: countData[iso] = np.zeros(len(samData))
if args.tpm:
countData[iso][num] = tpm
else:
countData[iso][num] = numreads
subprocess.call(['rm', '-r', samOut[:-4]+'.salmon/', 'salmon_stderr.txt'])
sys.stderr.flush()
subprocess.call(['rm', samOut])
sys.stderr.write("Step 3/3. Writing counts to {} \r".format(args.o))
countMatrix = open(args.o,'w')
countMatrix.write("ids\t%s\n" % "\t".join(["_".join(x[:3]) for x in samData]))
features = sorted(list(countData.keys()))
for f in features:
countMatrix.write("%s\t%s\n" % (f,"\t".join(str(x) for x in countData[f])))
countMatrix.close()
sys.stderr.flush()
sys.stderr.write("\n")
if args.tpm and not args.salmon:
subprocess.call([sys.executable, path+'bin/counts_to_tpm.py', args.o, args.o+'.tpm.tsv'])
return args.o
def diffExp(counts_matrix=''):
parser = argparse.ArgumentParser(description='flair-diffExp parse options', \
usage='python flair.py diffExp -q counts_matrix.tsv --out_dir out_dir [options]')
parser.add_argument('diffExp')
required = parser.add_argument_group('required named arguments')
if not counts_matrix:
required.add_argument('-q', '--counts_matrix', action='store', dest='q', \
type=str, required=True, help='Tab-delimited isoform count matrix from flair quantify module.')
required.add_argument('-o', '--out_dir', action='store', dest='o', \
type=str, required=True, help='Output directory for tables and plots.')
parser.add_argument('-t', '--threads', action='store', dest='t', \
type=int, required=False, default=4, help='Number of threads for parallel DRIMSeq.')
parser.add_argument('-e', '--exp_thresh', action='store', dest='e', type=int, required=False, \
default=10, help='Read count expression threshold. Isoforms in which \
both conditions contain fewer than E reads are filtered out (Default E=10)')
parser.add_argument('-of', '--out_dir_force', action='store_true', dest='of', \
required=False, help='''Specify this argument to force overwriting of files in
an existing output directory''')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('DiffExp unrecognized arguments: {}\n'.format(' '.join(unknown)))
if counts_matrix:
args.q = counts_matrix
args.o+'.diffExp'
scriptsBin = path + "bin/"
runDE = scriptsBin + "deFLAIR.py"
DEcommand = [sys.executable, '-W ignore', runDE, '--filter', str(args.e), '--threads', \
str(args.t), '--outDir', args.o, '--matrix', args.q]
if args.of:
DEcommand += ['-of']
subprocess.call(DEcommand)
return
def diffSplice(isoforms='', counts_matrix=''):
parser = argparse.ArgumentParser(description='flair-diffSplice parse options', \
usage='python flair.py diffSplice -i isoforms.bed|isoforms.psl -q counts_matrix.tsv [options]')
	parser.add_argument('diffSplice')
required = parser.add_argument_group('required named arguments')
if not isoforms:
required.add_argument('-i', '--isoforms', action='store', dest='i', required=True, \
type=str, help='isoforms in bed or psl format')
required.add_argument('-q', '--counts_matrix', action='store', dest='q', \
type=str, required=True, help='tab-delimited isoform count matrix from flair quantify module')
parser.add_argument('-o', '--output', action='store', dest='o', default='flair.diffsplice', type=str, \
required=False, help='output file name base for FLAIR isoforms (default: flair.diffsplice)')
parser.add_argument('--test', action='store_true', dest='test', \
required=False, default=False, help='Run DRIMSeq statistical testing')
parser.add_argument('-t', '--threads', action='store', dest='t', \
type=int, required=False, default=1, help='Number of threads DRIMSeq (1)')
parser.add_argument('--drim1', action='store', dest='drim1', type=int, required=False, default=6, \
help='''The minimum number of samples that have coverage over an AS event inclusion/exclusion
for DRIMSeq testing; events with too few samples are filtered out and not tested (6)''')
parser.add_argument('--drim2', action='store', dest='drim2', type=int, required=False, default=3, \
help='''The minimum number of samples expressing the inclusion of an AS event;
events with too few samples are filtered out and not tested (3)''')
parser.add_argument('--drim3', action='store', dest='drim3', type=int, required=False, default=15, \
help='''The minimum number of reads covering an AS event inclusion/exclusion for DRIMSeq testing,
events with too few samples are filtered out and not tested (15)''')
parser.add_argument('--drim4', action='store', dest='drim4', type=int, required=False, default=5, \
help='''The minimum number of reads covering an AS event inclusion for DRIMSeq testing,
events with too few samples are filtered out and not tested (5)''')
parser.add_argument('--batch', action='store_true', dest='batch', required=False, default=False, \
help='''If specified with --test, DRIMSeq will perform batch correction''')
parser.add_argument('--conditionA', action='store', dest='conditionA', required=False, default='', \
help='''Specify one condition corresponding to samples in the counts_matrix to be compared against
condition2; by default, the first two unique conditions are used''')
parser.add_argument('--conditionB', action='store', dest='conditionB', required=False, default='', \
help='''Specify another condition corresponding to samples in the counts_matrix to be compared against
conditionA''')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('DiffSplice unrecognized arguments: {}\n'.format(' '.join(unknown)))
if isoforms:
args.i = isoforms
args.q = counts_matrix
if args.i[-3:].lower() == 'psl':
subprocess.call([sys.executable, path+'bin/psl_to_bed.py', args.i, args.i+'.bed'])
args.i = args.i+'.bed'
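	# call alternative-splicing events from the isoforms; each event type (es: exon skipping,
	# alt5/alt3: alternative splice sites, ir: intron retention) gets its own .events.quant.tsv file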
subprocess.call([sys.executable, path+'bin/call_diffsplice_events.py', args.i, args.o, args.q])
subprocess.call([sys.executable, path+'bin/es_as.py', args.i], stdout=open(args.o+'.es.events.tsv','w'))
subprocess.call([sys.executable, path+'bin/es_as_inc_excl_to_counts.py', args.q, args.o+'.es.events.tsv'], \
stdout=open(args.o+'.es.events.quant.tsv','w'))
subprocess.call(['rm', args.o+'.es.events.tsv'])
	if args.test or args.drim1 or args.drim2 or args.drim3 or args.drim4:
sys.stderr.write('DRIMSeq testing for each AS event type\n')
drim1, drim2, drim3, drim4 = [str(x) for x in [args.drim1, args.drim2, args.drim3, args.drim4]]
ds_command = [sys.executable, path+'bin/runDS.py', '--threads', str(args.t), \
'--drim1', drim1, '--drim2', drim2, '--drim3', drim3, '--drim4', drim4]
if args.batch:
ds_command += ['--batch']
if args.conditionA:
if not args.conditionB:
sys.stderr.write('Both conditionA and conditionB must be specified, or both left unspecified\n')
return 1
ds_command += ['--conditionA', args.conditionA, '--conditionB', args.conditionB]
with open(args.o+'.stderr.txt', 'w') as ds_stderr:
subprocess.call(ds_command + ['--matrix', args.o+'.es.events.quant.tsv', '--prefix', args.o+'.es'], stderr=ds_stderr)
subprocess.call(ds_command + ['--matrix', args.o+'.alt5.events.quant.tsv', '--prefix', args.o+'.alt5'], stderr=ds_stderr)
subprocess.call(ds_command + ['--matrix', args.o+'.alt3.events.quant.tsv', '--prefix', args.o+'.alt3'], stderr=ds_stderr)
subprocess.call(ds_command + ['--matrix', args.o+'.ir.events.quant.tsv', '--prefix', args.o+'.ir'], stderr=ds_stderr)
return
path = '/'.join(os.path.realpath(__file__).split("/")[:-1])+'/'
if len(sys.argv) < 2:
sys.stderr.write('usage: python flair.py <mode> --help \n')
sys.stderr.write('modes: align, correct, collapse, quantify, diffExp, diffSplice\n')
sys.stderr.write('Multiple modules can be run when specified using numbers, e.g.:\n')
sys.stderr.write('python flair.py 1234 ...')
sys.exit(1)
else:
mode = sys.argv[1].lower()
aligned_reads, corrected_reads, isoforms, isoform_sequences, counts_matrix = [0]*5
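# modules can be chained by passing their numbers as the mode, e.g. '1234' runs
# align (1), correct (2), collapse (3) and quantify (4); '3.5' selects collapse-range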
if mode == 'align' or '1' in mode:
status = align()
if status == 1:
sys.exit(1)
else:
aligned_reads = status
if mode == 'correct' or '2' in mode:
if aligned_reads:
status = correct(aligned_reads=aligned_reads)
else:
status = correct()
if status == 1:
sys.exit(1)
else:
corrected_reads = status
if mode == 'collapse' or ('3' in mode and '3.5' not in mode):
if corrected_reads:
status = collapse(corrected_reads=corrected_reads)
else:
status = collapse()
if status == 1:
sys.exit(1)
else:
isoforms, isoform_sequences = status
if mode == 'collapse-range' or '3.5' in mode:
from multiprocessing import Pool
tempfile_name = tempfile.NamedTemporaryFile().name
run_id = tempfile_name[tempfile_name.rfind('/')+1:]
if corrected_reads and not aligned_reads:
sys.stderr.write('''Collapse 3.5 run consecutively without align module; will assume {}
to be the name of the aligned reads bam file\n'''.format(corrected_reads[:-18]+'.bam'))
status = collapse_range(corrected_reads=corrected_reads, \
aligned_reads=corrected_reads[:-18]+'.bam')
elif corrected_reads and aligned_reads:
status = collapse_range(corrected_reads=corrected_reads, aligned_reads=aligned_reads)
elif not corrected_reads and aligned_reads:
sys.stderr.write('Correct module not run...\n')
status = collapse_range(corrected_reads=aligned_reads, aligned_reads=aligned_reads)
else:
status = collapse_range()
if status == 1:
sys.exit(1)
else:
isoforms, isoform_sequences = status
mode = mode.replace('3.5', 'x')
if mode == 'quantify' or '4' in mode:
if isoform_sequences:
status = quantify(isoform_sequences=isoform_sequences)
else:
status = quantify()
if status == 1:
sys.exit(1)
else:
counts_matrix = status
if mode == 'diffexp' or '5' in mode:
if counts_matrix:
status = diffExp(counts_matrix=counts_matrix)
else:
status = diffExp()
if status == 1:
sys.exit(1)
if mode == 'diffsplice' or '6' in mode:
if counts_matrix and isoforms:
status = diffSplice(isoforms=isoforms, counts_matrix=counts_matrix)
elif not isoforms and counts_matrix:
sys.stderr.write('DiffSplice run consecutively without collapse module, exiting\n')
sys.exit(1)
else:
status = diffSplice()
if status == 1:
sys.exit(1)
if mode == '--version':
sys.stderr.write('FLAIR v1.5.1\n')
| true
| true
|
f715cc97d4d920ab1b7a4b331266075d9ff64558
| 5,266
|
py
|
Python
|
zprime_search/python/PlotHistogram.py
|
cdragoiu/particle_physics
|
1814ea2f072ccfbf1412397b19a3b5dad7ddb639
|
[
"MIT"
] | null | null | null |
zprime_search/python/PlotHistogram.py
|
cdragoiu/particle_physics
|
1814ea2f072ccfbf1412397b19a3b5dad7ddb639
|
[
"MIT"
] | null | null | null |
zprime_search/python/PlotHistogram.py
|
cdragoiu/particle_physics
|
1814ea2f072ccfbf1412397b19a3b5dad7ddb639
|
[
"MIT"
] | null | null | null |
import ROOT, sys, uuid
from PlotStyle import *
from SharedData import *
# plot histograms ----------------------------------------------------------------------------------
def Plot(data, mcs, drawP, tag):
canvas = ROOT.TCanvas(str(uuid.uuid4()), '', 440, 100, GetW(), GetH())
SetCanvas(canvas)
if 'x' in drawP.log:
canvas.SetLogx()
if 'y' in drawP.log:
canvas.SetLogy()
legend = ROOT.TLegend(0.76, 0.94-0.04*(1+len(mcs)), 0.88, 0.94)
SetLegend(legend)
fileDT = ROOT.TFile.Open(data.fileName)
histDT = fileDT.Get(data.histName)
histDT.SetName(str(uuid.uuid4()))
if histDT.GetSumw2N() == 0:
histDT.Sumw2()
histDT.Rebin(drawP.bins)
histDT.SetLineColor(data.color)
histDT.SetMarkerColor(data.color)
histDT.SetMarkerStyle(20)
histDT.SetMarkerSize(1.0)
histDT.GetXaxis().SetRangeUser(drawP.minX, drawP.maxX)
histDT.GetYaxis().SetRangeUser(drawP.minY, drawP.maxY)
legend.AddEntry(histDT, data.legendName, 'p')
filesMC = []
histsMC = []
hstack = ROOT.THStack(str(uuid.uuid4()), '')
for mc in mcs:
filesMC.append(ROOT.TFile.Open(mc.fileName))
histsMC.append(filesMC[-1].Get(mc.histName))
histsMC[-1].SetName(str(uuid.uuid4()))
if histsMC[-1].GetSumw2N() == 0:
histsMC[-1].Sumw2()
histsMC[-1].Rebin(drawP.bins)
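        # QCD is normalized with its own scale factor; all other MC samples are scaled by
        # the per-tag MC scale factor times the integrated luminosity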
if 'QCD.root' in mc.fileName:
histsMC[-1].Scale(QcdSF(tag))
else:
histsMC[-1].Scale(McSF(tag)*Lumi(tag))
histsMC[-1].SetFillStyle(1001)
histsMC[-1].SetFillColor(mc.color)
histsMC[-1].SetLineColor(ROOT.kGray+3)
histsMC[-1].GetXaxis().SetRangeUser(drawP.minX, drawP.maxX)
histsMC[-1].GetYaxis().SetRangeUser(drawP.minY, drawP.maxY)
legend.AddEntry(histsMC[-1], mc.legendName, 'f')
hstack.Add(histsMC[-1])
histTMP = histDT.Clone(str(uuid.uuid4()))
histTMP.Reset()
histTMP.SetStats(0)
SetAxes(histTMP)
histTMP.GetXaxis().SetTitle(drawP.titleX)
histTMP.GetXaxis().SetRangeUser(drawP.minX, drawP.maxX)
histTMP.GetYaxis().SetTitle(drawP.titleY)
histTMP.GetYaxis().SetRangeUser(drawP.minY, drawP.maxY)
histTMP.Draw()
hstack.Draw('same hist')
histDT.Draw('same e p')
histTMP.Draw('same axis')
legend.Draw()
canvas.Update()
histSUM = histDT.Clone(str(uuid.uuid4()))
histSUM.Reset()
for hist in histsMC:
histSUM.Add(hist)
ratio = histDT.Clone(str(uuid.uuid4()))
ratio.Divide(histDT, histSUM)
canvasR = ROOT.TCanvas(str(uuid.uuid4()), '', 440, 130+GetH(), GetW(), GetH('R'))
SetCanvas(canvasR, 'R')
canvasR.SetGridy()
if 'x' in drawP.log:
canvasR.SetLogx()
ratio.SetLineColor(ROOT.kBlack)
ratio.SetMarkerColor(ROOT.kBlack)
ratio.SetMarkerStyle(20)
ratio.SetMarkerSize(1.0)
ratio.SetStats(0)
SetAxes(ratio, 'R')
ratio.GetXaxis().SetTitle(drawP.titleX)
ratio.GetXaxis().SetRangeUser(drawP.minX, drawP.maxX)
ratio.GetYaxis().SetTitle('Data / MC')
ratio.GetYaxis().SetRangeUser(0.5, 1.5)
ratio.Draw('e p')
canvasR.Update()
raw_input('...')
# run as the main program only ---------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'usage: ' + sys.argv[0] + ' path histName runType [command]'
print ' path = path to histograms with prefix'
print ' histName = name of histogram to plot'
print ' runType = ele, hf'
print ' command = noQCD'
sys.exit()
path = sys.argv[1]
name = sys.argv[2]
tag = sys.argv[3]
if len(sys.argv) == 5 and sys.argv[4] == 'noQCD':
addQCD = False
else:
addQCD = True
if tag == 'ele':
data = Hist(name, path + 'DoubleElectron.root', 'Data', ROOT.kBlack)
elif tag == 'hf':
data = Hist(name, path + 'SingleElectron.root', 'Data', ROOT.kBlack)
mcs = []
if tag == 'ele':
mcs.append(Hist(name, path + 'WJets.root', ' W+jets', ROOT.kCyan-3))
mcs.append(Hist(name, path + 'TW.root', ' tW,#bar{t}W', ROOT.kMagenta-3))
mcs.append(Hist(name, path + 'DYtoTauTau.root', ' Z#rightarrow#tau#tau', ROOT.kYellow-3))
if addQCD:
mcs.append(Hist(name, path + 'QCD.root', ' jets', ROOT.kGray+1))
mcs.append(Hist(name, path + 'TT.root', ' t#bar{t}', ROOT.kRed-3))
mcs.append(Hist(name, path + 'VV.root', ' WW,WZ,ZZ', ROOT.kGreen-3))
mcs.append(Hist(name, path + 'DYtoEE.root', ' Z#rightarrowee', ROOT.kAzure-3))
elif tag == 'hf':
mcs.append(Hist(name, path + 'TW.root', ' tW,#bar{t}W', ROOT.kMagenta-3))
mcs.append(Hist(name, path + 'DYtoTauTau.root', ' Z#rightarrow#tau#tau', ROOT.kYellow-3))
mcs.append(Hist(name, path + 'TT.root', ' t#bar{t}', ROOT.kRed-3))
mcs.append(Hist(name, path + 'VV.root', ' WW,WZ,ZZ', ROOT.kGreen-3))
mcs.append(Hist(name, path + 'WJets.root', ' W+jets', ROOT.kCyan-3))
if addQCD:
mcs.append(Hist(name, path + 'QCD.root', ' jets', ROOT.kGray+1))
mcs.append(Hist(name, path + 'DYtoEE.root', ' Z#rightarrowee', ROOT.kAzure-3))
Plot(data, mcs, GetDrawP(tag+'_'+name), tag)
| 41.140625
| 100
| 0.588872
|
import ROOT, sys, uuid
from PlotStyle import *
from SharedData import *
def Plot(data, mcs, drawP, tag):
canvas = ROOT.TCanvas(str(uuid.uuid4()), '', 440, 100, GetW(), GetH())
SetCanvas(canvas)
if 'x' in drawP.log:
canvas.SetLogx()
if 'y' in drawP.log:
canvas.SetLogy()
legend = ROOT.TLegend(0.76, 0.94-0.04*(1+len(mcs)), 0.88, 0.94)
SetLegend(legend)
fileDT = ROOT.TFile.Open(data.fileName)
histDT = fileDT.Get(data.histName)
histDT.SetName(str(uuid.uuid4()))
if histDT.GetSumw2N() == 0:
histDT.Sumw2()
histDT.Rebin(drawP.bins)
histDT.SetLineColor(data.color)
histDT.SetMarkerColor(data.color)
histDT.SetMarkerStyle(20)
histDT.SetMarkerSize(1.0)
histDT.GetXaxis().SetRangeUser(drawP.minX, drawP.maxX)
histDT.GetYaxis().SetRangeUser(drawP.minY, drawP.maxY)
legend.AddEntry(histDT, data.legendName, 'p')
filesMC = []
histsMC = []
hstack = ROOT.THStack(str(uuid.uuid4()), '')
for mc in mcs:
filesMC.append(ROOT.TFile.Open(mc.fileName))
histsMC.append(filesMC[-1].Get(mc.histName))
histsMC[-1].SetName(str(uuid.uuid4()))
if histsMC[-1].GetSumw2N() == 0:
histsMC[-1].Sumw2()
histsMC[-1].Rebin(drawP.bins)
if 'QCD.root' in mc.fileName:
histsMC[-1].Scale(QcdSF(tag))
else:
histsMC[-1].Scale(McSF(tag)*Lumi(tag))
histsMC[-1].SetFillStyle(1001)
histsMC[-1].SetFillColor(mc.color)
histsMC[-1].SetLineColor(ROOT.kGray+3)
histsMC[-1].GetXaxis().SetRangeUser(drawP.minX, drawP.maxX)
histsMC[-1].GetYaxis().SetRangeUser(drawP.minY, drawP.maxY)
legend.AddEntry(histsMC[-1], mc.legendName, 'f')
hstack.Add(histsMC[-1])
histTMP = histDT.Clone(str(uuid.uuid4()))
histTMP.Reset()
histTMP.SetStats(0)
SetAxes(histTMP)
histTMP.GetXaxis().SetTitle(drawP.titleX)
histTMP.GetXaxis().SetRangeUser(drawP.minX, drawP.maxX)
histTMP.GetYaxis().SetTitle(drawP.titleY)
histTMP.GetYaxis().SetRangeUser(drawP.minY, drawP.maxY)
histTMP.Draw()
hstack.Draw('same hist')
histDT.Draw('same e p')
histTMP.Draw('same axis')
legend.Draw()
canvas.Update()
histSUM = histDT.Clone(str(uuid.uuid4()))
histSUM.Reset()
for hist in histsMC:
histSUM.Add(hist)
ratio = histDT.Clone(str(uuid.uuid4()))
ratio.Divide(histDT, histSUM)
canvasR = ROOT.TCanvas(str(uuid.uuid4()), '', 440, 130+GetH(), GetW(), GetH('R'))
SetCanvas(canvasR, 'R')
canvasR.SetGridy()
if 'x' in drawP.log:
canvasR.SetLogx()
ratio.SetLineColor(ROOT.kBlack)
ratio.SetMarkerColor(ROOT.kBlack)
ratio.SetMarkerStyle(20)
ratio.SetMarkerSize(1.0)
ratio.SetStats(0)
SetAxes(ratio, 'R')
ratio.GetXaxis().SetTitle(drawP.titleX)
ratio.GetXaxis().SetRangeUser(drawP.minX, drawP.maxX)
ratio.GetYaxis().SetTitle('Data / MC')
ratio.GetYaxis().SetRangeUser(0.5, 1.5)
ratio.Draw('e p')
canvasR.Update()
raw_input('...')
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'usage: ' + sys.argv[0] + ' path histName runType [command]'
print ' path = path to histograms with prefix'
print ' histName = name of histogram to plot'
print ' runType = ele, hf'
print ' command = noQCD'
sys.exit()
path = sys.argv[1]
name = sys.argv[2]
tag = sys.argv[3]
if len(sys.argv) == 5 and sys.argv[4] == 'noQCD':
addQCD = False
else:
addQCD = True
if tag == 'ele':
data = Hist(name, path + 'DoubleElectron.root', 'Data', ROOT.kBlack)
elif tag == 'hf':
data = Hist(name, path + 'SingleElectron.root', 'Data', ROOT.kBlack)
mcs = []
if tag == 'ele':
mcs.append(Hist(name, path + 'WJets.root', ' W+jets', ROOT.kCyan-3))
mcs.append(Hist(name, path + 'TW.root', ' tW,#bar{t}W', ROOT.kMagenta-3))
mcs.append(Hist(name, path + 'DYtoTauTau.root', ' Z#rightarrow#tau#tau', ROOT.kYellow-3))
if addQCD:
mcs.append(Hist(name, path + 'QCD.root', ' jets', ROOT.kGray+1))
mcs.append(Hist(name, path + 'TT.root', ' t#bar{t}', ROOT.kRed-3))
mcs.append(Hist(name, path + 'VV.root', ' WW,WZ,ZZ', ROOT.kGreen-3))
mcs.append(Hist(name, path + 'DYtoEE.root', ' Z#rightarrowee', ROOT.kAzure-3))
elif tag == 'hf':
mcs.append(Hist(name, path + 'TW.root', ' tW,#bar{t}W', ROOT.kMagenta-3))
mcs.append(Hist(name, path + 'DYtoTauTau.root', ' Z#rightarrow#tau#tau', ROOT.kYellow-3))
mcs.append(Hist(name, path + 'TT.root', ' t#bar{t}', ROOT.kRed-3))
mcs.append(Hist(name, path + 'VV.root', ' WW,WZ,ZZ', ROOT.kGreen-3))
mcs.append(Hist(name, path + 'WJets.root', ' W+jets', ROOT.kCyan-3))
if addQCD:
mcs.append(Hist(name, path + 'QCD.root', ' jets', ROOT.kGray+1))
mcs.append(Hist(name, path + 'DYtoEE.root', ' Z#rightarrowee', ROOT.kAzure-3))
Plot(data, mcs, GetDrawP(tag+'_'+name), tag)
| false
| true
|
f715cce6602d941edd23731c966b3365ce9f1f13
| 1,536
|
py
|
Python
|
polling_stations/apps/data_collection/management/commands/import_hertsmere.py
|
chris48s/UK-Polling-Stations
|
4742b527dae94f0276d35c80460837be743b7d17
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_collection/management/commands/import_hertsmere.py
|
chris48s/UK-Polling-Stations
|
4742b527dae94f0276d35c80460837be743b7d17
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_collection/management/commands/import_hertsmere.py
|
chris48s/UK-Polling-Stations
|
4742b527dae94f0276d35c80460837be743b7d17
|
[
"BSD-3-Clause"
] | null | null | null |
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
council_id = 'E07000098'
srid = 27700
districts_srid = 27700
districts_name = 'PollingDistricts'
stations_name = 'PollingStations.shp'
elections = [
'local.hertfordshire.2017-05-04',
'parl.2017-06-08'
]
def district_record_to_dict(self, record):
return {
'internal_council_id': str(record[0]).strip(),
'name': str(record[1]).strip(),
'polling_station_id': str(record[0]).strip(),
}
def format_address(self, record):
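        # fields 3-6 hold the address; blank parts are dropped and short parts
        # (1-3 characters, e.g. house numbers) are merged into the following line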
address_parts = [record[x].strip() for x in range(3, 7)]
for i, part in enumerate(address_parts):
if part == b'':
address_parts[i] = ''
for i, part in enumerate(address_parts):
if len(part) <= 3 and len(part) > 0:
address_parts[i+1] = part + ' ' + address_parts[i+1]
address_parts[i] = ''
break
address = "\n".join(address_parts)
while "\n\n" in address:
address = address.replace("\n\n", "\n")
return address.strip()
def station_record_to_dict(self, record):
postcode = record[8].strip()
if postcode == b'':
postcode = ''
return {
'internal_council_id': str(record[1]).strip(),
'address' : self.format_address(record),
'postcode': postcode,
}
| 34.133333
| 83
| 0.570313
|
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
council_id = 'E07000098'
srid = 27700
districts_srid = 27700
districts_name = 'PollingDistricts'
stations_name = 'PollingStations.shp'
elections = [
'local.hertfordshire.2017-05-04',
'parl.2017-06-08'
]
def district_record_to_dict(self, record):
return {
'internal_council_id': str(record[0]).strip(),
'name': str(record[1]).strip(),
'polling_station_id': str(record[0]).strip(),
}
def format_address(self, record):
address_parts = [record[x].strip() for x in range(3, 7)]
for i, part in enumerate(address_parts):
if part == b'':
address_parts[i] = ''
for i, part in enumerate(address_parts):
if len(part) <= 3 and len(part) > 0:
address_parts[i+1] = part + ' ' + address_parts[i+1]
address_parts[i] = ''
break
address = "\n".join(address_parts)
while "\n\n" in address:
address = address.replace("\n\n", "\n")
return address.strip()
def station_record_to_dict(self, record):
postcode = record[8].strip()
if postcode == b'':
postcode = ''
return {
'internal_council_id': str(record[1]).strip(),
'address' : self.format_address(record),
'postcode': postcode,
}
| true
| true
|
f715cd43b332d5cf3dd6ecd996c97808734feaac
| 1,497
|
py
|
Python
|
superset/db_engine_specs/gsheets.py
|
ayuanty/superset
|
132a8ef2cb55fa6692ea31d5c278f102d6c2886b
|
[
"Apache-2.0"
] | 1
|
2022-01-03T08:36:11.000Z
|
2022-01-03T08:36:11.000Z
|
superset/db_engine_specs/gsheets.py
|
ayuanty/superset
|
132a8ef2cb55fa6692ea31d5c278f102d6c2886b
|
[
"Apache-2.0"
] | 63
|
2021-06-12T18:25:14.000Z
|
2022-03-21T07:57:02.000Z
|
superset/db_engine_specs/gsheets.py
|
ayuanty/superset
|
132a8ef2cb55fa6692ea31d5c278f102d6c2886b
|
[
"Apache-2.0"
] | 1
|
2021-10-01T20:16:18.000Z
|
2021-10-01T20:16:18.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from sqlalchemy.engine.url import URL
from superset import security_manager
from superset.db_engine_specs.sqlite import SqliteEngineSpec
class GSheetsEngineSpec(SqliteEngineSpec):
"""Engine for Google spreadsheets"""
engine = "gsheets"
engine_name = "Google Sheets"
allows_joins = False
allows_subqueries = True
@classmethod
def modify_url_for_impersonation(
cls, url: URL, impersonate_user: bool, username: Optional[str]
) -> None:
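        # impersonate by passing the user's email as the 'subject' query argument,
        # so queries are executed on behalf of that user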
if impersonate_user and username is not None:
user = security_manager.find_user(username=username)
if user and user.email:
url.query["subject"] = user.email
| 36.512195
| 70
| 0.741483
|
from typing import Optional
from sqlalchemy.engine.url import URL
from superset import security_manager
from superset.db_engine_specs.sqlite import SqliteEngineSpec
class GSheetsEngineSpec(SqliteEngineSpec):
engine = "gsheets"
engine_name = "Google Sheets"
allows_joins = False
allows_subqueries = True
@classmethod
def modify_url_for_impersonation(
cls, url: URL, impersonate_user: bool, username: Optional[str]
) -> None:
if impersonate_user and username is not None:
user = security_manager.find_user(username=username)
if user and user.email:
url.query["subject"] = user.email
| true
| true
|
f715cd46be73951aea27a4ea4d8cd743000fd4dd
| 1,164
|
py
|
Python
|
test/functional/p2p_mempool.py
|
Pirontechv/Bitchain
|
7ca7b6a8090f221d6982b09891c19ca5b7ace1d0
|
[
"MIT"
] | 1
|
2020-03-13T14:59:52.000Z
|
2020-03-13T14:59:52.000Z
|
test/functional/p2p_mempool.py
|
Pirontechv/Bitchain
|
7ca7b6a8090f221d6982b09891c19ca5b7ace1d0
|
[
"MIT"
] | null | null | null |
test/functional/p2p_mempool.py
|
Pirontechv/Bitchain
|
7ca7b6a8090f221d6982b09891c19ca5b7ace1d0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitchainTestFramework
from test_framework.util import *
class P2PMempoolTests(BitchainTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
# Add a p2p connection
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
#request mempool
self.nodes[0].p2p.send_message(msg_mempool())
self.nodes[0].p2p.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| 32.333333
| 73
| 0.717354
|
from test_framework.mininode import *
from test_framework.test_framework import BitchainTestFramework
from test_framework.util import *
class P2PMempoolTests(BitchainTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
self.nodes[0].p2p.send_message(msg_mempool())
self.nodes[0].p2p.wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| true
| true
|
f715ce83ac789168b60816ea12ee97e12d21dee1
| 5,162
|
py
|
Python
|
tinyquery/repeated_util.py
|
graingert/tinyquery
|
f26940a2ad240911e278ef7c82e3f14e0f4c5e4e
|
[
"MIT"
] | 104
|
2015-02-21T22:54:15.000Z
|
2022-03-21T11:08:02.000Z
|
tinyquery/repeated_util.py
|
graingert/tinyquery
|
f26940a2ad240911e278ef7c82e3f14e0f4c5e4e
|
[
"MIT"
] | 14
|
2018-01-30T16:32:09.000Z
|
2022-03-02T12:57:11.000Z
|
tinyquery/repeated_util.py
|
graingert/tinyquery
|
f26940a2ad240911e278ef7c82e3f14e0f4c5e4e
|
[
"MIT"
] | 28
|
2015-09-16T22:42:44.000Z
|
2022-01-15T11:51:45.000Z
|
"""Helper functions for dealing with repeated fields.
It comes up in a few places that we need to flatten or unflatten repeated
columns when using them in conjunction with other repeated or scalar fields.
These functions allow us to flatten into non-repeated columns to apply various
operations and then unflatten back into repeated columns afterwards.
"""
from __future__ import absolute_import
from tinyquery import tq_modes
def rebuild_column_values(repetitions, values, result):
"""Rebuild a repeated column from flattened results.
Args:
repetitions: a list of how many repeated values go in a row for
each of the rows to process.
values: a list of all the values that need to be packed into lists
result: a (partial) result list to which the rows will be appended.
Returns:
a list of lists of values representing len(repetitions) rows, each
of which with a number of values corresponding to that row's
entry in repetitions
"""
if len(repetitions) == 0:
return result
curr_repetition = repetitions[0]
# For rows with no values, we supplied a None, so we need to pop
# off one value no matter what. If that value is None, we go back
# to an empty list, otherwise we put the value in a list.
curr_values = normalize_repeated_null(values[:max(curr_repetition, 1)])
return rebuild_column_values(
repetitions[1:],
values[max(curr_repetition, 1):],
result + [curr_values])
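# Illustrative example (not part of the original module):
#   rebuild_column_values([2, 1, 1], [1, 2, None, 3], [])
#   -> [[1, 2], [], [3]]   (the None stands in for a row with no repeated values)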
def normalize_column_to_length(col, desired_count):
"""Given the value(s) for a column, normalize to a desired length.
If `col` is a scalar, it's duplicated in a list the desired number of
times. If `col` is a list, it must have 0, 1, or the desired number of
elements, in which case `None` or the single element is duplicated, or
the original list is returned.
"""
desired_count = max(desired_count, 1)
if isinstance(col, list) and len(col) == desired_count:
return col
elif isinstance(col, list):
assert len(col) in (0, 1), (
'Unexpectedly got a row with the incorrect number of '
'repeated values.')
return (col or [None]) * desired_count
else:
return [col] * desired_count
def flatten_column_values(repeated_column_indices, column_values):
"""Take a list of columns and flatten them.
We need to accomplish three things during the flattening:
1. Flatten out any repeated fields.
2. Keep track of how many repeated values were in each row so that we
can go back
3. If there are other columns, duplicate their values so that we have
the same number of entries in all columns after flattening.
Args:
repeated_column_indices: the indices of the columns that
are repeated; if there's more than one repeated column, this
function assumes that we've already checked that the lengths of
these columns will match up, or that they have 0 or 1 element.
column_values: a list containing a list for each column's values.
Returns:
(repetition_counts, flattened_columns): a tuple
repetition_counts: a list containing one number per row,
representing the number of repeated values in that row
flattened_columns: a list containing one list for each column's
values. The list for each column will not contain nested
lists.
"""
# wrapping in list for python 3 support
rows = list(zip(*column_values))
repetition_counts = [
max(max(len(row[idx]) for idx in repeated_column_indices), 1)
for row in rows
]
rows_with_repetition_normalized = [
[
normalize_column_to_length(col, count)
for col in row
]
for row, count in zip(rows, repetition_counts)
]
normalized_columns = zip(*rows_with_repetition_normalized)
flattened_columns = [
[val for arr in col for val in arr]
for col in normalized_columns]
return (repetition_counts, flattened_columns)
def columns_have_allowed_repetition_counts(ref_col, col):
"""Determine if we could select col along with ref_col.
We assume ref_col is repeated. In tinyquery this is allowable if any of
the following is true:
- col is not repeated
- col is repeated but every row has only 0 or 1 element
- col is repeated but every row with more than 1 element matches the number
of elements in ref_col
"""
if col.mode != tq_modes.REPEATED:
return True
ref_counts = [len(val) for val in ref_col.values]
counts = [len(val) for val in col.values]
return all(
rc == c or c in (0, 1) or rc in (0, 1)
for rc, c in zip(ref_counts, counts))
def normalize_repeated_null(value):
"""Normalze the way we represent null in repeated fields.
There's 3 equivalent options: `None`, [], and `[None]`. We chose [] to be
the standard for repeated fields, so this turns any of these into [].
"""
if value is None or value == [None]:
return []
return value
| 38.522388
| 79
| 0.678419
|
from __future__ import absolute_import
from tinyquery import tq_modes
def rebuild_column_values(repetitions, values, result):
if len(repetitions) == 0:
return result
curr_repetition = repetitions[0]
curr_values = normalize_repeated_null(values[:max(curr_repetition, 1)])
return rebuild_column_values(
repetitions[1:],
values[max(curr_repetition, 1):],
result + [curr_values])
def normalize_column_to_length(col, desired_count):
desired_count = max(desired_count, 1)
if isinstance(col, list) and len(col) == desired_count:
return col
elif isinstance(col, list):
assert len(col) in (0, 1), (
'Unexpectedly got a row with the incorrect number of '
'repeated values.')
return (col or [None]) * desired_count
else:
return [col] * desired_count
def flatten_column_values(repeated_column_indices, column_values):
rows = list(zip(*column_values))
repetition_counts = [
max(max(len(row[idx]) for idx in repeated_column_indices), 1)
for row in rows
]
rows_with_repetition_normalized = [
[
normalize_column_to_length(col, count)
for col in row
]
for row, count in zip(rows, repetition_counts)
]
normalized_columns = zip(*rows_with_repetition_normalized)
flattened_columns = [
[val for arr in col for val in arr]
for col in normalized_columns]
return (repetition_counts, flattened_columns)
def columns_have_allowed_repetition_counts(ref_col, col):
if col.mode != tq_modes.REPEATED:
return True
ref_counts = [len(val) for val in ref_col.values]
counts = [len(val) for val in col.values]
return all(
rc == c or c in (0, 1) or rc in (0, 1)
for rc, c in zip(ref_counts, counts))
def normalize_repeated_null(value):
if value is None or value == [None]:
return []
return value
| true
| true
|
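An illustrative round trip through the repeated-field helpers in the tinyquery record above (a sketch, not part of the dataset; it assumes the module is importable as tinyquery.repeated_util):

    from tinyquery.repeated_util import (
        flatten_column_values, rebuild_column_values)

    # one scalar column and one repeated column (index 1)
    numbers = [1, 2]
    letters = [['a', 'b', 'c'], []]

    counts, flat = flatten_column_values([1], [numbers, letters])
    # counts == [3, 1] -- repeated values per row (an empty row counts as 1)
    # flat   == [[1, 1, 1, 2], ['a', 'b', 'c', None]]

    # rebuild the repeated column from its flattened values
    rebuilt = rebuild_column_values(counts, flat[1], [])
    # rebuilt == [['a', 'b', 'c'], []]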
f715ceb943279ec375261a9adc1d7aa35db8622f
| 12,515
|
py
|
Python
|
imputena/simple_imputation/linear_regression.py
|
macarro/imputena
|
3a94ae1419a2af0d9707b20546ee078929ce99e8
|
[
"MIT"
] | 6
|
2020-04-27T21:21:47.000Z
|
2022-03-30T03:02:54.000Z
|
imputena/simple_imputation/linear_regression.py
|
macarro/imputena
|
3a94ae1419a2af0d9707b20546ee078929ce99e8
|
[
"MIT"
] | 1
|
2021-07-01T18:49:27.000Z
|
2021-07-01T18:49:27.000Z
|
imputena/simple_imputation/linear_regression.py
|
macarro/imputena
|
3a94ae1419a2af0d9707b20546ee078929ce99e8
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn import linear_model
import logging
def linear_regression(
data=None, dependent=None, predictors=None, regressions='available',
noise=False, inplace=False):
"""Performs simple or multiple linear regression imputation on the data.
First, the regression equation for the dependent variable given the
predictor variables is computed. For this step, all rows that contain a
missing value in either the dependent variable or any of the predictor
variables are ignored via pairwise deletion. Then, missing values in the
dependent column are imputed using the regression equation. If, in the same
row as a missing value in the dependent variable, the value for any
predictor variable is missing, a regression model based on all available
predictors is calculated just to impute those values where the
predictor(s) are missing. This behavior can be changed by assigning to
the parameter regressions the value 'complete'. In this case, rows in
which a predictor variable is missing do not get imputed. If stochastic
regression imputation should be performed, set noise=True. In this
case, a random value is chosen from a normal distribution with the width
of the standard error of the regression model and added to the imputed
value. If the parameter predictors is omitted, all variables other than
the dependent are used as predictors. If the parameter dependent is
omitted, the operation is performed on all columns that contain missing
values.
:param data: The data on which to perform the linear regression imputation.
:type data: pandas.DataFrame
:param dependent: The dependent variable in which the missing values
should be imputed.
:type dependent: String, optional
:param predictors: The predictor variables on which the dependent variable
is dependent.
:type predictors: array-like, optional
:param regressions: If 'available': Impute missing values by modeling a
regression based on all available predictors if some predictors have
missing values themselves. If 'complete': Only impute with a
regression model based on all predictors and leave missing values in
rows in which some predictor value is missing itself unimputed.
:type regressions: {'available', 'complete'}, default 'available'
:param noise: Whether to add noise to the imputed values (stochastic
regression imputation)
:type noise: bool, default False
:param inplace: If True, do operation inplace and return None.
:type inplace: bool, default False
:return: The dataframe with linear regression imputation performed for the
incomplete variable(s) or None if inplace=True.
:rtype: pandas.DataFrame or None
:raises: TypeError, ValueError
"""
# Check if data is a dataframe:
if not isinstance(data, pd.DataFrame):
raise TypeError('The data has to be a DataFrame.')
# Check if the dependent variable is actually a column of the dataframe:
if dependent is not None and dependent not in data.columns:
raise ValueError(
'\'' + dependent + '\' is not a column of the data.')
# Check if each of the predictor variables is actually a column of the
# dataframe:
if predictors is not None:
for column in predictors:
if column not in data.columns:
raise ValueError(
'\'' + column + '\' is not a column of the data.')
# Assign value to do_available_regressions
if regressions == 'available':
do_available_regressions = True
elif regressions == 'complete':
do_available_regressions = False
else:
raise ValueError(regressions + 'could not be understood')
# Assign a reference or copy to res, depending on inplace:
if inplace:
res = data
else:
res = data.copy()
# If dependent is not set, apply the operation to each column that contains
# missing data:
if dependent is None:
for column in data.columns:
if data[column].isna().any():
res.loc[:, :] = linear_regression_one_dependent(
res, column, predictors, do_available_regressions,
noise)
# Otherwise apply the operation to the dependent column only:
else:
res.loc[:, :] = linear_regression_one_dependent(
data, dependent, predictors, do_available_regressions, noise)
# Return dataframe if the operation is not to be performed inplace:
if not inplace:
return res
def linear_regression_one_dependent(
data, dependent, predictors, do_available_regressions, noise):
"""Auxiliary function that performs linear regression imputation for the
dependent column. The difference with linear_regression() is that in
that function dependent can be None, in which case this function is
called for each column containing missing values.
:param data: The data on which to perform the linear regression imputation.
:type data: pandas.DataFrame
:param dependent: The dependent variable in which the missing values
should be imputed.
:type dependent: String
:param predictors: The predictor variables on which the dependent variable
is dependent.
:type predictors: array-like
:param do_available_regressions: Whether to do regressions for all
available predictor combinations or only on complete ones
:type do_available_regressions: bool
:param noise: Whether to add noise to the imputed values (stochastic
regression imputation)
:type noise: bool
:return: The dataframe with linear regression imputation performed for the
incomplete variable.
:rtype: pandas.DataFrame
"""
# This auxiliary function always returns a copy:
res = data.copy()
# If predictors is None, all variables except for the dependent one are
# considered predictors:
if predictors is None:
predictors = list(data.columns)
predictors.remove(dependent)
# Predictor combination sets and lists
limited_predictors_combs = set()
predictors_combs_done = []
predictors_combs_todo = [tuple(predictors)]
# Perform the operation:
while len(predictors_combs_todo) > 0:
# Select iteration predictors
it_predictors = predictors_combs_todo.pop(0)
# Log iteration beginning:
logging.info('Applying regression imputation with predictors: ' + str(
it_predictors))
# Perform iteration:
res.loc[:, :] = linear_regression_iter(
res, dependent, list(it_predictors), noise,
limited_predictors_combs)
# Update predictor combinations done and to do
predictors_combs_done.append(it_predictors)
if do_available_regressions:
predictors_combs_todo = list(
set(limited_predictors_combs) - set(predictors_combs_done))
# Log iteration end:
logging.info('Predictor combinations done: ' + str(
predictors_combs_done))
logging.info('Predictor combinations to do: ' + str(
predictors_combs_todo))
return res
def linear_regression_iter(
data, dependent, predictors, noise, limited_predictors_combs):
"""Auxiliary function that performs (simple or multiple) linear
regression imputation on the data, for the dependent column only. In rows
that contain a missing value for any predictor variable, the value of the
dependent variable does not get imputed. The operation is always
performed on a copy of the data, which is returned.
:param data: The data on which to perform the linear regression imputation.
:type data: pandas.DataFrame
:param dependent: The dependent variable in which the missing values
should be imputed.
:type dependent: String
:param predictors: The predictor variables on which the dependent variable
is dependent.
:type predictors: array-like
:param noise: Whether to add noise to the imputed value (stochastic
regression imputation)
:type noise: bool
:param limited_predictors_combs: Reference to the set which contains all
limited predictor combinations that are necessary to use because
some predictor had a missing value in some row.
:type limited_predictors_combs: set
:return: A copy of the dataframe with linear regression imputation
performed for the incomplete variable.
:rtype: pandas.DataFrame
"""
# Perform pairwise deletion before calculating the regression
data_pairwise_deleted = data.copy()
variables = predictors.copy()
variables.append(dependent)
data_pairwise_deleted.dropna(subset=variables, inplace=True)
# Calculate the regression:
x = data_pairwise_deleted[predictors]
y = data_pairwise_deleted[dependent]
model = linear_model.LinearRegression()
model.fit(x, y)
# Extract the regression parameters from the model
intercept = model.intercept_
coefs = model.coef_
# Log regression equation:
eq = str(dependent) + ' = ' + str(intercept)
for idx, coef in enumerate(coefs):
eq += ' + ' + str(coef) + '*' + predictors[idx]
logging.info('Regression equation: ' + eq)
# Calculate standard error:
std_error = (model.predict(x) - y).std()
logging.info('Standard error: ' + str(std_error))
# Implementation using apply:
return data.apply(
lambda row: get_imputed_row(
row, dependent, predictors, intercept, coefs, noise, std_error,
limited_predictors_combs),
axis=1, result_type='broadcast')
def get_imputed_row(
row, dependent, predictors, intercept, coefs, noise, std_error,
limited_predictors_combs):
"""Auxiliary function that receives a row of a DataFrame and returns the
same row. If the row contains a missing value for the dependent variable,
it gets imputed according to the regression equation specified by
predictors, intercept and coefs.
:param row: The row for which the missing value should be imputed
:type row: pandas.Series
:param dependent: The dependent variable for which the row might contain a
missing value
:type dependent: String
:param predictors: The predictor variables on which the dependent variable
is dependent.
:type predictors: array-like
:param intercept: The y-intercept of the regression equation.
:type intercept: scalar
:param coefs: The coefficients of the regression equation, in the same
order as the predictors.
:type coefs: array-like,
:param noise: Whether to add noise to the imputed value (stochastic
regression imputation)
:type noise: bool
:param std_error: The standard error of the regression model. Required
if noise=True
:type std_error: scalar
:param limited_predictors_combs: Reference to the set which contains all
limited predictor combinations that are necessary to use because
some predictor had a missing value in some row.
:type limited_predictors_combs: set
:return: The row, with the missing value imputed if it contains one.
:rtype: pandas.Series
"""
res = row.copy()
if pd.isnull(res[dependent]):
# Check whether there are predictors for which the value is NA
na_predictors = tuple(
row[predictors][row[predictors].isnull()].index.to_list())
# If the row contains NA values for one or several predictors,
# add the combination of predictors to na_predictor_combs, in order
# to perform regression without them:
if na_predictors != ():
limited_predictors = tuple(set(predictors) - set(na_predictors))
# Add the limited_predictors to the set only if the combination
# isn't empty:
if limited_predictors != ():
limited_predictors_combs.add(limited_predictors)
# If the row doesn't contain missing values for any predictor, impute:
else:
value = intercept
for idx, coef in enumerate(coefs):
value += coef * row[predictors[idx]]
# If noise == True, add noise (stochastic regression imputation)
if noise:
value += std_error * np.random.randn()
res[dependent] = value
return res
| 46.180812
| 79
| 0.696524
|
import pandas as pd
import numpy as np
from sklearn import linear_model
import logging
def linear_regression(
data=None, dependent=None, predictors=None, regressions='available',
noise=False, inplace=False):
if not isinstance(data, pd.DataFrame):
raise TypeError('The data has to be a DataFrame.')
if dependent is not None and dependent not in data.columns:
raise ValueError(
'\'' + dependent + '\' is not a column of the data.')
if predictors is not None:
for column in predictors:
if column not in data.columns:
raise ValueError(
'\'' + column + '\' is not a column of the data.')
if regressions == 'available':
do_available_regressions = True
elif regressions == 'complete':
do_available_regressions = False
else:
raise ValueError(regressions + 'could not be understood')
if inplace:
res = data
else:
res = data.copy()
if dependent is None:
for column in data.columns:
if data[column].isna().any():
res.loc[:, :] = linear_regression_one_dependent(
res, column, predictors, do_available_regressions,
noise)
else:
res.loc[:, :] = linear_regression_one_dependent(
data, dependent, predictors, do_available_regressions, noise)
if not inplace:
return res
def linear_regression_one_dependent(
data, dependent, predictors, do_available_regressions, noise):
res = data.copy()
if predictors is None:
predictors = list(data.columns)
predictors.remove(dependent)
limited_predictors_combs = set()
predictors_combs_done = []
predictors_combs_todo = [tuple(predictors)]
while len(predictors_combs_todo) > 0:
it_predictors = predictors_combs_todo.pop(0)
logging.info('Applying regression imputation with predictors: ' + str(
it_predictors))
res.loc[:, :] = linear_regression_iter(
res, dependent, list(it_predictors), noise,
limited_predictors_combs)
predictors_combs_done.append(it_predictors)
if do_available_regressions:
predictors_combs_todo = list(
set(limited_predictors_combs) - set(predictors_combs_done))
logging.info('Predictor combinations done: ' + str(
predictors_combs_done))
logging.info('Predictor combinations to do: ' + str(
predictors_combs_todo))
return res
def linear_regression_iter(
data, dependent, predictors, noise, limited_predictors_combs):
data_pairwise_deleted = data.copy()
variables = predictors.copy()
variables.append(dependent)
data_pairwise_deleted.dropna(subset=variables, inplace=True)
x = data_pairwise_deleted[predictors]
y = data_pairwise_deleted[dependent]
model = linear_model.LinearRegression()
model.fit(x, y)
intercept = model.intercept_
coefs = model.coef_
eq = str(dependent) + ' = ' + str(intercept)
for idx, coef in enumerate(coefs):
eq += ' + ' + str(coef) + '*' + predictors[idx]
logging.info('Regression equation: ' + eq)
std_error = (model.predict(x) - y).std()
logging.info('Standard error: ' + str(std_error))
return data.apply(
lambda row: get_imputed_row(
row, dependent, predictors, intercept, coefs, noise, std_error,
limited_predictors_combs),
axis=1, result_type='broadcast')
def get_imputed_row(
row, dependent, predictors, intercept, coefs, noise, std_error,
limited_predictors_combs):
res = row.copy()
if pd.isnull(res[dependent]):
na_predictors = tuple(
row[predictors][row[predictors].isnull()].index.to_list())
if na_predictors != ():
limited_predictors = tuple(set(predictors) - set(na_predictors))
if limited_predictors != ():
limited_predictors_combs.add(limited_predictors)
# If the row doesn't contain missing values for any predictor, impute:
else:
value = intercept
for idx, coef in enumerate(coefs):
value += coef * row[predictors[idx]]
if noise:
value += std_error * np.random.randn()
res[dependent] = value
return res
| true
| true
|
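A minimal usage sketch for the imputena record above (the top-level import path is an assumption, not shown in the file):

    import numpy as np
    import pandas as pd
    from imputena import linear_regression  # hypothetical import path

    df = pd.DataFrame({
        'x': [1.0, 2.0, 3.0, 4.0, 5.0],
        'y': [2.0, 4.0, np.nan, 8.0, 10.0],
    })

    # deterministic imputation: the missing y is predicted from x (about 6.0 here)
    imputed = linear_regression(data=df, dependent='y', predictors=['x'])

    # stochastic variant: noise drawn from the model's standard error is added
    noisy = linear_regression(data=df, dependent='y', predictors=['x'], noise=True)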
f715d03e6e3bce6f65c548086393381517fcc295
| 143
|
py
|
Python
|
packages/pyolite-kernel/py/piplite/piplite/__init__.py
|
luzpaz/jupyterlite
|
4b9d9419918a4ac53bb45b78a3d44d0ca2cd9665
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyolite-kernel/py/piplite/piplite/__init__.py
|
luzpaz/jupyterlite
|
4b9d9419918a4ac53bb45b78a3d44d0ca2cd9665
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyolite-kernel/py/piplite/piplite/__init__.py
|
luzpaz/jupyterlite
|
4b9d9419918a4ac53bb45b78a3d44d0ca2cd9665
|
[
"BSD-3-Clause"
] | null | null | null |
"""A configurable Python package backed by Pyodide's micropip"""
from .piplite import install
__version__ = "0.1.0a23"
__all__ = ["install"]
| 20.428571
| 64
| 0.734266
|
from .piplite import install
__version__ = "0.1.0a23"
__all__ = ["install"]
| true
| true
|
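For context on the piplite record above: install() wraps Pyodide's micropip and is a coroutine, so a notebook cell awaits it. A minimal sketch of that usage pattern (the package name is only an example):

    import piplite
    await piplite.install("snowballstemmer")  # top-level await works in a notebook cell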
f715d0e64ba7e66d2862699c039894e7931be245
| 7,609
|
py
|
Python
|
custom_components/jlrincontrol/services.py
|
stefferber/homeassistant-jlrincontrol
|
d11d931e097cc011047b1ad128f9a4340822117c
|
[
"MIT"
] | 27
|
2020-04-16T06:47:41.000Z
|
2022-01-06T01:55:54.000Z
|
custom_components/jlrincontrol/services.py
|
stefferber/homeassistant-jlrincontrol
|
d11d931e097cc011047b1ad128f9a4340822117c
|
[
"MIT"
] | 40
|
2020-04-16T07:13:08.000Z
|
2022-02-08T21:27:49.000Z
|
custom_components/jlrincontrol/services.py
|
stefferber/homeassistant-jlrincontrol
|
d11d931e097cc011047b1ad128f9a4340822117c
|
[
"MIT"
] | 15
|
2020-04-16T07:09:19.000Z
|
2022-03-02T07:06:49.000Z
|
import inspect
import logging
import asyncio
from urllib import error
from functools import partial
from .const import DOMAIN, JLR_DATA
from .util import convert_temp_value
_LOGGER = logging.getLogger(__name__)
class JLRService:
def __init__(self, hass, config_entry, vin):
self.hass = hass
self.data = hass.data[DOMAIN][config_entry.entry_id][JLR_DATA]
self.vin = vin
self.vehicle = self.data.vehicles[vin]
self.service_code = None
self.service_name = None
self.attributes = self.vehicle.attributes
self.nickname = self.attributes.get("nickname")
async def validate_service_call(self):
if self.service_code and self.service_name:
# Check this is a valid service
if self.check_service_enabled(self.service_code):
# Check no other service calls are awaiting
if not await self.async_get_services():
# OK to make service call
return True
else:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname,
)
+ "Another request is still processing. "
+ "Please try again later."
)
else:
_LOGGER.debug(
"Service {} is not available on vehicle {}".format(
self.service_name, self.nickname,
)
)
else:
_LOGGER.debug(
"Error calling service {}. Invalid parameters".format(
self.service_name
)
)
return False
async def async_call_service(self, **kwargs):
self.service_code = kwargs.get("service_code")
self.service_name = kwargs.get("service_name")
if await self.validate_service_call():
service_kwargs = {}
# populate required parameters for service call
service = getattr(self.vehicle, self.service_name)
for param in inspect.signature(service).parameters:
if param in ["target_value", "target_temp"]:
# convert temp values to car requirements
service_kwargs[param] = convert_temp_value(
self.hass.config.units.temperature_unit,
self.service_code,
kwargs.get(param),
)
else:
service_kwargs[param] = kwargs.get(param)
# Call service
try:
status = await self.hass.async_add_executor_job(
partial(service, **service_kwargs)
)
_LOGGER.info(
"Service {} called on vehicle {}. ".format(
self.service_name, self.nickname,
)
+ "Awaiting feedback on success."
)
# monitor service for success / failure
monitor_status = await self.async_monitor_service_call(
status.get("customerServiceId")
)
return monitor_status
except error.HTTPError as ex:
if ex.code == 401:
_LOGGER.warning(
"Service: {} on vehicle {} ".format(
self.service_name, self.nickname,
)
+ "- not authorised error. Is your pin correct?"
)
else:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname
)
+ "Error is {}".format(ex.msg)
)
except Exception as ex:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname
)
+ "Error is {}".format(ex)
)
else:
_LOGGER.debug(
"Error calling service {}. Invalid parameters".format(
self.service_name
)
)
def check_service_enabled(self, service_code):
"""Check service code is capable and enabled"""
if service_code == "NA":
return True
else:
for service in self.attributes.get("availableServices"):
if service.get("serviceType") == service_code:
if service.get("vehicleCapable") and service.get(
"serviceEnabled"
):
return True
return False
async def async_get_services(self):
"""Check for any exisitng queued service calls to vehicle"""
services = await self.hass.async_add_executor_job(
self.vehicle.get_services
)
if services:
services = services.get("services")
# Check if duplicate
for service in services:
service_id = service.replace(
"/vehicles/{}/services/".format(self.vin), ""
)
# Check service to see if matched to this service call
# TODO: need to test for equivalents like RDL and RDU
try:
status = await self.hass.async_add_executor_job(
partial(self.vehicle.get_service_status, service_id)
)
if status:
if status.get("serviceType") == self.service_code:
return True
except Exception:
pass
return False
else:
return False
async def async_check_service_status(self, service_id):
"""Get status of current service call"""
return await self.hass.async_add_executor_job(
self.vehicle.get_service_status, service_id
)
async def async_monitor_service_call(self, service_id):
result = await self.async_check_service_status(service_id)
if result:
status = result.get("status")
while status == "Started":
_LOGGER.info(
"Checking for {} service call result status.".format(
self.service_name
)
)
await asyncio.sleep(5)
result = await self.async_check_service_status(service_id)
status = result.get("status")
if status and status in ["Successful", "MessageDelivered"]:
_LOGGER.info(
"Service call ({}) to vehicle {} was successful".format(
self.service_name, self.nickname
)
)
return "Successful"
else:
_LOGGER.info(
"InControl service call ({}) to vehicle {} ".format(
self.service_name, self.nickname,
)
+ "failed due to {}. \r\nFull return is {}".format(
result.get("failureReason"), result,
)
)
return status
else:
return None
| 37.29902
| 76
| 0.490078
|
import inspect
import logging
import asyncio
from urllib import error
from functools import partial
from .const import DOMAIN, JLR_DATA
from .util import convert_temp_value
_LOGGER = logging.getLogger(__name__)
class JLRService:
def __init__(self, hass, config_entry, vin):
self.hass = hass
self.data = hass.data[DOMAIN][config_entry.entry_id][JLR_DATA]
self.vin = vin
self.vehicle = self.data.vehicles[vin]
self.service_code = None
self.service_name = None
self.attributes = self.vehicle.attributes
self.nickname = self.attributes.get("nickname")
async def validate_service_call(self):
if self.service_code and self.service_name:
if self.check_service_enabled(self.service_code):
if not await self.async_get_services():
return True
else:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname,
)
+ "Another request is still processing. "
+ "Please try again later."
)
else:
_LOGGER.debug(
"Service {} is not available on vehicle {}".format(
self.service_name, self.nickname,
)
)
else:
_LOGGER.debug(
"Error calling service {}. Invalid parameters".format(
self.service_name
)
)
return False
async def async_call_service(self, **kwargs):
self.service_code = kwargs.get("service_code")
self.service_name = kwargs.get("service_name")
if await self.validate_service_call():
service_kwargs = {}
service = getattr(self.vehicle, self.service_name)
for param in inspect.signature(service).parameters:
if param in ["target_value", "target_temp"]:
service_kwargs[param] = convert_temp_value(
self.hass.config.units.temperature_unit,
self.service_code,
kwargs.get(param),
)
else:
service_kwargs[param] = kwargs.get(param)
try:
status = await self.hass.async_add_executor_job(
partial(service, **service_kwargs)
)
_LOGGER.info(
"Service {} called on vehicle {}. ".format(
self.service_name, self.nickname,
)
+ "Awaiting feedback on success."
)
monitor_status = await self.async_monitor_service_call(
status.get("customerServiceId")
)
return monitor_status
except error.HTTPError as ex:
if ex.code == 401:
_LOGGER.warning(
"Service: {} on vehicle {} ".format(
self.service_name, self.nickname,
)
+ "- not authorised error. Is your pin correct?"
)
else:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname
)
+ "Error is {}".format(ex.msg)
)
except Exception as ex:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname
)
+ "Error is {}".format(ex)
)
else:
_LOGGER.debug(
"Error calling service {}. Invalid parameters".format(
self.service_name
)
)
def check_service_enabled(self, service_code):
if service_code == "NA":
return True
else:
for service in self.attributes.get("availableServices"):
if service.get("serviceType") == service_code:
if service.get("vehicleCapable") and service.get(
"serviceEnabled"
):
return True
return False
async def async_get_services(self):
services = await self.hass.async_add_executor_job(
self.vehicle.get_services
)
if services:
services = services.get("services")
for service in services:
service_id = service.replace(
"/vehicles/{}/services/".format(self.vin), ""
)
try:
status = await self.hass.async_add_executor_job(
partial(self.vehicle.get_service_status, service_id)
)
if status:
if status.get("serviceType") == self.service_code:
return True
except Exception:
pass
return False
else:
return False
async def async_check_service_status(self, service_id):
return await self.hass.async_add_executor_job(
self.vehicle.get_service_status, service_id
)
async def async_monitor_service_call(self, service_id):
result = await self.async_check_service_status(service_id)
if result:
status = result.get("status")
while status == "Started":
_LOGGER.info(
"Checking for {} service call result status.".format(
self.service_name
)
)
await asyncio.sleep(5)
result = await self.async_check_service_status(service_id)
status = result.get("status")
if status and status in ["Successful", "MessageDelivered"]:
_LOGGER.info(
"Service call ({}) to vehicle {} was successful".format(
self.service_name, self.nickname
)
)
return "Successful"
else:
_LOGGER.info(
"InControl service call ({}) to vehicle {} ".format(
self.service_name, self.nickname,
)
+ "failed due to {}. \r\nFull return is {}".format(
result.get("failureReason"), result,
)
)
return status
else:
return None
| true
| true
|
f715d167d46ffec6d0102269f517067c5bcb0733
| 821
|
py
|
Python
|
ch08_dash_standard_components/table_handmade_stylecell.py
|
Ethan0621/plotly-dash-dev
|
abe478824db1ee511a2d92f88e5dad49f5d6e27e
|
[
"MIT"
] | 21
|
2020-10-02T08:17:33.000Z
|
2022-03-22T06:10:17.000Z
|
ch08_dash_standard_components/table_handmade_stylecell.py
|
Ethan0621/plotly-dash-dev
|
abe478824db1ee511a2d92f88e5dad49f5d6e27e
|
[
"MIT"
] | 4
|
2019-07-18T04:43:31.000Z
|
2021-10-31T10:30:25.000Z
|
ch08_dash_standard_components/table_handmade_stylecell.py
|
Ethan0621/plotly-dash-dev
|
abe478824db1ee511a2d92f88e5dad49f5d6e27e
|
[
"MIT"
] | 12
|
2019-07-23T05:36:57.000Z
|
2021-07-11T08:57:47.000Z
|
import dash
import dash_table
app = dash.Dash(__name__)
app.layout = dash_table.DataTable(
fill_width=False,
columns=[
{"name": "number", "id": "number"},
{"name": "region", "id": "area"},
{"name": "tsuyu-iri", "id": "tsuyu-iri"},
],
data=[
{"number": 0, "area": "okinawa", "tsuyu-iri": "5/16"},
{"number": 1, "area": "kyusyu-south", "tsuyu-iri": "5/31"},
{"number": 2, "area": "kyusyu-north", "tsuyu-iri": "6/26"},
{"number": 3, "area": "shikoku", "tsuyu-iri": "6/26"},
{"number": 4, "area": "chugoku", "tsuyu-iri": "6/26"},
{"number": 5, "area": "kinki", "tsuyu-iri": "6/26"},
],
    # ➊ Define the style for all cells in the table (width, font size, text alignment)
style_cell={"width": 160, "fontSize": 24, "textAlign": "center"},
)
app.run_server(debug=True)
| 31.576923
| 69
| 0.527406
|
import dash
import dash_table
app = dash.Dash(__name__)
app.layout = dash_table.DataTable(
fill_width=False,
columns=[
{"name": "number", "id": "number"},
{"name": "region", "id": "area"},
{"name": "tsuyu-iri", "id": "tsuyu-iri"},
],
data=[
{"number": 0, "area": "okinawa", "tsuyu-iri": "5/16"},
{"number": 1, "area": "kyusyu-south", "tsuyu-iri": "5/31"},
{"number": 2, "area": "kyusyu-north", "tsuyu-iri": "6/26"},
{"number": 3, "area": "shikoku", "tsuyu-iri": "6/26"},
{"number": 4, "area": "chugoku", "tsuyu-iri": "6/26"},
{"number": 5, "area": "kinki", "tsuyu-iri": "6/26"},
],
style_cell={"width": 160, "fontSize": 24, "textAlign": "center"},
)
app.run_server(debug=True)
| true
| true
|
f715d1f15c0f57ec6888a0eaa987c5954b2e0137
| 12,603
|
py
|
Python
|
ForgeActivity/forgeactivity/main.py
|
isabella232/allura
|
04f14f15a9a9364e18c61f68acdaa241a470186b
|
[
"Apache-2.0"
] | 113
|
2015-03-25T10:33:37.000Z
|
2022-02-16T20:55:06.000Z
|
ForgeActivity/forgeactivity/main.py
|
apache/allura
|
6184203235ac6f83c943fae7fd3fef54678f9ed7
|
[
"Apache-2.0"
] | 4
|
2017-08-04T16:19:07.000Z
|
2020-06-08T19:01:33.000Z
|
ForgeActivity/forgeactivity/main.py
|
isabella232/allura
|
04f14f15a9a9364e18c61f68acdaa241a470186b
|
[
"Apache-2.0"
] | 36
|
2015-08-14T16:27:39.000Z
|
2022-02-16T20:54:35.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import calendar
from datetime import timedelta
from itertools import islice
from bson import ObjectId
from ming.orm import session
from tg import tmpl_context as c, app_globals as g
from tg import request, response
from tg import expose, validate, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from paste.deploy.converters import asbool, asint
from webob import exc
import feedgenerator as FG
from activitystream.storage.mingstorage import Activity
from allura.app import Application
from allura import version
from allura import model as M
from allura.controllers import BaseController
from allura.controllers.rest import AppRestControllerMixin
from allura.lib.security import require_authenticated, require_access
from allura.model.timeline import perm_check, get_activity_object
from allura.lib import helpers as h
from allura.lib.decorators import require_post
from allura.lib.widgets.form_fields import PageList
from allura.ext.user_profile import ProfileSectionBase
from .widgets.follow import FollowToggle
from six.moves import filter
import re
log = logging.getLogger(__name__)
class ForgeActivityApp(Application):
"""Project Activity page for projects."""
__version__ = version.__version__
default_mount_point = 'activity'
max_instances = 0
searchable = False
has_notifications = False
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ForgeActivityController(self)
self.api_root = ForgeActivityRestController(self)
def admin_menu(self): # pragma no cover
return []
def install(self, project):
role_anon = M.ProjectRole.by_name('*anonymous')._id
self.config.acl = [
M.ACE.allow(role_anon, 'read'),
]
def uninstall(self, project):
pass # pragma no cover
class W:
follow_toggle = FollowToggle()
page_list = PageList()
class ForgeActivityController(BaseController):
def __init__(self, app, *args, **kw):
super(ForgeActivityController, self).__init__(*args, **kw)
self.app = app
setattr(self, 'feed.atom', self.feed)
setattr(self, 'feed.rss', self.feed)
def _check_security(self):
require_access(c.app, 'read')
def _before(self, *args, **kw):
"""Runs before each request to this controller.
"""
# register the custom css for our tool
g.register_app_css('css/activity.css', app=self.app)
def _get_activities_data(self, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
c.follow_toggle = W.follow_toggle
c.page_list = W.page_list
if c.project.is_user_project:
followee = c.project.user_project_of
actor_only = followee != c.user
else:
followee = c.project
actor_only = False
following = g.director.is_connected(c.user, followee)
limit, page = h.paging_sanitizer(kw.get('limit', 100), kw.get('page', 0))
extra_limit = limit
# get more in case perm check filters some out
if page == 0 and limit <= 10:
extra_limit = limit * 20
timeline = g.director.get_timeline(followee, page,
limit=extra_limit,
actor_only=actor_only)
filtered_timeline = list(islice(filter(perm_check(c.user), timeline),
0, limit))
if config.get("default_avatar_image"):
for t in filtered_timeline:
if not t.actor.activity_extras.get('icon_url'):
t.actor.activity_extras.icon_url = config['default_avatar_image']
else:
t.actor.activity_extras.icon_url = re.sub(r'([&?])d=[^&]*',
r'\1d={}'.format(config["default_avatar_image"]),
t.actor.activity_extras.icon_url)
session(t).expunge(t) # don't save back this change
if extra_limit == limit:
# if we didn't ask for extra, then we expect there's more if we got all we asked for
has_more = len(timeline) == limit
else:
# if we did ask for extra, check filtered result
has_more = len(filtered_timeline) == limit
return dict(
followee=followee,
following=following,
timeline=filtered_timeline,
noindex=False if filtered_timeline else True,
page=page,
limit=limit,
has_more=has_more,
actor_only=actor_only)
@expose('jinja:forgeactivity:templates/index.html')
@with_trailing_slash
def index(self, **kw):
return self._get_activities_data(**kw)
@expose('jinja:forgeactivity:templates/timeline.html')
def pjax(self, **kw):
return self._get_activities_data(**kw)
@without_trailing_slash
@expose()
def feed(self, **kw):
data = self._get_activities_data(**kw)
response.headers['Content-Type'] = str('')
response.content_type = str('application/xml')
d = {
'title': 'Activity for %s' % data['followee'].activity_name,
'link': h.absurl(self.app.url),
'description': 'Recent activity for %s' % (
data['followee'].activity_name),
'language': 'en',
}
if request.environ['PATH_INFO'].endswith(str('.atom')):
feed = FG.Atom1Feed(**d)
else:
feed = FG.Rss201rev2Feed(**d)
for t in data['timeline']:
url_id = h.absurl(t.obj.activity_url) # try to keep this consistent over time (not url-quoted)
url = h.absurl(h.urlquote_path_only(t.obj.activity_url))
feed.add_item(title='%s %s %s%s' % (
t.actor.activity_name,
t.verb,
t.obj.activity_name,
' on %s' % t.target.activity_name if t.target.activity_name else '',
),
link=url,
pubdate=t.published,
description=h.strip_bad_unicode(t.obj.activity_extras.get('summary', '')),
unique_id=url_id,
author_name=t.actor.activity_name,
author_link=h.absurl(t.actor.activity_url))
return feed.writeString('utf-8')
@require_post()
@expose('json:')
@validate(W.follow_toggle)
def follow(self, follow, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
require_authenticated()
followee = c.project
if c.project.is_user_project:
followee = c.project.user_project_of
if c.user == followee:
return dict(
success=False,
message='Cannot follow yourself')
try:
if follow:
g.director.connect(c.user, followee)
else:
g.director.disconnect(c.user, followee)
except Exception as e:
log.exception('Unexpected error following user')
return dict(
success=False,
message='Unexpected error: %s' % e)
return dict(
success=True,
message=W.follow_toggle.success_message(follow),
following=follow)
@require_post()
@expose('json:')
def delete_item(self, activity_id, **kwargs):
require_access(c.project.neighborhood, 'admin')
activity = Activity.query.get(_id=ObjectId(activity_id))
if not activity:
raise exc.HTTPGone
# find other copies of this activity on other user/projects timelines
# but only within a small time window, so we can do efficient searching
activity_ts = activity._id.generation_time
time_window = timedelta(hours=1)
all_copies = Activity.query.find({
'_id': {
'$gt': ObjectId.from_datetime(activity_ts - time_window),
'$lt': ObjectId.from_datetime(activity_ts + time_window),
},
'obj': activity.obj,
'target': activity.target,
'actor': activity.actor,
'verb': activity.verb,
'tags': activity.tags,
}).all()
log.info('Deleting %s copies of activity record: %s %s %s', len(all_copies),
activity.actor.activity_url, activity.verb, activity.obj.activity_url)
for activity in all_copies:
activity.query.delete()
return {'success': True}
class ForgeActivityRestController(BaseController, AppRestControllerMixin):
def __init__(self, app, *args, **kw):
super(ForgeActivityRestController, self).__init__(*args, **kw)
self.app = app
def _check_security(self):
require_access(c.app, 'read')
@expose('json:')
def index(self, **kw):
data = self.app.root._get_activities_data(**kw)
return {
'following': data['following'],
'followee': {
'activity_name': data['followee'].activity_name,
'activity_url': data['followee'].url(),
'activity_extras': {},
},
'timeline': [{
'published': calendar.timegm(a.published.timetuple()) * 1000,
'actor': a.actor._deinstrument(),
'verb': a.verb,
'obj': a.obj._deinstrument(),
'target': a.target._deinstrument(),
'tags': a.tags._deinstrument(),
} for a in data['timeline']],
}
class ForgeActivityProfileSection(ProfileSectionBase):
template = 'forgeactivity:templates/widgets/profile_section.html'
def __init__(self, *a, **kw):
super(ForgeActivityProfileSection, self).__init__(*a, **kw)
self.activity_app = self.project.app_instance('activity')
def check_display(self):
app_installed = self.activity_app is not None
activity_enabled = asbool(config.get('activitystream.enabled', False))
return app_installed and activity_enabled
def prepare_context(self, context):
full_timeline = g.director.get_timeline(
self.user, page=0, limit=100,
actor_only=True,
)
filtered_timeline = list(islice(filter(perm_check(c.user), full_timeline),
0, 8))
for activity in filtered_timeline:
# Get the project for the activity.obj so we can use it in the
# template. Expunge first so Ming doesn't try to flush the attr
# we create to temporarily store the project.
#
# The get_activity_object() calls are cheap, pulling from
# the session identity map instead of mongo since identical
# calls are made by perm_check() above.
session(activity).expunge(activity)
activity_obj = get_activity_object(activity.obj)
activity.obj.project = getattr(activity_obj, 'project', None)
context.update({
'follow_toggle': W.follow_toggle,
'following': g.director.is_connected(c.user, self.user),
'timeline': filtered_timeline,
'activity_app': self.activity_app,
})
g.register_js('activity_js/follow.js')
return context
| 38.42378
| 111
| 0.611521
|
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import calendar
from datetime import timedelta
from itertools import islice
from bson import ObjectId
from ming.orm import session
from tg import tmpl_context as c, app_globals as g
from tg import request, response
from tg import expose, validate, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from paste.deploy.converters import asbool, asint
from webob import exc
import feedgenerator as FG
from activitystream.storage.mingstorage import Activity
from allura.app import Application
from allura import version
from allura import model as M
from allura.controllers import BaseController
from allura.controllers.rest import AppRestControllerMixin
from allura.lib.security import require_authenticated, require_access
from allura.model.timeline import perm_check, get_activity_object
from allura.lib import helpers as h
from allura.lib.decorators import require_post
from allura.lib.widgets.form_fields import PageList
from allura.ext.user_profile import ProfileSectionBase
from .widgets.follow import FollowToggle
from six.moves import filter
import re
log = logging.getLogger(__name__)
class ForgeActivityApp(Application):
__version__ = version.__version__
default_mount_point = 'activity'
max_instances = 0
searchable = False
has_notifications = False
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ForgeActivityController(self)
self.api_root = ForgeActivityRestController(self)
def admin_menu(self):
return []
def install(self, project):
role_anon = M.ProjectRole.by_name('*anonymous')._id
self.config.acl = [
M.ACE.allow(role_anon, 'read'),
]
def uninstall(self, project):
pass
class W:
follow_toggle = FollowToggle()
page_list = PageList()
class ForgeActivityController(BaseController):
def __init__(self, app, *args, **kw):
super(ForgeActivityController, self).__init__(*args, **kw)
self.app = app
setattr(self, 'feed.atom', self.feed)
setattr(self, 'feed.rss', self.feed)
def _check_security(self):
require_access(c.app, 'read')
def _before(self, *args, **kw):
g.register_app_css('css/activity.css', app=self.app)
def _get_activities_data(self, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
c.follow_toggle = W.follow_toggle
c.page_list = W.page_list
if c.project.is_user_project:
followee = c.project.user_project_of
actor_only = followee != c.user
else:
followee = c.project
actor_only = False
following = g.director.is_connected(c.user, followee)
limit, page = h.paging_sanitizer(kw.get('limit', 100), kw.get('page', 0))
extra_limit = limit
if page == 0 and limit <= 10:
extra_limit = limit * 20
timeline = g.director.get_timeline(followee, page,
limit=extra_limit,
actor_only=actor_only)
filtered_timeline = list(islice(filter(perm_check(c.user), timeline),
0, limit))
if config.get("default_avatar_image"):
for t in filtered_timeline:
if not t.actor.activity_extras.get('icon_url'):
t.actor.activity_extras.icon_url = config['default_avatar_image']
else:
t.actor.activity_extras.icon_url = re.sub(r'([&?])d=[^&]*',
r'\1d={}'.format(config["default_avatar_image"]),
t.actor.activity_extras.icon_url)
session(t).expunge(t)
if extra_limit == limit:
# if we didn't ask for extra, then we expect there's more if we got all we asked for
has_more = len(timeline) == limit
else:
# if we did ask for extra, check filtered result
has_more = len(filtered_timeline) == limit
return dict(
followee=followee,
following=following,
timeline=filtered_timeline,
noindex=False if filtered_timeline else True,
page=page,
limit=limit,
has_more=has_more,
actor_only=actor_only)
@expose('jinja:forgeactivity:templates/index.html')
@with_trailing_slash
def index(self, **kw):
return self._get_activities_data(**kw)
@expose('jinja:forgeactivity:templates/timeline.html')
def pjax(self, **kw):
return self._get_activities_data(**kw)
@without_trailing_slash
@expose()
def feed(self, **kw):
data = self._get_activities_data(**kw)
response.headers['Content-Type'] = str('')
response.content_type = str('application/xml')
d = {
'title': 'Activity for %s' % data['followee'].activity_name,
'link': h.absurl(self.app.url),
'description': 'Recent activity for %s' % (
data['followee'].activity_name),
'language': 'en',
}
if request.environ['PATH_INFO'].endswith(str('.atom')):
feed = FG.Atom1Feed(**d)
else:
feed = FG.Rss201rev2Feed(**d)
for t in data['timeline']:
url_id = h.absurl(t.obj.activity_url) # try to keep this consistent over time (not url-quoted)
url = h.absurl(h.urlquote_path_only(t.obj.activity_url))
feed.add_item(title='%s %s %s%s' % (
t.actor.activity_name,
t.verb,
t.obj.activity_name,
' on %s' % t.target.activity_name if t.target.activity_name else '',
),
link=url,
pubdate=t.published,
description=h.strip_bad_unicode(t.obj.activity_extras.get('summary', '')),
unique_id=url_id,
author_name=t.actor.activity_name,
author_link=h.absurl(t.actor.activity_url))
return feed.writeString('utf-8')
@require_post()
@expose('json:')
@validate(W.follow_toggle)
def follow(self, follow, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
require_authenticated()
followee = c.project
if c.project.is_user_project:
followee = c.project.user_project_of
if c.user == followee:
return dict(
success=False,
message='Cannot follow yourself')
try:
if follow:
g.director.connect(c.user, followee)
else:
g.director.disconnect(c.user, followee)
except Exception as e:
log.exception('Unexpected error following user')
return dict(
success=False,
message='Unexpected error: %s' % e)
return dict(
success=True,
message=W.follow_toggle.success_message(follow),
following=follow)
@require_post()
@expose('json:')
def delete_item(self, activity_id, **kwargs):
require_access(c.project.neighborhood, 'admin')
activity = Activity.query.get(_id=ObjectId(activity_id))
if not activity:
raise exc.HTTPGone
# find other copies of this activity on other user/projects timelines
# but only within a small time window, so we can do efficient searching
activity_ts = activity._id.generation_time
time_window = timedelta(hours=1)
all_copies = Activity.query.find({
'_id': {
'$gt': ObjectId.from_datetime(activity_ts - time_window),
'$lt': ObjectId.from_datetime(activity_ts + time_window),
},
'obj': activity.obj,
'target': activity.target,
'actor': activity.actor,
'verb': activity.verb,
'tags': activity.tags,
}).all()
log.info('Deleting %s copies of activity record: %s %s %s', len(all_copies),
activity.actor.activity_url, activity.verb, activity.obj.activity_url)
for activity in all_copies:
activity.query.delete()
return {'success': True}
class ForgeActivityRestController(BaseController, AppRestControllerMixin):
def __init__(self, app, *args, **kw):
super(ForgeActivityRestController, self).__init__(*args, **kw)
self.app = app
def _check_security(self):
require_access(c.app, 'read')
@expose('json:')
def index(self, **kw):
data = self.app.root._get_activities_data(**kw)
return {
'following': data['following'],
'followee': {
'activity_name': data['followee'].activity_name,
'activity_url': data['followee'].url(),
'activity_extras': {},
},
'timeline': [{
'published': calendar.timegm(a.published.timetuple()) * 1000,
'actor': a.actor._deinstrument(),
'verb': a.verb,
'obj': a.obj._deinstrument(),
'target': a.target._deinstrument(),
'tags': a.tags._deinstrument(),
} for a in data['timeline']],
}
class ForgeActivityProfileSection(ProfileSectionBase):
template = 'forgeactivity:templates/widgets/profile_section.html'
def __init__(self, *a, **kw):
super(ForgeActivityProfileSection, self).__init__(*a, **kw)
self.activity_app = self.project.app_instance('activity')
def check_display(self):
app_installed = self.activity_app is not None
activity_enabled = asbool(config.get('activitystream.enabled', False))
return app_installed and activity_enabled
def prepare_context(self, context):
full_timeline = g.director.get_timeline(
self.user, page=0, limit=100,
actor_only=True,
)
filtered_timeline = list(islice(filter(perm_check(c.user), full_timeline),
0, 8))
for activity in filtered_timeline:
# Get the project for the activity.obj so we can use it in the
# template. Expunge first so Ming doesn't try to flush the attr
session(activity).expunge(activity)
activity_obj = get_activity_object(activity.obj)
activity.obj.project = getattr(activity_obj, 'project', None)
context.update({
'follow_toggle': W.follow_toggle,
'following': g.director.is_connected(c.user, self.user),
'timeline': filtered_timeline,
'activity_app': self.activity_app,
})
g.register_js('activity_js/follow.js')
return context
| true
| true
|
f715d278f72818778ed7a4eac29242b89126d982
| 1,827
|
py
|
Python
|
setup/authorization.py
|
LenoxFro/spotify-save-discover-weekly
|
1fecd101ad21a96dbb8fef6b402358386e0e5687
|
[
"MIT"
] | 8
|
2021-03-31T22:05:56.000Z
|
2022-01-01T22:42:59.000Z
|
setup/authorization.py
|
LenoxFro/spotify-save-discover-weekly
|
1fecd101ad21a96dbb8fef6b402358386e0e5687
|
[
"MIT"
] | null | null | null |
setup/authorization.py
|
LenoxFro/spotify-save-discover-weekly
|
1fecd101ad21a96dbb8fef6b402358386e0e5687
|
[
"MIT"
] | 10
|
2021-03-31T22:11:58.000Z
|
2022-03-31T10:55:36.000Z
|
import urllib.parse
from urllib.parse import parse_qs
from dotenv import load_dotenv, find_dotenv
import requests
import base64
import os
load_dotenv(find_dotenv())
CLIENT_ID = os.environ.get("CLIENT_ID")
CLIENT_SECRET = os.environ.get("CLIENT_SECRET")
REDIRECT_URI = os.environ.get("REDIRECT_URI")
OAUTH_AUTHORIZE_URL = "https://accounts.spotify.com/authorize"
OAUTH_TOKEN_URL = "https://accounts.spotify.com/api/token"
SCOPE = "user-library-read playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative"
def get_auth_url():
payload = {
"client_id": CLIENT_ID,
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE
}
urlparams = urllib.parse.urlencode(payload)
return ("%s?%s" % (OAUTH_AUTHORIZE_URL, urlparams))
def get_refresh_token(code):
payload = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": REDIRECT_URI
}
encoded_client = base64.b64encode((CLIENT_ID + ":" + CLIENT_SECRET).encode('ascii'))
headers = {"Authorization": "Basic %s" % encoded_client.decode('ascii')}
response = requests.post(OAUTH_TOKEN_URL, data=payload, headers=headers)
return response.json()['refresh_token']
def authorization():
if CLIENT_ID is None or CLIENT_SECRET is None or REDIRECT_URI is None:
print("Environment variables have not been loaded!")
return
print("Open this link in your browser: %s \n" % get_auth_url() )
redirected_url = input("Enter URL you was redirected to (after accepting authorization): ")
parsed_url = urllib.parse.urlparse(redirected_url)
code = parse_qs(parsed_url.query)['code'][0]
refresh_token = get_refresh_token(code)
print("\n Your refresh token is: %s" % refresh_token)
authorization()
| 34.471698
| 124
| 0.712644
|
import urllib.parse
from urllib.parse import parse_qs
from dotenv import load_dotenv, find_dotenv
import requests
import base64
import os
load_dotenv(find_dotenv())
CLIENT_ID = os.environ.get("CLIENT_ID")
CLIENT_SECRET = os.environ.get("CLIENT_SECRET")
REDIRECT_URI = os.environ.get("REDIRECT_URI")
OAUTH_AUTHORIZE_URL = "https://accounts.spotify.com/authorize"
OAUTH_TOKEN_URL = "https://accounts.spotify.com/api/token"
SCOPE = "user-library-read playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative"
def get_auth_url():
payload = {
"client_id": CLIENT_ID,
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE
}
urlparams = urllib.parse.urlencode(payload)
return ("%s?%s" % (OAUTH_AUTHORIZE_URL, urlparams))
def get_refresh_token(code):
payload = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": REDIRECT_URI
}
encoded_client = base64.b64encode((CLIENT_ID + ":" + CLIENT_SECRET).encode('ascii'))
headers = {"Authorization": "Basic %s" % encoded_client.decode('ascii')}
response = requests.post(OAUTH_TOKEN_URL, data=payload, headers=headers)
return response.json()['refresh_token']
def authorization():
if CLIENT_ID is None or CLIENT_SECRET is None or REDIRECT_URI is None:
print("Environment variables have not been loaded!")
return
print("Open this link in your browser: %s \n" % get_auth_url() )
redirected_url = input("Enter URL you was redirected to (after accepting authorization): ")
parsed_url = urllib.parse.urlparse(redirected_url)
code = parse_qs(parsed_url.query)['code'][0]
refresh_token = get_refresh_token(code)
print("\n Your refresh token is: %s" % refresh_token)
authorization()
| true
| true
|
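The authorization script above stops at printing the long-lived refresh token. A follow-on sketch (not part of the file) of the standard Spotify refresh flow that exchanges it for a short-lived access token, reusing the same Basic-auth header construction:

    import base64
    import os
    import requests

    def get_access_token(refresh_token):
        # same client-id:client-secret Basic auth the script uses for the code exchange
        encoded = base64.b64encode(
            (os.environ["CLIENT_ID"] + ":" + os.environ["CLIENT_SECRET"]).encode('ascii'))
        headers = {"Authorization": "Basic %s" % encoded.decode('ascii')}
        payload = {"grant_type": "refresh_token", "refresh_token": refresh_token}
        response = requests.post("https://accounts.spotify.com/api/token",
                                 data=payload, headers=headers)
        return response.json()["access_token"]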
f715d3e1902988bafee7ba1ae8d7e9743ffae740
| 1,362
|
py
|
Python
|
abt/cli/add.py
|
kamikat/a2torrent
|
543cea17f81c0cebfcb14a72b7c4d78fcbd9599e
|
[
"MIT"
] | 5
|
2017-06-18T03:30:54.000Z
|
2019-02-28T16:36:16.000Z
|
abt/cli/add.py
|
kamikat/a2torrent
|
543cea17f81c0cebfcb14a72b7c4d78fcbd9599e
|
[
"MIT"
] | 1
|
2017-12-11T06:54:13.000Z
|
2017-12-18T15:20:36.000Z
|
abt/cli/add.py
|
kamikat/a2torrent
|
543cea17f81c0cebfcb14a72b7c4d78fcbd9599e
|
[
"MIT"
] | 1
|
2017-12-11T07:06:13.000Z
|
2017-12-11T07:06:13.000Z
|
#!/usr/bin/env python2
"""
Add BitTorrent download task
"""
import argparse
import abt.cli as cli
import abt.rpc_client as client
import base64
import os
import tempfile
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=cli.progname, description=__doc__.strip())
parser.add_argument('torrent', nargs='?', action='store', help="path to torrent file")
parser.add_argument('--uri', action='store', help="load torrent file from uri")
conn, extra = cli.parse_connection_options()
options, extra = cli.parse_option_dict(extra)
args = parser.parse_args(extra)
if (not args.torrent) == (not args.uri):
parser.error("Exactly one of torrent file path or --uri is expected.")
aria2, _ = client.connect(**conn)
if args.uri:
_, torrent_file = tempfile.mkstemp()
fmt = "aria2c --no-conf --allow-overwrite=true --follow-torrent=false --async-dns=false --dir=%s --out=%s \"%s\""
cmd = fmt % (os.path.dirname(torrent_file), os.path.basename(torrent_file), args.uri)
ret = os.system(cmd)
if ret != 0:
cli.error("Failed. Cannot fetch torrent from %s" % args.uri)
else:
torrent_file = args.torrent
torrent_data = open(torrent_file, 'rb').read()
torrent_b64 = base64.b64encode(torrent_data)
print aria2.addTorrent(torrent_b64, [], options)
| 33.219512
| 121
| 0.669604
|
"""
Add BitTorrent download task
"""
import argparse
import abt.cli as cli
import abt.rpc_client as client
import base64
import os
import tempfile
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=cli.progname, description=__doc__.strip())
parser.add_argument('torrent', nargs='?', action='store', help="path to torrent file")
parser.add_argument('--uri', action='store', help="load torrent file from uri")
conn, extra = cli.parse_connection_options()
options, extra = cli.parse_option_dict(extra)
args = parser.parse_args(extra)
if (not args.torrent) == (not args.uri):
parser.error("Exactly one of torrent file path or --uri is expected.")
aria2, _ = client.connect(**conn)
if args.uri:
_, torrent_file = tempfile.mkstemp()
fmt = "aria2c --no-conf --allow-overwrite=true --follow-torrent=false --async-dns=false --dir=%s --out=%s \"%s\""
cmd = fmt % (os.path.dirname(torrent_file), os.path.basename(torrent_file), args.uri)
ret = os.system(cmd)
if ret != 0:
cli.error("Failed. Cannot fetch torrent from %s" % args.uri)
else:
torrent_file = args.torrent
torrent_data = open(torrent_file, 'rb').read()
torrent_b64 = base64.b64encode(torrent_data)
print aria2.addTorrent(torrent_b64, [], options)
| false
| true
|
f715d41a7338077da2b9062cb81f2988564db3d0
| 4,328
|
py
|
Python
|
gouda/bot.py
|
fxcqz/gouda3
|
079bcb52f6357dc7704ec845d916f961a18d59cd
|
[
"MIT"
] | null | null | null |
gouda/bot.py
|
fxcqz/gouda3
|
079bcb52f6357dc7704ec845d916f961a18d59cd
|
[
"MIT"
] | 10
|
2016-03-22T11:52:19.000Z
|
2016-03-27T16:19:06.000Z
|
gouda/bot.py
|
fxcqz/gouda3
|
079bcb52f6357dc7704ec845d916f961a18d59cd
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import importlib
from peewee import SqliteDatabase
from .settings import Settings
DATABASE = SqliteDatabase("gouda.db")
class Gouda(object):
def __init__(self):
self.settings = Settings("config/config.json")
self.name = self.settings.core['nick']
# use ordered dict for definite evaluation
# i.e. we know logs will always be saved first since core is loaded
# first
self.modules = OrderedDict()
self.mains = OrderedDict()
self.load_modules()
self.commands = self.load_commands()
self.db = DATABASE
self.db.connect()
def load_module(self, module):
try:
self.modules[module] = importlib.import_module('gouda.modules.%s.main' % module)
self.commands = self.load_commands()
if hasattr(self.modules[module], "run_schema"):
self.modules[module].run_schema()
if hasattr(self.modules[module], "main"):
self.mains[module] = getattr(self.modules[module], "main")
except ImportError as e:
print("Oh no, an import error:", e)
def load_modules(self):
""" only run on init, innit """
module_list = self.settings['modules']
for module in module_list:
self.load_module(module)
def manage_modules(self, loads, unloads, reloads):
for load in loads:
if load in self.modules:
reloads.append(load)
else:
self.load_module(load)
for unload in unloads:
if unload in self.modules:
self.modules.pop(unload, None)
for reload_ in reloads:
if reload_ in self.modules:
importlib.reload(self.modules[reload_])
if reload_ in self.mains:
self.mains[reload_] = self.modules[reload_].main
self.commands = self.load_commands()
def load_commands(self):
cmds = {}
for name, module in self.modules.items():
try:
commands = getattr(module, "commands")
for command in commands:
if command.lower() != 'none':
cmds[command] = name
except AttributeError:
# no command list implemented in module
pass
return cmds
def get_loads(self, kind, mod):
loads, unloads, reloads = [], [], []
if kind == "load":
loads.append(mod)
elif kind == "unload":
unloads.append(mod)
elif kind == "reload":
reloads.append(mod)
return loads, unloads, reloads
def run(self, conn):
kwargs = {'writer': conn.message, 'db': self.db, 'log': True, 'name': self.name}
while True:
nick, line = conn.read()
loads, unloads, reloads = [], [], []
if line and ''.join(line) != '':
offset = 0
if line[0][:-1] == self.name and len(line) > 1:
offset = 1
# addressed to the bot
kwargs['log'] = False
if len(line) > 2:
loads, unloads, reloads = self.get_loads(line[offset], line[offset+1])
if not (loads or unloads or reloads):
# nothing *loaded, try commands
try:
module = self.commands[line[1].lower()]
func = getattr(self.modules[module], line[1])
msg = line[offset:]
func(
message=msg,
commands=self.commands.keys(),
modules=self.modules.keys(),
**kwargs
)
except Exception as e:
# pretty much anything can fuck it up
print(e)
# run anything else...
for func in self.mains.values():
func(line=line, nick=nick, **kwargs)
# load/unload/reload
self.manage_modules(loads, unloads, reloads)
kwargs['log'] = True
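# For reference, a minimal sketch of a module this loader can import. It assumes
# a file at gouda/modules/echo/main.py; the module name "echo" and its behaviour
# are made up for illustration and are not part of this repository.
#
#     commands = ['echo']                   # words load_commands() maps to this module
#
#     def echo(message, writer, **kwargs):  # dispatched via getattr(module, line[1])
#         writer(' '.join(message[1:]))
#
#     def main(line, nick, log, **kwargs):  # collected into self.mains by load_module()
#         if log:
#             print(nick, line)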
| 37.634783
| 94
| 0.501848
|
from collections import OrderedDict
import importlib
from peewee import SqliteDatabase
from .settings import Settings
DATABASE = SqliteDatabase("gouda.db")
class Gouda(object):
def __init__(self):
self.settings = Settings("config/config.json")
self.name = self.settings.core['nick']
self.modules = OrderedDict()
self.mains = OrderedDict()
self.load_modules()
self.commands = self.load_commands()
self.db = DATABASE
self.db.connect()
def load_module(self, module):
try:
self.modules[module] = importlib.import_module('gouda.modules.%s.main' % module)
self.commands = self.load_commands()
if hasattr(self.modules[module], "run_schema"):
self.modules[module].run_schema()
if hasattr(self.modules[module], "main"):
self.mains[module] = getattr(self.modules[module], "main")
except ImportError as e:
print("Oh no, an import error:", e)
def load_modules(self):
module_list = self.settings['modules']
for module in module_list:
self.load_module(module)
def manage_modules(self, loads, unloads, reloads):
for load in loads:
if load in self.modules:
reloads.append(load)
else:
self.load_module(load)
for unload in unloads:
if unload in self.modules:
self.modules.pop(unload, None)
for reload_ in reloads:
if reload_ in self.modules:
importlib.reload(self.modules[reload_])
if reload_ in self.mains:
self.mains[reload_] = self.modules[reload_].main
self.commands = self.load_commands()
def load_commands(self):
cmds = {}
for name, module in self.modules.items():
try:
commands = getattr(module, "commands")
for command in commands:
if command.lower() != 'none':
cmds[command] = name
except AttributeError:
pass
return cmds
def get_loads(self, kind, mod):
loads, unloads, reloads = [], [], []
if kind == "load":
loads.append(mod)
elif kind == "unload":
unloads.append(mod)
elif kind == "reload":
reloads.append(mod)
return loads, unloads, reloads
def run(self, conn):
kwargs = {'writer': conn.message, 'db': self.db, 'log': True, 'name': self.name}
while True:
nick, line = conn.read()
loads, unloads, reloads = [], [], []
if line and ''.join(line) != '':
offset = 0
if line[0][:-1] == self.name and len(line) > 1:
offset = 1
kwargs['log'] = False
if len(line) > 2:
loads, unloads, reloads = self.get_loads(line[offset], line[offset+1])
if not (loads or unloads or reloads):
try:
module = self.commands[line[1].lower()]
func = getattr(self.modules[module], line[1])
msg = line[offset:]
func(
message=msg,
commands=self.commands.keys(),
modules=self.modules.keys(),
**kwargs
)
except Exception as e:
print(e)
for func in self.mains.values():
func(line=line, nick=nick, **kwargs)
self.manage_modules(loads, unloads, reloads)
kwargs['log'] = True
| true
| true
|
f715d44ead41064419d591910c9d8e9251cb95b1
| 4,635
|
py
|
Python
|
3 experiments_confidence/batch/e2 (experiment and chance scores) (cpj).py
|
nmningmei/metacognition
|
734082e247cc7fc9d277563e2676e10692617a3f
|
[
"MIT"
] | 3
|
2019-07-09T15:37:46.000Z
|
2019-07-17T16:28:02.000Z
|
3 experiments_confidence/batch/e2 (experiment and chance scores) (cpj).py
|
nmningmei/metacognition
|
734082e247cc7fc9d277563e2676e10692617a3f
|
[
"MIT"
] | null | null | null |
3 experiments_confidence/batch/e2 (experiment and chance scores) (cpj).py
|
nmningmei/metacognition
|
734082e247cc7fc9d277563e2676e10692617a3f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 16:07:58 2018
@author: nmei
in exp2 (e2) there were 3 possible awareness ratings (e.g. 1 - no experience, 2 - brief glimpse, 3 - almost clear or clear perception),
BUT we can make a binary classification by focusing on 1 and 2, which are the majority of the trials.
"""
if __name__ == '__main__':
import os
import pandas as pd
import numpy as np
import utils
# define result saving directory
dir_saving = 'results_e2'
if not os.path.exists(dir_saving):
os.mkdir(dir_saving)
    try:  # the subject level processing
df1 = pd.read_csv('e2.csv').iloc[:,1:]
except: # when I test the script
df1 = pd.read_csv('../e2.csv').iloc[:,1:]
df = df1.copy()
# select the columns that I need
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
# rename the columns
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
# preallocate the data frame structure
results = dict(sub = [],
model = [],
score = [],
window = [],
correctness = [],
awareness = [],
confidence = [],
chance = [],
)
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'confidence'
experiment = 'e2'
    # for some of the variables, we need to rescale them to a more convenient range like 0-1
name_for_scale = ['awareness']
# 'ack', 'cc', 'ck', 'cpj', 'em', 'es', 'fd', 'jmac', 'lidia', 'ls','mimi', 'pr', 'pss', 'sva', 'tj'
# get one of the participants' data
participant = 'cpj'
df_sub = df[df['participant'] == participant]
# pick 1- no experience, 2 brief glimpse for binary classification
df_sub = df_sub[df_sub['awareness'] != 3]
# for 1-back to 4-back
for n_back in np.arange(1,5):
# experiment score
results = utils.classification(
df_sub.dropna(), # take out nan rows
feature_names, # feature columns
target_name, # target column
results, # the saving structure
participant, # participant's name
experiment, # experiment name
window = n_back, # N-back
chance = False, # it is NOT estimating the chance level but the empirical classification experiment
name_for_scale = name_for_scale # scale some of the variables
)
# empirical chance level
results = utils.classification(
df_sub.dropna(),
feature_names,
target_name,
results,
participant,
experiment,
window = n_back,
chance = True, # it is to estimate the empirical chance level
name_for_scale = name_for_scale
)
results_to_save = pd.DataFrame(results)
results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))
| 31.965517
| 159
| 0.408846
|
if __name__ == '__main__':
import os
import pandas as pd
import numpy as np
import utils
dir_saving = 'results_e2'
if not os.path.exists(dir_saving):
os.mkdir(dir_saving)
try:
df1 = pd.read_csv('e2.csv').iloc[:,1:]
except:
df1 = pd.read_csv('../e2.csv').iloc[:,1:]
df = df1.copy()
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
results = dict(sub = [],
model = [],
score = [],
window = [],
correctness = [],
awareness = [],
confidence = [],
chance = [],
)
np.random.seed(12345)
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'confidence'
experiment = 'e2'
name_for_scale = ['awareness']
participant = 'cpj'
df_sub = df[df['participant'] == participant]
# pick 1- no experience, 2 brief glimpse for binary classification
df_sub = df_sub[df_sub['awareness'] != 3]
# for 1-back to 4-back
for n_back in np.arange(1,5):
# experiment score
results = utils.classification(
df_sub.dropna(), # take out nan rows
feature_names, # feature columns
target_name, # target column
results, # the saving structure
participant, # participant's name
experiment,
window = n_back,
chance = False,
name_for_scale = name_for_scale
)
results = utils.classification(
df_sub.dropna(),
feature_names,
target_name,
results,
participant,
experiment,
window = n_back,
chance = True,
name_for_scale = name_for_scale
)
results_to_save = pd.DataFrame(results)
results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))
| true
| true
|
f715d5acbe3a069259390dee428b7666dca26c08
| 9,706
|
py
|
Python
|
src/intermediate_representation/sem_utils.py
|
ckosten/ValueNet4SPARQL
|
de320a2f0e1a4c5a6c0e5cc79057dda9901046e8
|
[
"Apache-2.0"
] | null | null | null |
src/intermediate_representation/sem_utils.py
|
ckosten/ValueNet4SPARQL
|
de320a2f0e1a4c5a6c0e5cc79057dda9901046e8
|
[
"Apache-2.0"
] | null | null | null |
src/intermediate_representation/sem_utils.py
|
ckosten/ValueNet4SPARQL
|
de320a2f0e1a4c5a6c0e5cc79057dda9901046e8
|
[
"Apache-2.0"
] | 1
|
2021-09-23T13:02:45.000Z
|
2021-09-23T13:02:45.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding: utf-8 -*-
"""
# @Time : 2019/5/27
# @Author : Jiaqi&Zecheng
# @File : sem_utils.py
# @Software: PyCharm
"""
import os
import json
import re as regex
import spacy
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
def partial_match(query, table_name):
query = [token.lemma_ for token in nlp(query)]
table_name = [nlp(token)[0].lemma_ for token in table_name]
if query in table_name:
return True
return False
def is_partial_match(query, table_names):
query = nlp(query)[0].lemma_
table_names = [[token.lemma_ for token in nlp(names)] for names in table_names]
same_count = 0
result = None
for names in table_names:
if query in names:
same_count += 1
result = names
return result if same_count == 1 else False
def multi_option(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
re = is_partial_match(question[i][0], names)
if re is not False:
return re
return False
def multi_equal(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
if question[i] == names:
return i
return False
def random_choice(question_arg, question_arg_type, names, ground_col_labels, q_ind, N, origin_name):
    # first, check whether the question already mentions a table
for t_ind, t_val in enumerate(question_arg_type):
if t_val == ['table']:
return names[origin_name.index(question_arg[t_ind])]
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question_arg):
if len(ground_col_labels) == 0:
for n in names:
if partial_match(question_arg[i][0], n) is True:
return n
else:
for n_id, n in enumerate(names):
if n_id in ground_col_labels and partial_match(question_arg[i][0], n) is True:
return n
if len(ground_col_labels) > 0:
return names[ground_col_labels[0]]
else:
return names[0]
def alter_column0(datas):
"""
Attach column * table
:return: model_result_replace
"""
zero_count = 0
count = 0
result = []
for d in datas:
if 'C(0)' in d['model_result']:
pattern = regex.compile('C\(.*?\) T\(.*?\)')
result_pattern = list(set(pattern.findall(d['model_result'])))
ground_col_labels = []
for pa in result_pattern:
pa = pa.split(' ')
if pa[0] != 'C(0)':
index = int(pa[1][2:-1])
ground_col_labels.append(index)
ground_col_labels = list(set(ground_col_labels))
question_arg_type = d['question_arg_type']
question_arg = d['question_arg']
table_names = [[token.lemma_ for token in nlp(names)] for names in d['table_names']]
origin_table_names = [[wordnet_lemmatizer.lemmatize(x.lower()) for x in names.split(' ')] for names in
d['table_names']]
count += 1
easy_flag = False
for q_ind, q in enumerate(d['question_arg']):
q_str = " ".join(" ".join(x) for x in d['question_arg'])
if 'how many' in q_str or 'number of' in q_str or 'count of' in q_str:
easy_flag = True
if easy_flag:
                # check whether the last one is a table word
for q_ind, q in enumerate(d['question_arg']):
if (q_ind > 0 and q == ['many'] and d['question_arg'][q_ind - 1] == ['how']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['number']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['count']):
re = multi_equal(question_arg_type, q_ind, ['table'], 2)
if re is not False:
                            # This step works for the "number of [table]" example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 2)
if re is not False:
table_result = re
result.append((d['query'], d['question'], table_result, d))
pass
else:
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
                                    # This step works for the "number of [table]" example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
pass
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
zero_count += 1
break
else:
M_OP = False
for q_ind, q in enumerate(d['question_arg']):
if M_OP is False and q in [['than'], ['least'], ['most'], ['msot'], ['fewest']] or \
question_arg_type[q_ind] == ['M_OP']:
M_OP = True
re = multi_equal(question_arg_type, q_ind, ['table'], 3)
if re is not False:
                            # This step works for the "number of [table]" example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 3)
if re is not False:
table_result = re
# print(table_result)
result.append((d['query'], d['question'], table_result, d))
pass
else:
# zero_count += 1
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
                                    # This step works for the "number of [table]" example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
pass
if M_OP is False:
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names, ground_col_labels=ground_col_labels, q_ind=q_ind,
N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
for re in result:
table_names = [[token.lemma_ for token in nlp(names)] for names in re[3]['table_names']]
origin_table_names = [[x for x in names.split(' ')] for names in re[3]['table_names']]
if re[2] in table_names:
re[3]['rule_count'] = table_names.index(re[2])
else:
re[3]['rule_count'] = origin_table_names.index(re[2])
for data in datas:
if 'rule_count' in data:
str_replace = 'C(0) T(' + str(data['rule_count']) + ')'
replace_result = regex.sub('C\(0\) T\(.\)', str_replace, data['model_result'])
data['model_result_replace'] = replace_result
else:
data['model_result_replace'] = data['model_result']
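# A tiny, self-contained check of the two regular expressions used above, run on
# a made-up SemQL fragment; the sample string is illustrative and is not taken
# from the dataset this module normally processes.
if __name__ == '__main__':
    sample = 'Sel(0) N(0) A(3) C(0) T(1) Filter(2) A(0) C(4) T(1)'
    # the non-greedy pattern pulls out every "C(i) T(j)" pair
    assert regex.findall('C\(.*?\) T\(.*?\)', sample) == ['C(0) T(1)', 'C(4) T(1)']
    # the substitution rewrites only the placeholder column C(0)
    assert regex.sub('C\(0\) T\(.\)', 'C(0) T(2)', sample) == 'Sel(0) N(0) A(3) C(0) T(2) Filter(2) A(0) C(4) T(1)'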
| 46.888889
| 117
| 0.474861
|
import os
import json
import re as regex
import spacy
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
def partial_match(query, table_name):
query = [token.lemma_ for token in nlp(query)]
table_name = [nlp(token)[0].lemma_ for token in table_name]
if query in table_name:
return True
return False
def is_partial_match(query, table_names):
query = nlp(query)[0].lemma_
table_names = [[token.lemma_ for token in nlp(names)] for names in table_names]
same_count = 0
result = None
for names in table_names:
if query in names:
same_count += 1
result = names
return result if same_count == 1 else False
def multi_option(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
re = is_partial_match(question[i][0], names)
if re is not False:
return re
return False
def multi_equal(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
if question[i] == names:
return i
return False
def random_choice(question_arg, question_arg_type, names, ground_col_labels, q_ind, N, origin_name):
for t_ind, t_val in enumerate(question_arg_type):
if t_val == ['table']:
return names[origin_name.index(question_arg[t_ind])]
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question_arg):
if len(ground_col_labels) == 0:
for n in names:
if partial_match(question_arg[i][0], n) is True:
return n
else:
for n_id, n in enumerate(names):
if n_id in ground_col_labels and partial_match(question_arg[i][0], n) is True:
return n
if len(ground_col_labels) > 0:
return names[ground_col_labels[0]]
else:
return names[0]
def alter_column0(datas):
zero_count = 0
count = 0
result = []
for d in datas:
if 'C(0)' in d['model_result']:
pattern = regex.compile('C\(.*?\) T\(.*?\)')
result_pattern = list(set(pattern.findall(d['model_result'])))
ground_col_labels = []
for pa in result_pattern:
pa = pa.split(' ')
if pa[0] != 'C(0)':
index = int(pa[1][2:-1])
ground_col_labels.append(index)
ground_col_labels = list(set(ground_col_labels))
question_arg_type = d['question_arg_type']
question_arg = d['question_arg']
table_names = [[token.lemma_ for token in nlp(names)] for names in d['table_names']]
origin_table_names = [[wordnet_lemmatizer.lemmatize(x.lower()) for x in names.split(' ')] for names in
d['table_names']]
count += 1
easy_flag = False
for q_ind, q in enumerate(d['question_arg']):
q_str = " ".join(" ".join(x) for x in d['question_arg'])
if 'how many' in q_str or 'number of' in q_str or 'count of' in q_str:
easy_flag = True
if easy_flag:
for q_ind, q in enumerate(d['question_arg']):
if (q_ind > 0 and q == ['many'] and d['question_arg'][q_ind - 1] == ['how']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['number']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['count']):
re = multi_equal(question_arg_type, q_ind, ['table'], 2)
if re is not False:
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 2)
if re is not False:
table_result = re
result.append((d['query'], d['question'], table_result, d))
pass
else:
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
pass
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
zero_count += 1
break
else:
M_OP = False
for q_ind, q in enumerate(d['question_arg']):
if M_OP is False and q in [['than'], ['least'], ['most'], ['msot'], ['fewest']] or \
question_arg_type[q_ind] == ['M_OP']:
M_OP = True
re = multi_equal(question_arg_type, q_ind, ['table'], 3)
if re is not False:
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 3)
if re is not False:
table_result = re
result.append((d['query'], d['question'], table_result, d))
pass
else:
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
pass
if M_OP is False:
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names, ground_col_labels=ground_col_labels, q_ind=q_ind,
N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
for re in result:
table_names = [[token.lemma_ for token in nlp(names)] for names in re[3]['table_names']]
origin_table_names = [[x for x in names.split(' ')] for names in re[3]['table_names']]
if re[2] in table_names:
re[3]['rule_count'] = table_names.index(re[2])
else:
re[3]['rule_count'] = origin_table_names.index(re[2])
for data in datas:
if 'rule_count' in data:
str_replace = 'C(0) T(' + str(data['rule_count']) + ')'
replace_result = regex.sub('C\(0\) T\(.\)', str_replace, data['model_result'])
data['model_result_replace'] = replace_result
else:
data['model_result_replace'] = data['model_result']
| true
| true
|
f715d67eef0245ded35fcb508560db29166544bc
| 518
|
py
|
Python
|
components/driver/test_apps/i2s_test_apps/i2s/pytest_i2s.py
|
fbucafusco/esp-idf
|
c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361
|
[
"Apache-2.0"
] | null | null | null |
components/driver/test_apps/i2s_test_apps/i2s/pytest_i2s.py
|
fbucafusco/esp-idf
|
c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361
|
[
"Apache-2.0"
] | null | null | null |
components/driver/test_apps/i2s_test_apps/i2s/pytest_i2s.py
|
fbucafusco/esp-idf
|
c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import pytest
from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.esp32c3
@pytest.mark.esp32s3
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'iram_safe',
'release',
],
indirect=True,
)
def test_i2s(dut: Dut) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write('*')
dut.expect_unity_test_output()
| 20.72
| 66
| 0.696911
|
import pytest
from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.esp32c3
@pytest.mark.esp32s3
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'iram_safe',
'release',
],
indirect=True,
)
def test_i2s(dut: Dut) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write('*')
dut.expect_unity_test_output()
| true
| true
|
f715d6d5b4734d75244b4bcd84df7da47ab5fd20
| 5,939
|
py
|
Python
|
coresupdate.py
|
danitxu79/Retroarch_Cores_Update_from_Retropie_Menu
|
2841b12b0d29b08e71e0ddbbd148e5cf84cad3ce
|
[
"MIT"
] | null | null | null |
coresupdate.py
|
danitxu79/Retroarch_Cores_Update_from_Retropie_Menu
|
2841b12b0d29b08e71e0ddbbd148e5cf84cad3ce
|
[
"MIT"
] | null | null | null |
coresupdate.py
|
danitxu79/Retroarch_Cores_Update_from_Retropie_Menu
|
2841b12b0d29b08e71e0ddbbd148e5cf84cad3ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# user's retroarch configuration file
retroconfig = '/opt/retropie/configs/all/retroarch.cfg'
# current buildbot url
retrourl = 'https://buildbot.libretro.com'
import argparse
import configparser
import os
import os.path as pth
import platform
import shutil
import sys
import tempfile
import time
import urllib.request
import zipfile
# parse arguments
pars = argparse.ArgumentParser()
pars.add_argument('-c', '--cores', action="store_true",
help='download and extract cores')
pars.add_argument('-s', '--assets', action="store_true",
help='download and extract asset files')
pars.add_argument('-a', '--all', action="store_true",
help='download and extract both')
pars.add_argument('-v', '--verbose', action="store_true",
help='display target urls and directories')
pars.add_argument('-d', '--dry', action="store_true",
help='dry run; do not download anything')
pars.add_argument('-g', '--config', type=str,
help='specify the retroarch config file')
args = pars.parse_args(args=None if sys.argv[1:] else ['-h'])
# if args.all:
# args.assets = True
# args.cores = True
# (echo raspberry | sudo -S apt-get install dialog -y)
# funcheck=( dialog --separate-output --menu "Retroarch Cores Actualizer" 0 0 0 1 "Update installed cores" 2 "I" 3 "Install all cores"
# opciones=(1 "option 1" on
# 2 "option 2" off
# selecciones=$("${funcheck[@]}" "${opciones[@]}" 2>&1 >/dev/tty)
# clear
# do
# case $seleccion in
# 1)
# echo "Escogiste la opción 1"
# ;;
# 2)
# echo "Escogiste la opción 2"
# ;;
# asset names used in the buildbot and config file
itemlist = {
'assets' : 'assets_directory',
'autoconfig' : 'joypad_autoconfig_dir',
'cheats' : 'cheat_database_path',
'database-cursors' : 'cursor_directory',
'database-rdb' : 'content_database_path',
'info' : 'libretro_info_path',
'overlays' : 'overlay_directory',
'shaders_cg' : 'video_shader_dir',
'shaders_glsl' : 'video_shader_dir',
'shaders_slang' : 'video_shader_dir',
}
# get platform
if sys.platform == 'win32':
osname = 'windows'
time.timezone = 0
elif sys.platform == 'darwin':
osname = 'apple/osx'
else:
osname = sys.platform
# check architecture
if platform.machine().endswith('64'):
osarch = 'x86_64'
else:
osarch = 'x86'
# get partial download urls
urlcores = pth.join(retrourl, 'nightly', osname, osarch, 'latest')
urlassets = pth.join(retrourl, 'assets/frontend')
# get config path; expand unix home folders
if args.config:
retroconfig = args.config
retroconfig = pth.normcase(pth.expanduser(retroconfig))
retrodir = pth.dirname(retroconfig)
# retrieve paths from retroarch user config
with open(retroconfig, 'r') as tmpconf:
conf = configparser.ConfigParser()
conf.read_string('[A]\n' + tmpconf.read())
# get asset paths; strip quotes and expand any ~'s
for item in itemlist:
itemlist[item] = pth.expanduser(conf['A'][itemlist[item]].strip('"'))
# get whole path of portable folders
if itemlist[item].startswith(':'):
itemlist[item] = pth.join(retrodir, itemlist[item].lstrip(':\\'))
# add subdirs to shaders' paths
for shdr in ['shaders_cg', 'shaders_glsl', 'shaders_slang']:
itemlist[shdr] = pth.join(itemlist[shdr], shdr)
# and also get the cores path
coredir = pth.expanduser(conf['A']['libretro_directory'].strip('"'))
if coredir.startswith(':'):
coredir = pth.join(retrodir, coredir.lstrip(':\\'))
corelist = sorted(os.listdir(coredir))
conf.clear()
# download and extract archive to destination
def fetch_archive(url, dest):
# download
with urllib.request.urlopen(url) as tmpdata:
tmpfile = tempfile.NamedTemporaryFile(suffix='.zip')
shutil.copyfileobj(tmpdata, tmpfile)
# extract
with zipfile.ZipFile(tmpfile, 'r') as tmpzip:
for member in tmpzip.infolist():
tmpzip.extract(member, dest)
# use original modification timestamp
origdate = time.mktime(member.date_time + (0, 0, -1)) - time.timezone
os.utime(pth.join(dest, member.filename), (origdate, origdate))
# download and extract each core currently in retroarch's core directory
if args.cores:
print('updating cores...')
for core in corelist:
coreurl = pth.join(urlcores, core+'.zip').replace('\\', '/')
print('[%2d/%2d] fetching: %s' % (corelist.index(core)+1,
len(corelist),
core+'.zip'))
if args.verbose:
print(' '*7, 'from url: %s' % coreurl)
print(' '*7, 'into dir: %s' % coredir)
if not args.dry:
try:
fetch_archive(coreurl, coredir)
except Exception as excp:
print(' '*7, 'could not fetch file: %s' % core+'.zip')
print(' '*7, excp)
# download and extract each asset archive into their respective directories
if args.assets:
print('updating assets...')
for item in itemlist:
itemurl = pth.join(urlassets, item+'.zip').replace('\\', '/')
itempath = itemlist[item]
print('[%2d/%2d] fetching: %s' % (list(itemlist).index(item)+1,
len(itemlist),
item+'.zip'))
if args.verbose:
print(' '*7, 'from url: %s' % itemurl)
print(' '*7, 'into dir: %s' % itempath)
if not args.dry:
try:
os.makedirs(itempath, exist_ok=True)
fetch_archive(itemurl, itempath)
except Exception as excp:
print(' '*7, 'could not fetch file: %s' % item+'.zip')
print(' '*7, excp)
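# Typical invocations, for reference (illustrative; they assume the script is
# run from its own directory with a readable retroarch.cfg):
#
#   python3 coresupdate.py --cores --verbose
#   python3 coresupdate.py --assets --dry
#   python3 coresupdate.py -c -g ~/.config/retroarch/retroarch.cfg
#
# Note that --all is accepted by the parser, but the block that would expand it
# into --cores and --assets is commented out above, so it currently has no effect.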
| 30.613402
| 135
| 0.602795
|
retroconfig = '/opt/retropie/configs/all/retroarch.cfg'
# current buildbot url
retrourl = 'https://buildbot.libretro.com'
import argparse
import configparser
import os
import os.path as pth
import platform
import shutil
import sys
import tempfile
import time
import urllib.request
import zipfile
# parse arguments
pars = argparse.ArgumentParser()
pars.add_argument('-c', '--cores', action="store_true",
help='download and extract cores')
pars.add_argument('-s', '--assets', action="store_true",
help='download and extract asset files')
pars.add_argument('-a', '--all', action="store_true",
help='download and extract both')
pars.add_argument('-v', '--verbose', action="store_true",
help='display target urls and directories')
pars.add_argument('-d', '--dry', action="store_true",
help='dry run; do not download anything')
pars.add_argument('-g', '--config', type=str,
help='specify the retroarch config file')
args = pars.parse_args(args=None if sys.argv[1:] else ['-h'])
# if args.all:
# args.assets = True
# args.cores = True
# (echo raspberry | sudo -S apt-get install dialog -y)
# funcheck=( dialog --separate-output --menu "Retroarch Cores Actualizer" 0 0 0 1 "Update installed cores" 2 "I" 3 "Install all cores"
# opciones=(1 "option 1" on
# 2 "option 2" off
# selecciones=$("${funcheck[@]}" "${opciones[@]}" 2>&1 >/dev/tty)
# clear
# do
# case $seleccion in
# 1)
# echo "Escogiste la opción 1"
# ;;
# 2)
# echo "Escogiste la opción 2"
# ;;
# asset names used in the buildbot and config file
itemlist = {
'assets' : 'assets_directory',
'autoconfig' : 'joypad_autoconfig_dir',
'cheats' : 'cheat_database_path',
'database-cursors' : 'cursor_directory',
'database-rdb' : 'content_database_path',
'info' : 'libretro_info_path',
'overlays' : 'overlay_directory',
'shaders_cg' : 'video_shader_dir',
'shaders_glsl' : 'video_shader_dir',
'shaders_slang' : 'video_shader_dir',
}
# get platform
if sys.platform == 'win32':
osname = 'windows'
time.timezone = 0
elif sys.platform == 'darwin':
osname = 'apple/osx'
else:
osname = sys.platform
# check architecture
if platform.machine().endswith('64'):
osarch = 'x86_64'
else:
osarch = 'x86'
# get partial download urls
urlcores = pth.join(retrourl, 'nightly', osname, osarch, 'latest')
urlassets = pth.join(retrourl, 'assets/frontend')
# get config path; expand unix home folders
if args.config:
retroconfig = args.config
retroconfig = pth.normcase(pth.expanduser(retroconfig))
retrodir = pth.dirname(retroconfig)
# retrieve paths from retroarch user config
with open(retroconfig, 'r') as tmpconf:
conf = configparser.ConfigParser()
conf.read_string('[A]\n' + tmpconf.read())
# get asset paths; strip quotes and expand any ~'s
for item in itemlist:
itemlist[item] = pth.expanduser(conf['A'][itemlist[item]].strip('"'))
# get whole path of portable folders
if itemlist[item].startswith(':'):
itemlist[item] = pth.join(retrodir, itemlist[item].lstrip(':\\'))
# add subdirs to shaders' paths
for shdr in ['shaders_cg', 'shaders_glsl', 'shaders_slang']:
itemlist[shdr] = pth.join(itemlist[shdr], shdr)
# and also get the cores path
coredir = pth.expanduser(conf['A']['libretro_directory'].strip('"'))
if coredir.startswith(':'):
coredir = pth.join(retrodir, coredir.lstrip(':\\'))
corelist = sorted(os.listdir(coredir))
conf.clear()
# download and extract archive to destination
def fetch_archive(url, dest):
# download
with urllib.request.urlopen(url) as tmpdata:
tmpfile = tempfile.NamedTemporaryFile(suffix='.zip')
shutil.copyfileobj(tmpdata, tmpfile)
# extract
with zipfile.ZipFile(tmpfile, 'r') as tmpzip:
for member in tmpzip.infolist():
tmpzip.extract(member, dest)
# use original modification timestamp
origdate = time.mktime(member.date_time + (0, 0, -1)) - time.timezone
os.utime(pth.join(dest, member.filename), (origdate, origdate))
# download and extract each core currently in retroarch's core directory
if args.cores:
print('updating cores...')
for core in corelist:
coreurl = pth.join(urlcores, core+'.zip').replace('\\', '/')
print('[%2d/%2d] fetching: %s' % (corelist.index(core)+1,
len(corelist),
core+'.zip'))
if args.verbose:
print(' '*7, 'from url: %s' % coreurl)
print(' '*7, 'into dir: %s' % coredir)
if not args.dry:
try:
fetch_archive(coreurl, coredir)
except Exception as excp:
print(' '*7, 'could not fetch file: %s' % core+'.zip')
print(' '*7, excp)
if args.assets:
print('updating assets...')
for item in itemlist:
itemurl = pth.join(urlassets, item+'.zip').replace('\\', '/')
itempath = itemlist[item]
print('[%2d/%2d] fetching: %s' % (list(itemlist).index(item)+1,
len(itemlist),
item+'.zip'))
if args.verbose:
print(' '*7, 'from url: %s' % itemurl)
print(' '*7, 'into dir: %s' % itempath)
if not args.dry:
try:
os.makedirs(itempath, exist_ok=True)
fetch_archive(itemurl, itempath)
except Exception as excp:
print(' '*7, 'could not fetch file: %s' % item+'.zip')
print(' '*7, excp)
| true
| true
|
f715d7a31a5df246928567994bae099da3cac6a5
| 7,559
|
py
|
Python
|
webStorm-APICloud/python_tools/Lib/SimpleHTTPServer.py
|
zzr925028429/androidyianyan
|
8967fdba92473e8e65ee222515dfc54cdae5bb0b
|
[
"MIT"
] | 81
|
2017-03-13T08:24:01.000Z
|
2021-04-02T09:48:38.000Z
|
Macros/Python/SimpleHTTPServer.py
|
rec/DMXIS
|
540baa59df6f4ae39990e5888f90b95caa362279
|
[
"Artistic-2.0"
] | 6
|
2017-04-30T08:36:55.000Z
|
2017-09-22T01:37:28.000Z
|
Macros/Python/SimpleHTTPServer.py
|
rec/DMXIS
|
540baa59df6f4ae39990e5888f90b95caa362279
|
[
"Artistic-2.0"
] | 41
|
2017-03-18T14:11:58.000Z
|
2021-04-14T05:06:09.000Z
|
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
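# Typical usage under Python 2, serving the current working directory:
#
#   $ python -m SimpleHTTPServer 8000
#
# or, from code, a sketch that reuses the handler with an explicit address
# (not part of the original module):
#
#   import BaseHTTPServer, SimpleHTTPServer
#   httpd = BaseHTTPServer.HTTPServer(('', 8000), SimpleHTTPServer.SimpleHTTPRequestHandler)
#   httpd.serve_forever()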
| 34.515982
| 83
| 0.582484
|
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
f = self.send_head()
if f:
f.close()
def send_head(self):
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init()
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream',
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
| true
| true
|
f715d813ac5ceeef5aa1cbcaa572ba4bc2637be9
| 182
|
py
|
Python
|
reduction/test/plot_algol_h_alpha_line.py
|
christianwbrock/algol-reduction
|
5e85734d9e9e31985ead3ce40e67535418351010
|
[
"BSD-3-Clause"
] | null | null | null |
reduction/test/plot_algol_h_alpha_line.py
|
christianwbrock/algol-reduction
|
5e85734d9e9e31985ead3ce40e67535418351010
|
[
"BSD-3-Clause"
] | null | null | null |
reduction/test/plot_algol_h_alpha_line.py
|
christianwbrock/algol-reduction
|
5e85734d9e9e31985ead3ce40e67535418351010
|
[
"BSD-3-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
from reduction.algol_h_alpha_line_model import AlgolHAlphaModel
if __name__ == '__main__':
AlgolHAlphaModel().plot(plt.axes())
plt.show()
| 18.2
| 63
| 0.758242
|
import matplotlib.pyplot as plt
from reduction.algol_h_alpha_line_model import AlgolHAlphaModel
if __name__ == '__main__':
AlgolHAlphaModel().plot(plt.axes())
plt.show()
| true
| true
|
f715d8a0e2bc4f9037250784399021a44f9b5b67
| 2,019
|
py
|
Python
|
h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | 1
|
2020-12-18T19:20:02.000Z
|
2020-12-18T19:20:02.000Z
|
import sys
sys.path.insert(1, "../../../")
import h2o
def link_correct_default(ip,port):
# Connect to h2o
h2o.init(ip,port)
print("Reading in original prostate data.")
h2o_data = h2o.upload_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
print("Compare models with link unspecified and canonical link specified.")
print("GAUSSIAN: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian")
h2o_model_specified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian", link="identity")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("BINOMIAL: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial")
h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial", link="logit")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("POISSON: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson")
h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson", link="log")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("GAMMA: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma")
h2o_model_specified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma", link="inverse")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
if __name__ == "__main__":
h2o.run_test(sys.argv, link_correct_default)
| 51.769231
| 113
| 0.752353
|
import sys
sys.path.insert(1, "../../../")
import h2o
def link_correct_default(ip,port):
h2o.init(ip,port)
print("Reading in original prostate data.")
h2o_data = h2o.upload_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
print("Compare models with link unspecified and canonical link specified.")
print("GAUSSIAN: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian")
h2o_model_specified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian", link="identity")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("BINOMIAL: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial")
h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial", link="logit")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("POISSON: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson")
h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson", link="log")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("GAMMA: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma")
h2o_model_specified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma", link="inverse")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
if __name__ == "__main__":
h2o.run_test(sys.argv, link_correct_default)
| true
| true
|
f715d969b2e39092279936585118b9960ebb2227
| 204
|
py
|
Python
|
sigbox/__init__.py
|
ok65/sigbox
|
eacec88ccdc3929e19d92d54ef3c52dda54e5856
|
[
"WTFPL"
] | null | null | null |
sigbox/__init__.py
|
ok65/sigbox
|
eacec88ccdc3929e19d92d54ef3c52dda54e5856
|
[
"WTFPL"
] | null | null | null |
sigbox/__init__.py
|
ok65/sigbox
|
eacec88ccdc3929e19d92d54ef3c52dda54e5856
|
[
"WTFPL"
] | null | null | null |
from sigbox.signal_decorator import SignalDecorator
from sigbox.signal_box import SignalBox, SignalBoxClass
from sigbox.sigbox import SigBox
__all__ = ["SignalBox", "SignalDecorator", "SignalBoxClass", "SigBox"]
| 40.8
| 62
| 0.857843
|
from sigbox.signal_decorator import SignalDecorator
from sigbox.signal_box import SignalBox, SignalBoxClass
from sigbox.sigbox import SigBox
__all__ = ["SignalBox", "SignalDecorator", "SignalBoxClass", "SigBox"]
| true
| true
|
f715d9a77e0c016a8984da9e96656403d08f49b6
| 3,270
|
py
|
Python
|
aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTransitRouterVbrAttachmentAttributeRequest.py
|
leafcoder/aliyun-openapi-python-sdk
|
26b441ab37a5cda804de475fd5284bab699443f1
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTransitRouterVbrAttachmentAttributeRequest.py
|
leafcoder/aliyun-openapi-python-sdk
|
26b441ab37a5cda804de475fd5284bab699443f1
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTransitRouterVbrAttachmentAttributeRequest.py
|
leafcoder/aliyun-openapi-python-sdk
|
26b441ab37a5cda804de475fd5284bab699443f1
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class UpdateTransitRouterVbrAttachmentAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'UpdateTransitRouterVbrAttachmentAttribute','cbn')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_TransitRouterAttachmentName(self):
return self.get_query_params().get('TransitRouterAttachmentName')
def set_TransitRouterAttachmentName(self,TransitRouterAttachmentName):
self.add_query_param('TransitRouterAttachmentName',TransitRouterAttachmentName)
def get_DryRun(self):
return self.get_query_params().get('DryRun')
def set_DryRun(self,DryRun):
self.add_query_param('DryRun',DryRun)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_TransitRouterAttachmentId(self):
return self.get_query_params().get('TransitRouterAttachmentId')
def set_TransitRouterAttachmentId(self,TransitRouterAttachmentId):
self.add_query_param('TransitRouterAttachmentId',TransitRouterAttachmentId)
def get_TransitRouterAttachmentDescription(self):
return self.get_query_params().get('TransitRouterAttachmentDescription')
def set_TransitRouterAttachmentDescription(self,TransitRouterAttachmentDescription):
self.add_query_param('TransitRouterAttachmentDescription',TransitRouterAttachmentDescription)
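# Hedged usage sketch, not part of the SDK file above: it assumes aliyun-python-sdk-core is
# installed alongside this module, and the credentials, region and IDs below are placeholders.
from aliyunsdkcore.client import AcsClient
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = UpdateTransitRouterVbrAttachmentAttributeRequest()
request.set_TransitRouterAttachmentId('tr-attach-xxxxxxxx')        # placeholder attachment ID
request.set_TransitRouterAttachmentName('example-vbr-attachment')  # new display name
request.set_TransitRouterAttachmentDescription('updated via SDK')  # new description
response = client.do_action_with_exception(request)                # raw JSON bytes from the API
print(response)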
| 38.023256
| 100
| 0.795107
|
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class UpdateTransitRouterVbrAttachmentAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'UpdateTransitRouterVbrAttachmentAttribute','cbn')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_TransitRouterAttachmentName(self):
return self.get_query_params().get('TransitRouterAttachmentName')
def set_TransitRouterAttachmentName(self,TransitRouterAttachmentName):
self.add_query_param('TransitRouterAttachmentName',TransitRouterAttachmentName)
def get_DryRun(self):
return self.get_query_params().get('DryRun')
def set_DryRun(self,DryRun):
self.add_query_param('DryRun',DryRun)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_TransitRouterAttachmentId(self):
return self.get_query_params().get('TransitRouterAttachmentId')
def set_TransitRouterAttachmentId(self,TransitRouterAttachmentId):
self.add_query_param('TransitRouterAttachmentId',TransitRouterAttachmentId)
def get_TransitRouterAttachmentDescription(self):
return self.get_query_params().get('TransitRouterAttachmentDescription')
def set_TransitRouterAttachmentDescription(self,TransitRouterAttachmentDescription):
self.add_query_param('TransitRouterAttachmentDescription',TransitRouterAttachmentDescription)
| true
| true
|
f715d9aaf4f384ba12cc7069add8806d2f40e71b
| 552
|
py
|
Python
|
446.py
|
wilbertgeng/LeetCode_exercise
|
f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc
|
[
"MIT"
] | null | null | null |
446.py
|
wilbertgeng/LeetCode_exercise
|
f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc
|
[
"MIT"
] | null | null | null |
446.py
|
wilbertgeng/LeetCode_exercise
|
f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc
|
[
"MIT"
] | null | null | null |
"""446. Arithmetic Slices II - Subsequence"""
import collections
class Solution(object):
def numberOfArithmeticSlices(self, A):
"""
:type A: List[int]
:rtype: int
"""
dp = [collections.defaultdict(int) for _ in range(len(A))]
total = 0
for i in range(len(A)):
for j in range(i):
k = A[i] - A[j]
dp[i][k] += 1
if k in dp[j]:
dp[i][k] += dp[j][k]
total += dp[j][k]
return total
######
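# Hedged sanity check for the solution above (illustrative, not part of the original submission):
# in [2, 4, 6, 8, 10] there are six arithmetic subsequences with difference 2 and one, [2, 6, 10],
# with difference 4, so the expected count is 7; for [7, 7, 7, 7] every index subset of size >= 3
# qualifies, giving C(4,3) + C(4,4) = 5.
import collections  # needed by the solution above when run outside the LeetCode judge
if __name__ == "__main__":
    solver = Solution()
    print(solver.numberOfArithmeticSlices([2, 4, 6, 8, 10]))  # expected: 7
    print(solver.numberOfArithmeticSlices([7, 7, 7, 7]))      # expected: 5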
| 14.153846
| 66
| 0.405797
|
import collections
class Solution(object):
def numberOfArithmeticSlices(self, A):
dp = [collections.defaultdict(int) for _ in range(len(A))]
total = 0
for i in range(len(A)):
for j in range(i):
k = A[i] - A[j]
dp[i][k] += 1
if k in dp[j]:
dp[i][k] += dp[j][k]
total += dp[j][k]
return total
| true
| true
|
f715d9f86be99e54b40e4f20ec32ccf74e3c5ae7
| 1,497
|
py
|
Python
|
ci/fireci/fireci/gradle.py
|
Elke26/firebase-android-sdk
|
47d41b9dc17cd95a7799f672f5cc14f1747642ec
|
[
"Apache-2.0"
] | 1
|
2021-01-30T19:52:32.000Z
|
2021-01-30T19:52:32.000Z
|
ci/fireci/fireci/gradle.py
|
Elke26/firebase-android-sdk
|
47d41b9dc17cd95a7799f672f5cc14f1747642ec
|
[
"Apache-2.0"
] | 1
|
2019-03-01T19:54:34.000Z
|
2019-03-01T19:58:03.000Z
|
ci/fireci/fireci/gradle.py
|
samtstern/firebase-android-sdk
|
ef399052f99019feb294746447e2bd8a5a6e81a4
|
[
"Apache-2.0"
] | 1
|
2021-01-02T20:23:09.000Z
|
2021-01-02T20:23:09.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import sys
from . import stats
_logger = logging.getLogger('fireci.gradle')
ADB_INSTALL_TIMEOUT = '5'
def P(name, value):
"""Returns name and value in the format of gradle's project property cli argument."""
return '-P{}={}'.format(name, value)
@stats.measure_call('gradle')
def run(*args, gradle_opts='', workdir=None):
"""Invokes gradle with specified args and gradle_opts."""
new_env = dict(os.environ)
if gradle_opts:
new_env['GRADLE_OPTS'] = gradle_opts
new_env[
'ADB_INSTALL_TIMEOUT'] = ADB_INSTALL_TIMEOUT # 5 minutes, rather than 2 minutes
stats.propagate_context_into(new_env)
command = ['./gradlew'] + list(args)
_logger.info('Executing gradle command: "%s" in directory: "%s"',
" ".join(command), workdir if workdir else '.')
return subprocess.check_call(
command,
cwd=workdir,
env=new_env,
)
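# Hedged illustration of the helpers above; the property name, value and Gradle arguments are made-up
# examples. P only formats a Gradle project-property flag, while run shells out to ./gradlew, so the
# run call is left commented because it would only succeed inside a Gradle project checkout.
assert P('minSdkVersion', 21) == '-PminSdkVersion=21'
# run('build', P('ci', 'true'), gradle_opts='-Xmx4g', workdir='.')  # would execute: ./gradlew build -Pci=true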
| 29.94
| 87
| 0.716099
|
import logging
import os
import subprocess
import sys
from . import stats
_logger = logging.getLogger('fireci.gradle')
ADB_INSTALL_TIMEOUT = '5'
def P(name, value):
return '-P{}={}'.format(name, value)
@stats.measure_call('gradle')
def run(*args, gradle_opts='', workdir=None):
new_env = dict(os.environ)
if gradle_opts:
new_env['GRADLE_OPTS'] = gradle_opts
new_env[
'ADB_INSTALL_TIMEOUT'] = ADB_INSTALL_TIMEOUT
stats.propagate_context_into(new_env)
command = ['./gradlew'] + list(args)
_logger.info('Executing gradle command: "%s" in directory: "%s"',
" ".join(command), workdir if workdir else '.')
return subprocess.check_call(
command,
cwd=workdir,
env=new_env,
)
| true
| true
|
f715da382fe2b6fe6c5bc405301456843f05fbef
| 34,975
|
py
|
Python
|
cmf/models/cmfsm.py
|
lidongyv/Explicit-Context-Mapping-for-Stereo-Matching
|
9b2e63982daf5629045de0bf0694d8ccb111b2f1
|
[
"Apache-2.0"
] | 1
|
2020-12-31T02:40:49.000Z
|
2020-12-31T02:40:49.000Z
|
cmf/models/cmfsm.py
|
lidongyv/Explicit-Context-Mapping-for-Stereo-Matching
|
9b2e63982daf5629045de0bf0694d8ccb111b2f1
|
[
"Apache-2.0"
] | null | null | null |
cmf/models/cmfsm.py
|
lidongyv/Explicit-Context-Mapping-for-Stereo-Matching
|
9b2e63982daf5629045de0bf0694d8ccb111b2f1
|
[
"Apache-2.0"
] | 1
|
2020-12-31T02:40:49.000Z
|
2020-12-31T02:40:49.000Z
|
# -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-07-17 10:44:43
# @Last Modified by: yulidong
# @Last Modified time: 2019-03-01 14:12:35
# -*- coding: utf-8 -*-
# @Author: lidong
# @Date: 2018-03-20 18:01:52
# @Last Modified by: yulidong
# @Last Modified time: 2018-07-16 22:16:14
import time
import torch
import numpy as np
import torch.nn as nn
import math
from math import ceil
from torch.autograd import Variable
from torch.nn.functional import cosine_similarity as cosine_s
from cmf import caffe_pb2
from cmf.models.utils import *
rsn_specs = {
'scene':
{
'n_classes': 9,
'input_size': (540, 960),
'block_config': [3, 4, 23, 3],
},
}
group_dim=32
pramid_dim=8
group_norm_group_num = 32
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
return nn.Sequential(
nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=dilation if dilation > 1 else pad,
dilation=dilation,
bias=False), nn.GroupNorm(group_norm_group_num, out_planes))
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
return nn.Sequential(
nn.Conv3d(
in_planes,
out_planes,
kernel_size=kernel_size,
padding=pad,
stride=stride,
bias=False), nn.GroupNorm(group_norm_group_num, out_planes))
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(
convbn(inplanes, planes, 3, stride, pad, dilation),
nn.ReLU(inplace=True))
self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
x = self.downsample(x)
out += x
return out
class matchshifted(nn.Module):
def __init__(self):
super(matchshifted, self).__init__()
def forward(self, left, right, shift):
batch, filters, height, width = left.size()
shifted_left = F.pad(
torch.index_select(
left, 3,
Variable(torch.LongTensor(
[i for i in range(shift, width)])).cuda()),
(shift, 0, 0, 0))
shifted_right = F.pad(
torch.index_select(
right, 3,
Variable(torch.LongTensor(
[i for i in range(width - shift)])).cuda()),
(shift, 0, 0, 0))
out = torch.cat((shifted_left, shifted_right), 1).view(
batch, filters * 2, 1, height, width)
return out
class disparityregression(nn.Module):
def __init__(self, maxdisp):
super().__init__()
self.disp = Variable(
torch.Tensor(
np.reshape(np.array(range(maxdisp)),
[1, maxdisp, 1, 1])).cuda(),
requires_grad=False)
def forward(self, x):
disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
out = torch.sum(x * disp, 1)
return out
class feature_extraction(nn.Module):
def __init__(self):
super(feature_extraction, self).__init__()
self.inplanes = 32
self.firstconv = nn.Sequential(
convbn(3, 32, 3, 1, 1, 1),
# nn.GroupNorm(group_dim, 32),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1, bias=False))
self.secondconv = nn.Sequential(
nn.GroupNorm(group_dim, 32),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
self.branch1 = nn.Sequential(
nn.AvgPool2d((64, 64), stride=(64, 64)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch2 = nn.Sequential(
nn.AvgPool2d((32, 32), stride=(32, 32)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch3 = nn.Sequential(
nn.AvgPool2d((16, 16), stride=(16, 16)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch4 = nn.Sequential(
nn.AvgPool2d((8, 8), stride=(8, 8)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.lastconv = nn.Sequential(
convbn(320, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
def _make_layer(self, block, planes, blocks, stride, pad, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.GroupNorm(group_norm_group_num, planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, pad, dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
return nn.Sequential(*layers)
def forward(self, x):
output_all = self.firstconv(x)
output=self.secondconv(output_all)
output_rt = self.layer1(output)
output_raw = self.layer2(output_rt)
output = self.layer3(output_raw)
output_skip = self.layer4(output)
output_branch1 = self.branch1(output_skip)
output_branch1 = F.interpolate(
output_branch1, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch2 = self.branch2(output_skip)
output_branch2 = F.interpolate(
output_branch2, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch3 = self.branch3(output_skip)
output_branch3 = F.interpolate(
output_branch3, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch4 = self.branch4(output_skip)
output_branch4 = F.interpolate(
output_branch4, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_feature = torch.cat(
(output_raw, output_skip, output_branch4, output_branch3,
output_branch2, output_branch1), 1)
output_feature = self.lastconv(output_feature)
return output_feature, output_rt,output_all
class hourglass(nn.Module):
def __init__(self, inplanes):
super().__init__()
self.conv1 = nn.Sequential(
convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv2 = convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)
self.conv3 = nn.Sequential(
convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(
convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(
nn.ConvTranspose3d(
inplanes * 2,
inplanes * 2,
kernel_size=3,
padding=1,
output_padding=1,
stride=2,
bias=False), nn.GroupNorm(group_norm_group_num,
inplanes * 2)) # +conv2
self.conv6 = nn.Sequential(
nn.ConvTranspose3d(
inplanes * 2,
inplanes,
kernel_size=3,
padding=1,
output_padding=(1,1,1),
stride=2,
bias=False), nn.GroupNorm(group_norm_group_num,
inplanes)) # +x
def forward(self, x, presqu, postsqu):
out = self.conv1(x) # in:1/4 out:1/8
pre = self.conv2(out) # in:1/8 out:1/8
if postsqu is not None:
pre = F.relu(pre + postsqu, inplace=True)
else:
pre = F.relu(pre, inplace=True)
out = self.conv3(pre) # in:1/8 out:1/16
out = self.conv4(out) # in:1/16 out:1/16
if presqu is not None:
post = F.relu(
self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8
else:
post = F.relu(self.conv5(out) + pre, inplace=True)
out = self.conv6(post) # in:1/8 out:1/4
return out, pre, post
class similarity_measure1(nn.Module):
def __init__(self):
super(similarity_measure1, self).__init__()
self.inplanes = 32
self.conv0 = nn.Conv2d(66, 32, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu0 = nn.LeakyReLU(inplace=True)
self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu1 = nn.LeakyReLU(inplace=True)
self.conv2 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu2 = nn.LeakyReLU(inplace=True)
self.conv3 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
#self.relu3 = nn.Sigmoid()
# self.conv4 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,
# bias=False,dilation=1)
# self.relu4 = nn.LeakyReLU(inplace=True)
# self.conv5 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,
# bias=False,dilation=1)
# self.relu5 = nn.ReLU(inplace=True)
#self.s1=nn.Parameter(torch.ones(1)).float()*0.5
for m in self.modules():
if isinstance(m,nn.Conv2d):
nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight,1)
nn.init.constant_(m.bias,0)
def forward(self, x):
output = self.conv0(x)
output = self.relu0(output)
output = self.conv1(output)
output = self.relu1(output)
output = self.conv2(output)
output = self.relu2(output)
output = self.conv3(output)
#output = self.relu3(output)
# output = self.conv4(output)
# output = self.relu4(output)
# output = self.conv5(output)
# #output = torch.abs(output)
# output = self.relu5(output)
# print(output.shape)
# print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())
# output = output/torch.max(output)
# output = output-torch.min(output)
# output = 1-output
# output = torch.exp(-output)
#print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())
return output
class similarity_measure2(nn.Module):
def __init__(self):
super(similarity_measure2, self).__init__()
self.inplanes = 32
self.conv0 = nn.Conv2d(3, 3, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu0 = nn.LeakyReLU(inplace=True)
self.conv1 = nn.Conv2d(3, 2, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu1 = nn.LeakyReLU(inplace=True)
self.conv2 = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu2 = nn.LeakyReLU(inplace=True)
#self.s2=nn.Parameter(torch.ones(1)).float()*0.5
for m in self.modules():
if isinstance(m,nn.Conv2d):
nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight,1)
nn.init.constant_(m.bias,0)
def forward(self, x):
output = self.conv0(x)
output = self.relu0(output)
output = self.conv1(output)
output = self.relu1(output)
output = self.conv2(output)
output = self.relu2(output)
return output
def matrix_generation():
scale=4
x=torch.arange(-scale//2,scale//2+1).float()
x=torch.cat([x[:x.shape[0]//2],x[x.shape[0]//2+1:]]).unsqueeze(0)
distance_matrix=x.expand(scale,scale).unsqueeze(0)
distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)
distance_matrix=distance_matrix.unsqueeze(0)
distance_matrix1=distance_matrix+0
distance_matrix2=distance_matrix+0
distance_matrix3=distance_matrix+0
distance_matrix4=distance_matrix+0
distance_matrix5=distance_matrix+0
distance_matrix6=distance_matrix+0
distance_matrix7=distance_matrix+0
distance_matrix8=distance_matrix+0
x=torch.arange(1,scale+1).float()
x=x.expand(scale,scale).unsqueeze(0)
#x=x.repeat(hr_feature.shape[0],hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float().cuda()
distance_matrix1[:,0,:,:]=scale-x+1
distance_matrix2[:,0,:,:]=x
distance_matrix5[:,0,:,:]=distance_matrix2[:,0,:,:]
distance_matrix6[:,0,:,:]=distance_matrix1[:,0,:,:]
distance_matrix7[:,0,:,:]=distance_matrix2[:,0,:,:]
distance_matrix8[:,0,:,:]=distance_matrix1[:,0,:,:]
x=torch.arange(1,scale+1).float()
x=x.expand(scale,scale).unsqueeze(0).transpose(2,1)
distance_matrix3[:,1,:,:]=(scale-x+1)
distance_matrix4[:,1,:,:]=x
distance_matrix5[:,1,:,:]=distance_matrix3[:,1,:,:]
distance_matrix6[:,1,:,:]=distance_matrix3[:,1,:,:]
distance_matrix7[:,1,:,:]=distance_matrix4[:,1,:,:]
distance_matrix8[:,1,:,:]=distance_matrix4[:,1,:,:]
# print(distance_matrix3)
return distance_matrix.cuda(),distance_matrix1.cuda(),distance_matrix2.cuda(),distance_matrix3.cuda(),distance_matrix4.cuda(), \
distance_matrix5.cuda(),distance_matrix6.cuda(),distance_matrix7.cuda(),distance_matrix8.cuda()
class eight_related_context_mapping(nn.Module):
def __init__(self):
super(eight_related_context_mapping,self).__init__()
self.similarity1=similarity_measure1()
#need to remove
#self.similarity2=similarity_measure2()
# self.fuse=nn.Sequential(nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
# bias=False,dilation=1),nn.LeakyReLU(inplace=True))
#self.fuse.weight.data.fill_(1)
self.sigmoid=nn.Sigmoid()
self.distance_matrix,self.distance_matrix1,self.distance_matrix2,self.distance_matrix3,self.distance_matrix4, \
self.distance_matrix5,self.distance_matrix6,self.distance_matrix7,self.distance_matrix8=matrix_generation()
def forward(self, lr_feature, hr_feature,lr_feature_r, hr_feature_r):
#self.fuse.weight.data=torch.abs(self.fuse.weight.data)
with torch.no_grad():
scale=hr_feature.shape[-1]//lr_feature.shape[-1]
if scale%2!=0:
exit()
padding1=hr_feature[:,:1,:,:scale]*0-100
padding2=hr_feature[:,:1,:scale,:]*0-100
distance_matrix=self.distance_matrix.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix1=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix2=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix3=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix4=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix5=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix6=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix7=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix8=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
#center
#reference image
lr_feature=lr_feature.unsqueeze(-1).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3],scale) \
.contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3]*scale) \
.unsqueeze(-2).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],scale,lr_feature.shape[3]*scale) \
.contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2]*scale,lr_feature.shape[3]*scale)
representation=torch.cat([lr_feature,hr_feature,distance_matrix],1)
weight=self.similarity1(representation)
#target image
# lr_feature_r=lr_feature_r.unsqueeze(-1).expand(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],lr_feature_r.shape[3],scale) \
# .contiguous().view(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],lr_feature_r.shape[3]*scale) \
# .unsqueeze(-2).expand(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],scale,lr_feature_r.shape[3]*scale) \
# .contiguous().view(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2]*scale,lr_feature_r.shape[3]*scale)
# representation_target=torch.cat([lr_feature_r,hr_feature_r,distance_matrix],1)
# weight_target=self.similarity1(representation_target)
#left
#reference
representation_l=torch.cat([lr_feature[:,:,:,:-scale],hr_feature[:,:,:,scale:],distance_matrix1[:,:,:,:-scale]],1)
weight_l=self.similarity1(representation_l)
weight_l=torch.cat([padding1,weight_l],-1)
#target
# representation_l_target=torch.cat([lr_feature_r[:,:,:,:-scale],hr_feature_r[:,:,:,scale:],distance_matrix2[:,:,:,:-scale]],1)
# weight_l_target=self.similarity1(representation_l_target)
# weight_l_target=torch.cat([padding1,weight_l_target],-1)
#right
#reference
representation_r=torch.cat([lr_feature[:,:,:,scale:],hr_feature[:,:,:,:-scale],distance_matrix2[:,:,:,scale:]],1)
weight_r=self.similarity1(representation_r)
weight_r=torch.cat([weight_r,padding1],-1)
#target image
# representation_r_target=torch.cat([lr_feature_r[:,:,:,scale:],hr_feature_r[:,:,:,:-scale],distance_matrix1[:,:,:,scale:]],1)
# weight_r_target=self.similarity1(representation_r_target)
# weight_r_target=torch.cat([weight_r_target,padding1],-1)
#top
#reference
representation_t=torch.cat([lr_feature[:,:,:-scale,:],hr_feature[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)
weight_t=self.similarity1(representation_t)
weight_t=torch.cat([padding2,weight_t],-2)
#target
# representation_t_target=torch.cat([lr_feature_r[:,:,:-scale,:],hr_feature_r[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)
# weight_t_target=self.similarity1(representation_t_target)
# weight_t_target=torch.cat([padding2,weight_t_target],-2)
#bottom
#reference
representation_b=torch.cat([lr_feature[:,:,scale:,:],hr_feature[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)
weight_b=self.similarity1(representation_b)
weight_b=torch.cat([weight_b,padding2],-2)
#left-top
#reference
representation_lt=torch.cat([lr_feature[:,:,:-scale,:-scale],hr_feature[:,:,scale:,scale:],distance_matrix5[:,:,:-scale,:-scale]],1)
weight_lt=self.similarity1(representation_lt)
weight_lt=torch.cat([padding2,torch.cat([padding1[...,scale:,:],weight_lt],-1)],-2)
#target
# representation_l_target=torch.cat([lr_feature_r[:,:,:,:-scale],hr_feature_r[:,:,:,scale:],distance_matrix2[:,:,:,:-scale]],1)
# weight_l_target=self.similarity1(representation_l_target)
# weight_l_target=torch.cat([padding1,weight_l_target],-1)
#right-top
#reference
representation_rt=torch.cat([lr_feature[:,:,:-scale,scale:],hr_feature[:,:,scale:,:-scale],distance_matrix6[:,:,:-scale,scale:]],1)
weight_rt=self.similarity1(representation_rt)
weight_rt=torch.cat([padding2,torch.cat([weight_rt,padding1[...,scale:,:]],-1)],-2)
#target image
# representation_r_target=torch.cat([lr_feature_r[:,:,:,scale:],hr_feature_r[:,:,:,:-scale],distance_matrix1[:,:,:,scale:]],1)
# weight_r_target=self.similarity1(representation_r_target)
# weight_r_target=torch.cat([weight_r_target,padding1],-1)
#left-bottom
#reference
representation_lb=torch.cat([lr_feature[:,:,scale:,:-scale],hr_feature[:,:,:-scale:,scale:],distance_matrix7[:,:,scale:,:-scale]],1)
weight_lb=self.similarity1(representation_lb)
weight_lb=torch.cat([torch.cat([padding1[...,scale:,:],weight_lb],-1),padding2],-2)
#target
# representation_t_target=torch.cat([lr_feature_r[:,:,:-scale,:],hr_feature_r[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)
# weight_t_target=self.similarity1(representation_t_target)
# weight_t_target=torch.cat([padding2,weight_t_target],-2)
#right-bottom
#reference
representation_rb=torch.cat([lr_feature[:,:,scale:,scale:],hr_feature[:,:,:-scale,:-scale],distance_matrix8[:,:,scale:,scale:]],1)
weight_rb=self.similarity1(representation_rb)
weight_rb=torch.cat([torch.cat([weight_rb,padding1[...,:-scale,:]],-1),padding2],-2)
weight_all=torch.cat([weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb],dim=1)
weight_norm=F.softmax(weight_all, dim=1)
#weight_fuse=F.softmax(weight_norm*weight_all)
#target
# representation_b_target=torch.cat([lr_feature_r[:,:,scale:,:],hr_feature_r[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)
# weight_b_target=self.similarity1(representation_b_target)
# weight_b_target=torch.cat([weight_b_target,padding2],-2)
# weight_all=torch.cat([weight,weight_r,weight_l,weight_t,weight_b],dim=1)
# weight_norm=F.softmax(weight_all, dim=1)
# weight_all_target=torch.cat([weight_target,weight_r_target,weight_l_target,weight_t_target,weight_b_target],dim=1)
# weight_norm_target=F.softmax(weight_all_target, dim=1)
# return weight*weight_norm[:,0:1,:,:],weight_target*weight_norm_target[:,0:1,:,:], \
# weight_r*weight_norm[:,1:2,:,:],weight_r_target*weight_norm_target[:,1:2,:,:], \
# weight_l*weight_norm[:,2:3,:,:],weight_l_target*weight_norm_target[:,2:3,:,:], \
# weight_t*weight_norm[:,3:4,:,:],weight_t_target*weight_norm_target[:,3:4,:,:], \
# weight_b*weight_norm[:,4:5,:,:],weight_b_target*weight_norm_target[:,4:5,:,:]
# return self.sigmoid(weight)*weight_norm[:,0:1,...], \
# self.sigmoid(weight_l)*weight_norm[:,1:2,...], \
# self.sigmoid(weight_r)*weight_norm[:,2:3,...], \
# self.sigmoid(weight_t)*weight_norm[:,3:4,...], \
# self.sigmoid(weight_b)*weight_norm[:,4:5,...],\
# self.sigmoid(weight_lt)*weight_norm[:,5:6,...], \
# self.sigmoid(weight_rt)*weight_norm[:,6:7,...], \
# self.sigmoid(weight_lb)*weight_norm[:,7:8,...], \
# self.sigmoid(weight_rb)*weight_norm[:,8:9,...]
#print(torch.mean(torch.max(weight_norm,dim=1)[0]),torch.max(weight_all,dim=1)[0])
#print(torch.mean(torch.topk(weight_all,3,dim=1)[0].float()),torch.mean(torch.topk(weight_all,3,dim=1)[1].float()))
#print(torch.mean(torch.topk(weight_all,1,dim=1)[0].float()),torch.mean(torch.topk(weight_all,1,dim=1)[1].float()))
if torch.mean(torch.topk(weight_all,1,dim=1)[0].float())<0:
print(torch.mean(torch.topk(weight_all,3,dim=1)[0].float()),torch.mean(torch.topk(weight_all,3,dim=1)[1].float()))
print(torch.mean(torch.topk(weight_all,1,dim=1)[0].float()),torch.mean(torch.topk(weight_all,1,dim=1)[1].float()))
#print(torch.mean(torch.min(weight_norm,dim=1)[0]),torch.min(weight_all,dim=1)[0])
return weight_norm[:,0:1,...], \
weight_norm[:,1:2,...], \
weight_norm[:,2:3,...], \
weight_norm[:,3:4,...], \
weight_norm[:,4:5,...],\
weight_norm[:,5:6,...], \
weight_norm[:,6:7,...], \
weight_norm[:,7:8,...], \
weight_norm[:,8:9,...]
class cmfsm(nn.Module):
def __init__(self,
maxdisp=192):
super(cmfsm, self).__init__()
self.maxdisp = maxdisp
self.feature_extraction = feature_extraction()
self.dres0 = nn.Sequential(
convbn_3d(64, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.dres1 = nn.Sequential(
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1))
self.dres2 = hourglass(32)
self.dres3 = hourglass(32)
self.dres4 = hourglass(32)
self.classif1 = nn.Sequential(
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.classif2 = nn.Sequential(
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.classif3 = nn.Sequential(
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.mapping_matrix=eight_related_context_mapping()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1] * \
m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, left, right):
start=time.time()
refimg_fea, half,all_feature= self.feature_extraction(left)
targetimg_fea, _ ,all_feature_right= self.feature_extraction(right)
scale=all_feature.shape[-1]//refimg_fea.shape[-1]
#mapping,mapping_r,mapping_l,mapping_t,mapping_b=self.mapping_matrix(refimg_fea,all_feature)
#target
#[mapping,mapping_r,mapping_l,mapping_t,mapping_b],[mapping_target,mapping_target_r,mapping_target_l]=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)
#time=0.1s
weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)
#mapping,mapping_target=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)
# matching
cost = Variable(
torch.FloatTensor(refimg_fea.size()[0],
refimg_fea.size()[1] * 2, self.maxdisp // scale,
refimg_fea.size()[2],
refimg_fea.size()[3]).zero_()).cuda()
for i in range(self.maxdisp // scale):
if i > 0:
cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:, :, :,
i:]
cost[:, refimg_fea.size()[1]:, i, :,
i:] = targetimg_fea[:, :, :, :-i]
else:
cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea
cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea
cost = cost.contiguous()
cost0 = self.dres0(cost)
cost0 = self.dres1(cost0) + cost0
out1, pre1, post1 = self.dres2(cost0, None, None)
out1 = out1 + cost0
out2, pre2, post2 = self.dres3(out1, pre1, post1)
out2 = out2 + cost0
out3, pre3, post3 = self.dres4(out2, pre1, post2)
out3 = out3 + cost0
cost1 = self.classif1(out1)
#cost2 = self.classif2(out2) + cost1
#cost3 = self.classif3(out3) + cost2
#torch.Size([1, 1, 256, 512])
# weight_all=torch.cat([weight,weight_r,weight_l,weight_t,weight_b],dim=1)
# weight_norm=F.softmax(weight_all, dim=1)
# t=time.time()
cost1 = torch.squeeze(cost1, 1)
pred1 = F.softmax(cost1, dim=1)
pred1 = disparityregression(self.maxdisp//scale)(pred1)
#torch.Size([1, 64, 128])
pred1=scale*pred1.unsqueeze(-1).expand(pred1.shape[0],pred1.shape[1],pred1.shape[2],scale) \
.contiguous().view(pred1.shape[0],pred1.shape[1],pred1.shape[2]*scale) \
.unsqueeze(-2).expand(pred1.shape[0],pred1.shape[1],scale,pred1.shape[2]*scale) \
.contiguous().view(pred1.shape[0],pred1.shape[1]*scale,pred1.shape[2]*scale)
pred1_map=pred1*weight
pred1_map[...,scale:]+=pred1[...,:-scale]*weight_l[...,scale:]
pred1_map[...,:-scale]+=pred1[...,scale:]*weight_r[...,:-scale]
pred1_map[...,scale:,:]+=pred1[...,:-scale,:]*weight_t[...,scale:,:]
pred1_map[...,:-scale,:]+=pred1[...,scale:,:]*weight_b[...,:-scale,:]
pred1_map[...,scale:,scale:]+=pred1[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
pred1_map[...,scale:,:-scale]+=pred1[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
pred1_map[...,:-scale,scale:]+=pred1[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
pred1_map[...,:-scale,:-scale]+=pred1[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
cost2 = self.classif2(out2)
cost2 = torch.squeeze(cost2, 1)+cost1
pred2 = F.softmax(cost2, dim=1)
pred2 = disparityregression(self.maxdisp//scale)(pred2)
pred2=scale*pred2.unsqueeze(-1).expand(pred2.shape[0],pred2.shape[1],pred2.shape[2],scale) \
.contiguous().view(pred2.shape[0],pred2.shape[1],pred2.shape[2]*scale) \
.unsqueeze(-2).expand(pred2.shape[0],pred2.shape[1],scale,pred2.shape[2]*scale) \
.contiguous().view(pred2.shape[0],pred2.shape[1]*scale,pred2.shape[2]*scale)
pred2_map=pred2*weight
pred2_map[...,scale:]+=pred2[...,:-scale]*weight_l[...,scale:]
pred2_map[...,:-scale]+=pred2[...,scale:]*weight_r[...,:-scale]
pred2_map[...,scale:,:]+=pred2[...,:-scale,:]*weight_t[...,scale:,:]
pred2_map[...,:-scale,:]+=pred2[...,scale:,:]*weight_b[...,:-scale,:]
pred2_map[...,scale:,scale:]+=pred2[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
pred2_map[...,scale:,:-scale]+=pred2[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
pred2_map[...,:-scale,scale:]+=pred2[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
pred2_map[...,:-scale,:-scale]+=pred2[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
cost3 = self.classif3(out3)
cost3 = torch.squeeze(cost3, 1)+cost2
pred3 = F.softmax(cost3, dim=1)
# print(torch.max(pred3,dim=1)[0])
# print(torch.min(pred3,dim=1)[0])
pred3 = disparityregression(self.maxdisp//scale)(pred3)
pred3=scale*pred3.unsqueeze(-1).expand(pred3.shape[0],pred3.shape[1],pred3.shape[2],scale) \
.contiguous().view(pred3.shape[0],pred3.shape[1],pred3.shape[2]*scale) \
.unsqueeze(-2).expand(pred3.shape[0],pred3.shape[1],scale,pred3.shape[2]*scale) \
.contiguous().view(pred3.shape[0],pred3.shape[1]*scale,pred3.shape[2]*scale)
pred3_map=pred3*weight
pred3_map[...,scale:]+=pred3[...,:-scale]*weight_l[...,scale:]
pred3_map[...,:-scale]+=pred3[...,scale:]*weight_r[...,:-scale]
pred3_map[...,scale:,:]+=pred3[...,:-scale,:]*weight_t[...,scale:,:]
pred3_map[...,:-scale,:]+=pred3[...,scale:,:]*weight_b[...,:-scale,:]
pred3_map[...,scale:,scale:]+=pred3[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
pred3_map[...,scale:,:-scale]+=pred3[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
pred3_map[...,:-scale,scale:]+=pred3[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
pred3_map[...,:-scale,:-scale]+=pred3[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
#pred3 = self.srr(pred3, left, refimg_fea, half)
#print(time.time()-start)
return pred1_map, pred2_map, pred3_map
#return pred3
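# Hedged smoke test, not from the original repository: it assumes the cmf package and a CUDA device,
# since the model moves tensors to the GPU internally. With the default maxdisp=192 the features are
# downsampled 4x, so a 256 x 512 stereo pair divides evenly through the pooling and hourglass strides.
if __name__ == '__main__':
    model = cmfsm(maxdisp=192).cuda().eval()
    left = torch.rand(1, 3, 256, 512).cuda()   # reference image, B x 3 x H x W
    right = torch.rand(1, 3, 256, 512).cuda()  # target image
    with torch.no_grad():
        pred1, pred2, pred3 = model(left, right)
    print(pred3.shape)  # expected: torch.Size([1, 256, 512]), full-resolution disparity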
| 44.897304
| 185
| 0.587277
|
import time
import torch
import numpy as np
import torch.nn as nn
import math
from math import ceil
from torch.autograd import Variable
from torch.nn.functional import cosine_similarity as cosine_s
from cmf import caffe_pb2
from cmf.models.utils import *
rsn_specs = {
'scene':
{
'n_classes': 9,
'input_size': (540, 960),
'block_config': [3, 4, 23, 3],
},
}
group_dim=32
pramid_dim=8
group_norm_group_num = 32
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
return nn.Sequential(
nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=dilation if dilation > 1 else pad,
dilation=dilation,
bias=False), nn.GroupNorm(group_norm_group_num, out_planes))
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
return nn.Sequential(
nn.Conv3d(
in_planes,
out_planes,
kernel_size=kernel_size,
padding=pad,
stride=stride,
bias=False), nn.GroupNorm(group_norm_group_num, out_planes))
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(
convbn(inplanes, planes, 3, stride, pad, dilation),
nn.ReLU(inplace=True))
self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
x = self.downsample(x)
out += x
return out
class matchshifted(nn.Module):
def __init__(self):
super(matchshifted, self).__init__()
def forward(self, left, right, shift):
batch, filters, height, width = left.size()
shifted_left = F.pad(
torch.index_select(
left, 3,
Variable(torch.LongTensor(
[i for i in range(shift, width)])).cuda()),
(shift, 0, 0, 0))
shifted_right = F.pad(
torch.index_select(
right, 3,
Variable(torch.LongTensor(
[i for i in range(width - shift)])).cuda()),
(shift, 0, 0, 0))
out = torch.cat((shifted_left, shifted_right), 1).view(
batch, filters * 2, 1, height, width)
return out
class disparityregression(nn.Module):
def __init__(self, maxdisp):
super().__init__()
self.disp = Variable(
torch.Tensor(
np.reshape(np.array(range(maxdisp)),
[1, maxdisp, 1, 1])).cuda(),
requires_grad=False)
def forward(self, x):
disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
out = torch.sum(x * disp, 1)
return out
class feature_extraction(nn.Module):
def __init__(self):
super(feature_extraction, self).__init__()
self.inplanes = 32
self.firstconv = nn.Sequential(
convbn(3, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1, bias=False))
self.secondconv = nn.Sequential(
nn.GroupNorm(group_dim, 32),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
self.branch1 = nn.Sequential(
nn.AvgPool2d((64, 64), stride=(64, 64)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch2 = nn.Sequential(
nn.AvgPool2d((32, 32), stride=(32, 32)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch3 = nn.Sequential(
nn.AvgPool2d((16, 16), stride=(16, 16)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch4 = nn.Sequential(
nn.AvgPool2d((8, 8), stride=(8, 8)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.lastconv = nn.Sequential(
convbn(320, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
def _make_layer(self, block, planes, blocks, stride, pad, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.GroupNorm(group_norm_group_num, planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, pad, dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
return nn.Sequential(*layers)
def forward(self, x):
output_all = self.firstconv(x)
output=self.secondconv(output_all)
output_rt = self.layer1(output)
output_raw = self.layer2(output_rt)
output = self.layer3(output_raw)
output_skip = self.layer4(output)
output_branch1 = self.branch1(output_skip)
output_branch1 = F.interpolate(
output_branch1, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch2 = self.branch2(output_skip)
output_branch2 = F.interpolate(
output_branch2, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch3 = self.branch3(output_skip)
output_branch3 = F.interpolate(
output_branch3, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch4 = self.branch4(output_skip)
output_branch4 = F.interpolate(
output_branch4, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_feature = torch.cat(
(output_raw, output_skip, output_branch4, output_branch3,
output_branch2, output_branch1), 1)
output_feature = self.lastconv(output_feature)
return output_feature, output_rt,output_all
class hourglass(nn.Module):
def __init__(self, inplanes):
super().__init__()
self.conv1 = nn.Sequential(
convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv2 = convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)
self.conv3 = nn.Sequential(
convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(
convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(
nn.ConvTranspose3d(
inplanes * 2,
inplanes * 2,
kernel_size=3,
padding=1,
output_padding=1,
stride=2,
bias=False), nn.GroupNorm(group_norm_group_num,
inplanes * 2))
self.conv6 = nn.Sequential(
nn.ConvTranspose3d(
inplanes * 2,
inplanes,
kernel_size=3,
padding=1,
output_padding=(1,1,1),
stride=2,
bias=False), nn.GroupNorm(group_norm_group_num,
inplanes))
def forward(self, x, presqu, postsqu):
out = self.conv1(x)
pre = self.conv2(out)
if postsqu is not None:
pre = F.relu(pre + postsqu, inplace=True)
else:
pre = F.relu(pre, inplace=True)
out = self.conv3(pre)
out = self.conv4(out)
if presqu is not None:
post = F.relu(
self.conv5(out) + presqu, inplace=True)
else:
post = F.relu(self.conv5(out) + pre, inplace=True)
out = self.conv6(post)
return out, pre, post
class similarity_measure1(nn.Module):
def __init__(self):
super(similarity_measure1, self).__init__()
self.inplanes = 32
self.conv0 = nn.Conv2d(66, 32, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu0 = nn.LeakyReLU(inplace=True)
self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu1 = nn.LeakyReLU(inplace=True)
self.conv2 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu2 = nn.LeakyReLU(inplace=True)
self.conv3 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
for m in self.modules():
if isinstance(m,nn.Conv2d):
nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight,1)
nn.init.constant_(m.bias,0)
def forward(self, x):
output = self.conv0(x)
output = self.relu0(output)
output = self.conv1(output)
output = self.relu1(output)
output = self.conv2(output)
output = self.relu2(output)
output = self.conv3(output)
return output
class similarity_measure2(nn.Module):
def __init__(self):
super(similarity_measure2, self).__init__()
self.inplanes = 32
self.conv0 = nn.Conv2d(3, 3, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu0 = nn.LeakyReLU(inplace=True)
self.conv1 = nn.Conv2d(3, 2, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu1 = nn.LeakyReLU(inplace=True)
self.conv2 = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu2 = nn.LeakyReLU(inplace=True)
for m in self.modules():
if isinstance(m,nn.Conv2d):
nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight,1)
nn.init.constant_(m.bias,0)
def forward(self, x):
output = self.conv0(x)
output = self.relu0(output)
output = self.conv1(output)
output = self.relu1(output)
output = self.conv2(output)
output = self.relu2(output)
return output
def matrix_generation():
scale=4
x=torch.arange(-scale//2,scale//2+1).float()
x=torch.cat([x[:x.shape[0]//2],x[x.shape[0]//2+1:]]).unsqueeze(0)
distance_matrix=x.expand(scale,scale).unsqueeze(0)
distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)
distance_matrix=distance_matrix.unsqueeze(0)
distance_matrix1=distance_matrix+0
distance_matrix2=distance_matrix+0
distance_matrix3=distance_matrix+0
distance_matrix4=distance_matrix+0
distance_matrix5=distance_matrix+0
distance_matrix6=distance_matrix+0
distance_matrix7=distance_matrix+0
distance_matrix8=distance_matrix+0
x=torch.arange(1,scale+1).float()
x=x.expand(scale,scale).unsqueeze(0)
distance_matrix1[:,0,:,:]=scale-x+1
distance_matrix2[:,0,:,:]=x
distance_matrix5[:,0,:,:]=distance_matrix2[:,0,:,:]
distance_matrix6[:,0,:,:]=distance_matrix1[:,0,:,:]
distance_matrix7[:,0,:,:]=distance_matrix2[:,0,:,:]
distance_matrix8[:,0,:,:]=distance_matrix1[:,0,:,:]
x=torch.arange(1,scale+1).float()
x=x.expand(scale,scale).unsqueeze(0).transpose(2,1)
distance_matrix3[:,1,:,:]=(scale-x+1)
distance_matrix4[:,1,:,:]=x
distance_matrix5[:,1,:,:]=distance_matrix3[:,1,:,:]
distance_matrix6[:,1,:,:]=distance_matrix3[:,1,:,:]
distance_matrix7[:,1,:,:]=distance_matrix4[:,1,:,:]
distance_matrix8[:,1,:,:]=distance_matrix4[:,1,:,:]
return distance_matrix.cuda(),distance_matrix1.cuda(),distance_matrix2.cuda(),distance_matrix3.cuda(),distance_matrix4.cuda(), \
distance_matrix5.cuda(),distance_matrix6.cuda(),distance_matrix7.cuda(),distance_matrix8.cuda()
class eight_related_context_mapping(nn.Module):
def __init__(self):
super(eight_related_context_mapping,self).__init__()
self.similarity1=similarity_measure1()
self.sigmoid=nn.Sigmoid()
self.distance_matrix,self.distance_matrix1,self.distance_matrix2,self.distance_matrix3,self.distance_matrix4, \
self.distance_matrix5,self.distance_matrix6,self.distance_matrix7,self.distance_matrix8=matrix_generation()
def forward(self, lr_feature, hr_feature,lr_feature_r, hr_feature_r):
with torch.no_grad():
scale=hr_feature.shape[-1]//lr_feature.shape[-1]
if scale%2!=0:
exit()
padding1=hr_feature[:,:1,:,:scale]*0-100
padding2=hr_feature[:,:1,:scale,:]*0-100
distance_matrix=self.distance_matrix.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix1=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix2=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix3=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix4=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix5=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix6=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix7=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
distance_matrix8=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()
lr_feature=lr_feature.unsqueeze(-1).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3],scale) \
.contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3]*scale) \
.unsqueeze(-2).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],scale,lr_feature.shape[3]*scale) \
.contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2]*scale,lr_feature.shape[3]*scale)
representation=torch.cat([lr_feature,hr_feature,distance_matrix],1)
weight=self.similarity1(representation)
representation_l=torch.cat([lr_feature[:,:,:,:-scale],hr_feature[:,:,:,scale:],distance_matrix1[:,:,:,:-scale]],1)
weight_l=self.similarity1(representation_l)
weight_l=torch.cat([padding1,weight_l],-1)
representation_r=torch.cat([lr_feature[:,:,:,scale:],hr_feature[:,:,:,:-scale],distance_matrix2[:,:,:,scale:]],1)
weight_r=self.similarity1(representation_r)
weight_r=torch.cat([weight_r,padding1],-1)
representation_t=torch.cat([lr_feature[:,:,:-scale,:],hr_feature[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)
weight_t=self.similarity1(representation_t)
weight_t=torch.cat([padding2,weight_t],-2)
representation_b=torch.cat([lr_feature[:,:,scale:,:],hr_feature[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)
weight_b=self.similarity1(representation_b)
weight_b=torch.cat([weight_b,padding2],-2)
representation_lt=torch.cat([lr_feature[:,:,:-scale,:-scale],hr_feature[:,:,scale:,scale:],distance_matrix5[:,:,:-scale,:-scale]],1)
weight_lt=self.similarity1(representation_lt)
weight_lt=torch.cat([padding2,torch.cat([padding1[...,scale:,:],weight_lt],-1)],-2)
representation_rt=torch.cat([lr_feature[:,:,:-scale,scale:],hr_feature[:,:,scale:,:-scale],distance_matrix6[:,:,:-scale,scale:]],1)
weight_rt=self.similarity1(representation_rt)
weight_rt=torch.cat([padding2,torch.cat([weight_rt,padding1[...,scale:,:]],-1)],-2)
representation_lb=torch.cat([lr_feature[:,:,scale:,:-scale],hr_feature[:,:,:-scale:,scale:],distance_matrix7[:,:,scale:,:-scale]],1)
weight_lb=self.similarity1(representation_lb)
weight_lb=torch.cat([torch.cat([padding1[...,scale:,:],weight_lb],-1),padding2],-2)
representation_rb=torch.cat([lr_feature[:,:,scale:,scale:],hr_feature[:,:,:-scale,:-scale],distance_matrix8[:,:,scale:,scale:]],1)
weight_rb=self.similarity1(representation_rb)
weight_rb=torch.cat([torch.cat([weight_rb,padding1[...,:-scale,:]],-1),padding2],-2)
weight_all=torch.cat([weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb],dim=1)
weight_norm=F.softmax(weight_all, dim=1)
if torch.mean(torch.topk(weight_all,1,dim=1)[0].float())<0:
print(torch.mean(torch.topk(weight_all,3,dim=1)[0].float()),torch.mean(torch.topk(weight_all,3,dim=1)[1].float()))
print(torch.mean(torch.topk(weight_all,1,dim=1)[0].float()),torch.mean(torch.topk(weight_all,1,dim=1)[1].float()))
return weight_norm[:,0:1,...], \
weight_norm[:,1:2,...], \
weight_norm[:,2:3,...], \
weight_norm[:,3:4,...], \
weight_norm[:,4:5,...],\
weight_norm[:,5:6,...], \
weight_norm[:,6:7,...], \
weight_norm[:,7:8,...], \
weight_norm[:,8:9,...]
class cmfsm(nn.Module):
def __init__(self,
maxdisp=192):
super(cmfsm, self).__init__()
self.maxdisp = maxdisp
self.feature_extraction = feature_extraction()
self.dres0 = nn.Sequential(
convbn_3d(64, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.dres1 = nn.Sequential(
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1))
self.dres2 = hourglass(32)
self.dres3 = hourglass(32)
self.dres4 = hourglass(32)
self.classif1 = nn.Sequential(
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.classif2 = nn.Sequential(
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.classif3 = nn.Sequential(
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.mapping_matrix=eight_related_context_mapping()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1] * \
m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, left, right):
start=time.time()
refimg_fea, half,all_feature= self.feature_extraction(left)
targetimg_fea, _ ,all_feature_right= self.feature_extraction(right)
scale=all_feature.shape[-1]//refimg_fea.shape[-1]
weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)
cost = Variable(
torch.FloatTensor(refimg_fea.size()[0],
refimg_fea.size()[1] * 2, self.maxdisp // scale,
refimg_fea.size()[2],
refimg_fea.size()[3]).zero_()).cuda()
for i in range(self.maxdisp // scale):
if i > 0:
cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:, :, :,
i:]
cost[:, refimg_fea.size()[1]:, i, :,
i:] = targetimg_fea[:, :, :, :-i]
else:
cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea
cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea
cost = cost.contiguous()
cost0 = self.dres0(cost)
cost0 = self.dres1(cost0) + cost0
out1, pre1, post1 = self.dres2(cost0, None, None)
out1 = out1 + cost0
out2, pre2, post2 = self.dres3(out1, pre1, post1)
out2 = out2 + cost0
out3, pre3, post3 = self.dres4(out2, pre1, post2)
out3 = out3 + cost0
cost1 = self.classif1(out1)
cost1 = torch.squeeze(cost1, 1)
pred1 = F.softmax(cost1, dim=1)
pred1 = disparityregression(self.maxdisp//scale)(pred1)
pred1=scale*pred1.unsqueeze(-1).expand(pred1.shape[0],pred1.shape[1],pred1.shape[2],scale) \
.contiguous().view(pred1.shape[0],pred1.shape[1],pred1.shape[2]*scale) \
.unsqueeze(-2).expand(pred1.shape[0],pred1.shape[1],scale,pred1.shape[2]*scale) \
.contiguous().view(pred1.shape[0],pred1.shape[1]*scale,pred1.shape[2]*scale)
pred1_map=pred1*weight
pred1_map[...,scale:]+=pred1[...,:-scale]*weight_l[...,scale:]
pred1_map[...,:-scale]+=pred1[...,scale:]*weight_r[...,:-scale]
pred1_map[...,scale:,:]+=pred1[...,:-scale,:]*weight_t[...,scale:,:]
pred1_map[...,:-scale,:]+=pred1[...,scale:,:]*weight_b[...,:-scale,:]
pred1_map[...,scale:,scale:]+=pred1[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
pred1_map[...,scale:,:-scale]+=pred1[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
pred1_map[...,:-scale,scale:]+=pred1[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
pred1_map[...,:-scale,:-scale]+=pred1[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
cost2 = self.classif2(out2)
cost2 = torch.squeeze(cost2, 1)+cost1
pred2 = F.softmax(cost2, dim=1)
pred2 = disparityregression(self.maxdisp//scale)(pred2)
pred2=scale*pred2.unsqueeze(-1).expand(pred2.shape[0],pred2.shape[1],pred2.shape[2],scale) \
.contiguous().view(pred2.shape[0],pred2.shape[1],pred2.shape[2]*scale) \
.unsqueeze(-2).expand(pred2.shape[0],pred2.shape[1],scale,pred2.shape[2]*scale) \
.contiguous().view(pred2.shape[0],pred2.shape[1]*scale,pred2.shape[2]*scale)
pred2_map=pred2*weight
pred2_map[...,scale:]+=pred2[...,:-scale]*weight_l[...,scale:]
pred2_map[...,:-scale]+=pred2[...,scale:]*weight_r[...,:-scale]
pred2_map[...,scale:,:]+=pred2[...,:-scale,:]*weight_t[...,scale:,:]
pred2_map[...,:-scale,:]+=pred2[...,scale:,:]*weight_b[...,:-scale,:]
pred2_map[...,scale:,scale:]+=pred2[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
pred2_map[...,scale:,:-scale]+=pred2[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
pred2_map[...,:-scale,scale:]+=pred2[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
pred2_map[...,:-scale,:-scale]+=pred2[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
cost3 = self.classif3(out3)
cost3 = torch.squeeze(cost3, 1)+cost2
pred3 = F.softmax(cost3, dim=1)
pred3 = disparityregression(self.maxdisp//scale)(pred3)
pred3=scale*pred3.unsqueeze(-1).expand(pred3.shape[0],pred3.shape[1],pred3.shape[2],scale) \
.contiguous().view(pred3.shape[0],pred3.shape[1],pred3.shape[2]*scale) \
.unsqueeze(-2).expand(pred3.shape[0],pred3.shape[1],scale,pred3.shape[2]*scale) \
.contiguous().view(pred3.shape[0],pred3.shape[1]*scale,pred3.shape[2]*scale)
pred3_map=pred3*weight
pred3_map[...,scale:]+=pred3[...,:-scale]*weight_l[...,scale:]
pred3_map[...,:-scale]+=pred3[...,scale:]*weight_r[...,:-scale]
pred3_map[...,scale:,:]+=pred3[...,:-scale,:]*weight_t[...,scale:,:]
pred3_map[...,:-scale,:]+=pred3[...,scale:,:]*weight_b[...,:-scale,:]
pred3_map[...,scale:,scale:]+=pred3[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]
pred3_map[...,scale:,:-scale]+=pred3[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]
pred3_map[...,:-scale,scale:]+=pred3[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]
pred3_map[...,:-scale,:-scale]+=pred3[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]
return pred1_map, pred2_map, pred3_map
| true
| true
|
f715da8614fe069c6b1f18f8b31418a56d1297bc
| 2,432
|
py
|
Python
|
kornia/augmentation/_3d/base.py
|
dichen-cd/kornia
|
dcd1c5e17cf4d2ae2db1f438c53245bba0afd93f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-02-10T02:02:06.000Z
|
2022-02-10T02:02:06.000Z
|
kornia/augmentation/_3d/base.py
|
dichen-cd/kornia
|
dcd1c5e17cf4d2ae2db1f438c53245bba0afd93f
|
[
"ECL-2.0",
"Apache-2.0"
] | 14
|
2021-09-26T11:07:56.000Z
|
2022-03-20T11:11:15.000Z
|
kornia/augmentation/_3d/base.py
|
dichen-cd/kornia
|
dcd1c5e17cf4d2ae2db1f438c53245bba0afd93f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import torch
import kornia
from kornia.augmentation.base import TensorWithTransformMat, _AugmentationBase
from kornia.augmentation.utils import _transform_input3d, _validate_input_dtype
class AugmentationBase3D(_AugmentationBase):
r"""AugmentationBase3D base class for customized augmentation implementations.
    For any augmentation, implementations of "generate_parameters" and "apply_transform" are required, while
    "compute_transformation" is only required when "return_transform" is passed as True.
Args:
p: probability for applying an augmentation. This param controls the augmentation probabilities
element-wise for a batch.
p_batch: probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wise.
return_transform: if ``True`` return the matrix describing the geometric transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation won't be concatenated.
same_on_batch: apply the same transformation across the batch.
"""
def __check_batching__(self, input: TensorWithTransformMat):
if isinstance(input, tuple):
inp, mat = input
if len(inp.shape) == 5:
if len(mat.shape) != 3:
raise AssertionError('Input tensor is in batch mode ' 'but transformation matrix is not')
if mat.shape[0] != inp.shape[0]:
raise AssertionError(
f"In batch dimension, input has {inp.shape[0]} but transformation matrix has {mat.shape[0]}"
)
elif len(inp.shape) in (3, 4):
if len(mat.shape) != 2:
raise AssertionError("Input tensor is in non-batch mode but transformation matrix is not")
else:
raise ValueError(f'Unrecognized output shape. Expected 3, 4 or 5, got {len(inp.shape)}')
def transform_tensor(self, input: torch.Tensor) -> torch.Tensor:
"""Convert any incoming (D, H, W), (C, D, H, W) and (B, C, D, H, W) into (B, C, D, H, W)."""
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
return _transform_input3d(input)
def identity_matrix(self, input) -> torch.Tensor:
"""Return 4x4 identity matrix."""
return kornia.eye_like(4, input)
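# A minimal subclass sketch (illustrative only): the hook names below come from the
# class docstring above, but their exact signatures follow kornia's _AugmentationBase
# and may differ between versions, so treat this as an indicative outline.
#
#   class RandomIdentity3D(AugmentationBase3D):
#       def generate_parameters(self, batch_shape):
#           return {}  # no random parameters for this no-op augmentation
#
#       def apply_transform(self, input, params, transform=None):
#           return input  # return the (B, C, D, H, W) tensor unchanged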
| 50.666667
| 116
| 0.665707
|
import torch
import kornia
from kornia.augmentation.base import TensorWithTransformMat, _AugmentationBase
from kornia.augmentation.utils import _transform_input3d, _validate_input_dtype
class AugmentationBase3D(_AugmentationBase):
def __check_batching__(self, input: TensorWithTransformMat):
if isinstance(input, tuple):
inp, mat = input
if len(inp.shape) == 5:
if len(mat.shape) != 3:
raise AssertionError('Input tensor is in batch mode ' 'but transformation matrix is not')
if mat.shape[0] != inp.shape[0]:
raise AssertionError(
f"In batch dimension, input has {inp.shape[0]} but transformation matrix has {mat.shape[0]}"
)
elif len(inp.shape) in (3, 4):
if len(mat.shape) != 2:
raise AssertionError("Input tensor is in non-batch mode but transformation matrix is not")
else:
raise ValueError(f'Unrecognized output shape. Expected 3, 4 or 5, got {len(inp.shape)}')
def transform_tensor(self, input: torch.Tensor) -> torch.Tensor:
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
return _transform_input3d(input)
def identity_matrix(self, input) -> torch.Tensor:
return kornia.eye_like(4, input)
| true
| true
|
f715dbd84fff6809be6f2c5c95ceb8898c2ac604
| 2,563
|
gyp
|
Python
|
binding.gyp
|
sumeetkakkar/node-krb5
|
3c13021b3fcd3be239d3c731455154910f4d03b6
|
[
"BSD-3-Clause"
] | null | null | null |
binding.gyp
|
sumeetkakkar/node-krb5
|
3c13021b3fcd3be239d3c731455154910f4d03b6
|
[
"BSD-3-Clause"
] | null | null | null |
binding.gyp
|
sumeetkakkar/node-krb5
|
3c13021b3fcd3be239d3c731455154910f4d03b6
|
[
"BSD-3-Clause"
] | 1
|
2019-08-29T18:45:47.000Z
|
2019-08-29T18:45:47.000Z
|
{
"targets": [{
"target_name": "krb5",
"sources": [
"./src/module.cc",
"./src/krb5_bind.cc",
"./src/gss_bind.cc",
"./src/base64.cc"
],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
'include_dirs': ["<!@(node -p \"require('node-addon-api').include\")"],
'dependencies': ["<!(node -p \"require('node-addon-api').gyp\")"],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
'defines': [
'NAPI_DISABLE_CPP_EXCEPTIONS'
],
"conditions": [
[
"OS=='win'",
{
"variables": {
"KRB_PATH": "/Program Files/MIT/Kerberos"
},
"include_dirs": ["<(KRB_PATH)/include", "<(KRB_PATH)/include/gssapi", "src"],
"conditions": [
[
"target_arch=='x64'",
{
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/MP /EHsc"]
},
"VCLinkerTool": {
"AdditionalLibraryDirectories": ["<(KRB_PATH)/lib/amd64/"]
}
},
"libraries": ["-lkrb5_64.lib", "-lgssapi64.lib"]
}
],
[
"target_arch=='ia32'",
{
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/MP /EHsc"]
},
"VCLinkerTool": {
"AdditionalLibraryDirectories": ["<(KRB_PATH)/lib/amd64/"]
}
},
"libraries": ["-lkrb5_32.lib", "-lgssapi32.lib"]
}
]
]
}
],
[
"OS!='win'",
{
"libraries": ["-lkrb5", "-lgssapi_krb5"]
}
]
]
}]
}
| 36.614286
| 98
| 0.285603
|
{
"targets": [{
"target_name": "krb5",
"sources": [
"./src/module.cc",
"./src/krb5_bind.cc",
"./src/gss_bind.cc",
"./src/base64.cc"
],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
'include_dirs': ["<!@(node -p \"require('node-addon-api').include\")"],
'dependencies': ["<!(node -p \"require('node-addon-api').gyp\")"],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
'defines': [
'NAPI_DISABLE_CPP_EXCEPTIONS'
],
"conditions": [
[
"OS=='win'",
{
"variables": {
"KRB_PATH": "/Program Files/MIT/Kerberos"
},
"include_dirs": ["<(KRB_PATH)/include", "<(KRB_PATH)/include/gssapi", "src"],
"conditions": [
[
"target_arch=='x64'",
{
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/MP /EHsc"]
},
"VCLinkerTool": {
"AdditionalLibraryDirectories": ["<(KRB_PATH)/lib/amd64/"]
}
},
"libraries": ["-lkrb5_64.lib", "-lgssapi64.lib"]
}
],
[
"target_arch=='ia32'",
{
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/MP /EHsc"]
},
"VCLinkerTool": {
"AdditionalLibraryDirectories": ["<(KRB_PATH)/lib/amd64/"]
}
},
"libraries": ["-lkrb5_32.lib", "-lgssapi32.lib"]
}
]
]
}
],
[
"OS!='win'",
{
"libraries": ["-lkrb5", "-lgssapi_krb5"]
}
]
]
}]
}
| true
| true
|
f715dc50ff4886ddbbcf5f5817f1d0e1a2b60106
| 461
|
py
|
Python
|
jdxapi/routes/__init__.py
|
jobdataexchange/jdx-api
|
7815a6463de56423c3b4196648607c4ebe56828c
|
[
"Apache-2.0"
] | null | null | null |
jdxapi/routes/__init__.py
|
jobdataexchange/jdx-api
|
7815a6463de56423c3b4196648607c4ebe56828c
|
[
"Apache-2.0"
] | 9
|
2019-12-26T17:39:58.000Z
|
2022-01-13T01:59:49.000Z
|
jdxapi/routes/__init__.py
|
jobdataexchange/jdx-api
|
7815a6463de56423c3b4196648607c4ebe56828c
|
[
"Apache-2.0"
] | null | null | null |
from jdxapi.routes.health import *
from jdxapi.routes.upload_job_description_file import *
from jdxapi.routes.upload_job_description_context import *
from jdxapi.routes.framework_recommendations import *
from jdxapi.routes.framework_selections import *
from jdxapi.routes.generate_job_schema_plus import *
from jdxapi.routes.get_score import *
from jdxapi.routes.match_table import *
from jdxapi.routes.user_actions import *
from jdxapi.routes.preview import *
| 41.909091
| 58
| 0.848156
|
from jdxapi.routes.health import *
from jdxapi.routes.upload_job_description_file import *
from jdxapi.routes.upload_job_description_context import *
from jdxapi.routes.framework_recommendations import *
from jdxapi.routes.framework_selections import *
from jdxapi.routes.generate_job_schema_plus import *
from jdxapi.routes.get_score import *
from jdxapi.routes.match_table import *
from jdxapi.routes.user_actions import *
from jdxapi.routes.preview import *
| true
| true
|
f715deb9771158a547fcbbc301e8725ecd0fded2
| 3,896
|
py
|
Python
|
Python/PyParsing/node_utils.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 115
|
2015-03-23T13:34:42.000Z
|
2022-03-21T00:27:21.000Z
|
Python/PyParsing/node_utils.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 56
|
2015-02-25T15:04:26.000Z
|
2022-01-03T07:42:48.000Z
|
Python/PyParsing/node_utils.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 59
|
2015-11-26T11:44:51.000Z
|
2022-03-21T00:27:22.000Z
|
#!/usr/bin/env python
'''module containing various functions for working with trees and nodes'''
from node_parser import NodeParser
import unittest
def depth(node):
'''compute the depth of the given tree'''
if node is None:
return 0
elif node.is_leaf():
return 1
else:
return 1 + max(list(map(depth, node.children())))
def depth_first_iterator(node):
    '''returns a depth-first iterator over the node and its children'''
if node is not None:
node_stack = [(node, -1)]
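        # Each stack entry is (node, index of the next child to visit); -1 marks a node
        # that has not been yielded yet, so parents are emitted before their children.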
while len(node_stack) > 0:
node, child_index = node_stack.pop()
if child_index == -1:
if not node.is_leaf():
node_stack.append((node, child_index + 1))
yield node
elif child_index < node.nr_children():
node_stack.append((node, child_index + 1))
node_stack.append((node.child(child_index), -1))
def nr_leaf_nodes(start_node):
    '''returns the number of leaf nodes starting from the given node'''
nr = 0
for node in depth_first_iterator(start_node):
if node.is_leaf():
nr += 1
return nr
class DepthTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
self.assertEqual(depth(tree), 0)
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
self.assertEqual(depth(tree), 1)
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
self.assertEqual(depth(tree), 3)
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
self.assertEqual(depth(tree), 4)
class DepthFirstIteratorTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, [])
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1'])
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4'])
class NrLeafsTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 0)
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 1)
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 4)
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 1)
if __name__ == '__main__':
unittest.main()
| 28.647059
| 74
| 0.566992
|
from node_parser import NodeParser
import unittest
def depth(node):
if node is None:
return 0
elif node.is_leaf():
return 1
else:
return 1 + max(list(map(depth, node.children())))
def depth_first_iterator(node):
if node is not None:
node_stack = [(node, -1)]
while len(node_stack) > 0:
node, child_index = node_stack.pop()
if child_index == -1:
if not node.is_leaf():
node_stack.append((node, child_index + 1))
yield node
elif child_index < node.nr_children():
node_stack.append((node, child_index + 1))
node_stack.append((node.child(child_index), -1))
def nr_leaf_nodes(start_node):
nr = 0
for node in depth_first_iterator(start_node):
if node.is_leaf():
nr += 1
return nr
class DepthTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
self.assertEqual(depth(tree), 0)
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
self.assertEqual(depth(tree), 1)
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
self.assertEqual(depth(tree), 3)
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
self.assertEqual(depth(tree), 4)
class DepthFirstIteratorTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, [])
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1'])
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
nodes = []
for node in depth_first_iterator(tree):
nodes.append(node.name)
self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4'])
class NrLeafsTest(unittest.TestCase):
def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 0)
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 1)
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 4)
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
self.assertEqual(nr_leaf_nodes(tree), 1)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f715df136e0602e32db3dc820ce6e68ce0ad5f80
| 1,342
|
py
|
Python
|
LeetCodeSolver/pythonSolutions/from1to100/Solution74.py
|
ZeromaXHe/Learning-Platform
|
ec75c2dbd472a568d1cd482450cc471295659c62
|
[
"Apache-2.0"
] | null | null | null |
LeetCodeSolver/pythonSolutions/from1to100/Solution74.py
|
ZeromaXHe/Learning-Platform
|
ec75c2dbd472a568d1cd482450cc471295659c62
|
[
"Apache-2.0"
] | null | null | null |
LeetCodeSolver/pythonSolutions/from1to100/Solution74.py
|
ZeromaXHe/Learning-Platform
|
ec75c2dbd472a568d1cd482450cc471295659c62
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
class Solution:
"""
    74. Search a 2D Matrix | Difficulty: Medium | Tags: array, binary search
    Write an efficient algorithm that determines whether a target value exists in an m x n matrix.
    The matrix has the following properties:
    Integers in each row are sorted in ascending order from left to right.
    The first integer of each row is greater than the last integer of the previous row.
    Example 1:
    Input: matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 3
    Output: true
    Example 2:
    Input: matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 13
    Output: false
    Constraints:
    m == matrix.length
    n == matrix[i].length
    1 <= m, n <= 100
    -10^4 <= matrix[i][j], target <= 10^4
    Source: LeetCode (LeetCode-CN)
    Link: https://leetcode-cn.com/problems/search-a-2d-matrix
    Copyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints must cite the source.
"""
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
"""
        Runtime: 44 ms, beating 36.61% of Python3 submissions
        Memory usage: 15.1 MB, beating 49.64% of Python3 submissions
:param matrix:
:param target:
:return:
"""
x = 0
y = len(matrix[0]) - 1
        # Extracting len(matrix) into a local variable n makes this faster:
        # Runtime: 36 ms, beating 83.96% of Python3 submissions
        # Memory usage: 14.8 MB, beating 99.17% of Python3 submissions
while x < len(matrix) and y >= 0:
if matrix[x][y] == target:
return True
elif matrix[x][y] > target:
y -= 1
else:
x += 1
return False
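# A minimal usage sketch of the staircase search above, reusing the matrices from the
# docstring examples (the `demo` name is illustrative and not part of the original file):
if __name__ == "__main__":
    demo = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 60]]
    print(Solution().searchMatrix(demo, 3))   # expected: True
    print(Solution().searchMatrix(demo, 13))  # expected: False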
| 25.807692
| 73
| 0.508197
|
from typing import List
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
x = 0
y = len(matrix[0]) - 1
while x < len(matrix) and y >= 0:
if matrix[x][y] == target:
return True
elif matrix[x][y] > target:
y -= 1
else:
x += 1
return False
| true
| true
|
f715e0177b8228f28f6e785e8abfae78e9cd6435
| 10,435
|
py
|
Python
|
google/ads/googleads/v9/services/services/customer_customizer_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/services/services/customer_customizer_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/services/services/customer_customizer_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.services.types import customer_customizer_service
from .base import CustomerCustomizerServiceTransport, DEFAULT_CLIENT_INFO
class CustomerCustomizerServiceGrpcTransport(
CustomerCustomizerServiceTransport
):
"""gRPC backend transport for CustomerCustomizerService.
Service to manage customer customizer
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_customer_customizers(
self,
) -> Callable[
[customer_customizer_service.MutateCustomerCustomizersRequest],
customer_customizer_service.MutateCustomerCustomizersResponse,
]:
r"""Return a callable for the mutate customer customizers method over gRPC.
Creates, updates or removes customer customizers.
Operation statuses are returned.
Returns:
Callable[[~.MutateCustomerCustomizersRequest],
~.MutateCustomerCustomizersResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_customer_customizers" not in self._stubs:
self._stubs[
"mutate_customer_customizers"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.CustomerCustomizerService/MutateCustomerCustomizers",
request_serializer=customer_customizer_service.MutateCustomerCustomizersRequest.serialize,
response_deserializer=customer_customizer_service.MutateCustomerCustomizersResponse.deserialize,
)
return self._stubs["mutate_customer_customizers"]
__all__ = ("CustomerCustomizerServiceGrpcTransport",)
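# A hedged usage sketch (kept as comments because it needs live Google Ads credentials):
# assuming Application Default Credentials are configured in the environment, the
# transport can be constructed directly and will open a secure channel to
# "googleads.googleapis.com:443"; the stub property is created lazily on first access.
#
#   transport = CustomerCustomizerServiceGrpcTransport()
#   rpc = transport.mutate_customer_customizers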
| 41.74
| 112
| 0.628079
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.ads.googleads.v9.services.types import customer_customizer_service
from .base import CustomerCustomizerServiceTransport, DEFAULT_CLIENT_INFO
class CustomerCustomizerServiceGrpcTransport(
CustomerCustomizerServiceTransport
):
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {}
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def mutate_customer_customizers(
self,
) -> Callable[
[customer_customizer_service.MutateCustomerCustomizersRequest],
customer_customizer_service.MutateCustomerCustomizersResponse,
]:
if "mutate_customer_customizers" not in self._stubs:
self._stubs[
"mutate_customer_customizers"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.CustomerCustomizerService/MutateCustomerCustomizers",
request_serializer=customer_customizer_service.MutateCustomerCustomizersRequest.serialize,
response_deserializer=customer_customizer_service.MutateCustomerCustomizersResponse.deserialize,
)
return self._stubs["mutate_customer_customizers"]
__all__ = ("CustomerCustomizerServiceGrpcTransport",)
| true
| true
|
f715e02ee65c789c8ebe66b9da527060e685ba50
| 475
|
py
|
Python
|
examples/sw.py
|
khirotaka/testbed
|
e32384a3267d5282fb9f2df22597dfa7fb9aa17d
|
[
"MIT"
] | null | null | null |
examples/sw.py
|
khirotaka/testbed
|
e32384a3267d5282fb9f2df22597dfa7fb9aa17d
|
[
"MIT"
] | 2
|
2020-08-09T06:26:51.000Z
|
2020-08-10T01:08:28.000Z
|
examples/sw.py
|
khirotaka/testbed
|
e32384a3267d5282fb9f2df22597dfa7fb9aa17d
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
from testbed._rust import sliding_window
x = np.random.randn(5000, 5)
s = time.time()
rustout = sliding_window(x, 100, 1)
print("=" * 50)
print("Rust Speed: ", time.time() - s)
print(rustout.shape)
def sw(array, ws, over):
sl = len(array)
return [array[i:i+ws] for i in range(0, sl-ws, over)]
print("=" * 50)
s = time.time()
tmp = sw(x, 100, 1)
tmp = np.stack(tmp, 0)
print("Python Speed: ", time.time() - s)
print(tmp.shape)
| 17.592593
| 57
| 0.633684
|
import time
import numpy as np
from testbed._rust import sliding_window
x = np.random.randn(5000, 5)
s = time.time()
rustout = sliding_window(x, 100, 1)
print("=" * 50)
print("Rust Speed: ", time.time() - s)
print(rustout.shape)
def sw(array, ws, over):
sl = len(array)
return [array[i:i+ws] for i in range(0, sl-ws, over)]
print("=" * 50)
s = time.time()
tmp = sw(x, 100, 1)
tmp = np.stack(tmp, 0)
print("Python Speed: ", time.time() - s)
print(tmp.shape)
| true
| true
|
f715e0b6071694faa85c51616ffa6eb6433f5b4c
| 357
|
py
|
Python
|
DMOJ/DMOPC/DMOPC_19_C5P3_Captivating_Construction_Challenge.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | null | null | null |
DMOJ/DMOPC/DMOPC_19_C5P3_Captivating_Construction_Challenge.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | 1
|
2021-10-14T18:26:56.000Z
|
2021-10-14T18:26:56.000Z
|
DMOJ/DMOPC/DMOPC_19_C5P3_Captivating_Construction_Challenge.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | 1
|
2021-08-06T03:39:55.000Z
|
2021-08-06T03:39:55.000Z
|
def gcd(m, n):
if n == 0:
return m
return gcd(n, m%n)
ans = 0
H, V = map(int, input().split())
for x in range(H):
for y in range(1, V):
mx, my = y//gcd(x, y), x//gcd(x, y)
xx, yy = mx+x, my+y
while xx <= H and yy <= V:
ans += (H-xx) * (V-yy)
xx += mx
yy += my
print(ans)
| 16.227273
| 43
| 0.40056
|
def gcd(m, n):
if n == 0:
return m
return gcd(n, m%n)
ans = 0
H, V = map(int, input().split())
for x in range(H):
for y in range(1, V):
mx, my = y//gcd(x, y), x//gcd(x, y)
xx, yy = mx+x, my+y
while xx <= H and yy <= V:
ans += (H-xx) * (V-yy)
xx += mx
yy += my
print(ans)
| true
| true
|
f715e0ba86825ddedbad3acbdb1a48496d9dfaa8
| 1,368
|
py
|
Python
|
fetch_cast_html.py
|
nmaswood/tv_scraping
|
91573df0ca9512ac1744cddc8635f681d8ed596a
|
[
"Apache-2.0"
] | null | null | null |
fetch_cast_html.py
|
nmaswood/tv_scraping
|
91573df0ca9512ac1744cddc8635f681d8ed596a
|
[
"Apache-2.0"
] | null | null | null |
fetch_cast_html.py
|
nmaswood/tv_scraping
|
91573df0ca9512ac1744cddc8635f681d8ed596a
|
[
"Apache-2.0"
] | null | null | null |
from urllib.request import FancyURLopener
from bs4 import BeautifulSoup
from random import choice
import csv
from time import sleep
from urllib.parse import quote,unquote
import json
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
]
class MyOpener(FancyURLopener, object):
version = choice(user_agents)
myopener = MyOpener()
def _ids():
with open("meta_final.csv", 'r') as infile:
tv_reader = csv.reader(infile)
next(tv_reader)
return list(map(lambda x : x[-1], tv_reader))
def fetch_cast_data():
for index, _id in enumerate(_ids()):
print (index)
url ='http://www.imdb.com/title/{}/fullcredits?ref_=tt_ql_1'.format(_id)
try:
html = myopener.open(url).read()
        except Exception:
            html = b"error"  # bytes, since the output file below is opened in binary mode
with open('data/' + _id + '.html', 'wb') as outfile:
outfile.write(html)
sleep(.5)
fetch_cast_data()
| 31.090909
| 110
| 0.646199
|
from urllib.request import FancyURLopener
from bs4 import BeautifulSoup
from random import choice
import csv
from time import sleep
from urllib.parse import quote,unquote
import json
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
]
class MyOpener(FancyURLopener, object):
version = choice(user_agents)
myopener = MyOpener()
def _ids():
with open("meta_final.csv", 'r') as infile:
tv_reader = csv.reader(infile)
next(tv_reader)
return list(map(lambda x : x[-1], tv_reader))
def fetch_cast_data():
for index, _id in enumerate(_ids()):
print (index)
url ='http://www.imdb.com/title/{}/fullcredits?ref_=tt_ql_1'.format(_id)
try:
html = myopener.open(url).read()
        except Exception:
            html = b"error"
with open('data/' + _id + '.html', 'wb') as outfile:
outfile.write(html)
sleep(.5)
fetch_cast_data()
| true
| true
|
f715e119c6f7e84008328d68567e938b4668623f
| 696
|
py
|
Python
|
informacoes_emails.py
|
katianaz/GiftHelper
|
1fbff4e7902c25950a5f50f04f0b2c834842ccbe
|
[
"MIT"
] | null | null | null |
informacoes_emails.py
|
katianaz/GiftHelper
|
1fbff4e7902c25950a5f50f04f0b2c834842ccbe
|
[
"MIT"
] | null | null | null |
informacoes_emails.py
|
katianaz/GiftHelper
|
1fbff4e7902c25950a5f50f04f0b2c834842ccbe
|
[
"MIT"
] | 1
|
2021-03-18T22:44:43.000Z
|
2021-03-18T22:44:43.000Z
|
import pontuacao_categorias
import pandas as pd
nomes = []
nomes_presenteados = []
enderecos_emails = []
for p in range(len(pontuacao_categorias.tabela.index)):
nomes.append(pontuacao_categorias.tabela['3'][p])
nomes_presenteados.append(pontuacao_categorias.tabela['4'][p])
enderecos_emails.append(pontuacao_categorias.tabela['2'][p])
informacoes = {'Nome': nomes,
'Email': enderecos_emails,
'Presenteado': nomes_presenteados,
'Sugestoes': pontuacao_categorias.sugestoes}
infos = pd.DataFrame(informacoes, columns=['Nome', 'Email', 'Presenteado', 'Sugestoes'])
infos.to_csv('infos_emails.csv', encoding='latin-1')
| 33.142857
| 89
| 0.686782
|
import pontuacao_categorias
import pandas as pd
nomes = []
nomes_presenteados = []
enderecos_emails = []
for p in range(len(pontuacao_categorias.tabela.index)):
nomes.append(pontuacao_categorias.tabela['3'][p])
nomes_presenteados.append(pontuacao_categorias.tabela['4'][p])
enderecos_emails.append(pontuacao_categorias.tabela['2'][p])
informacoes = {'Nome': nomes,
'Email': enderecos_emails,
'Presenteado': nomes_presenteados,
'Sugestoes': pontuacao_categorias.sugestoes}
infos = pd.DataFrame(informacoes, columns=['Nome', 'Email', 'Presenteado', 'Sugestoes'])
infos.to_csv('infos_emails.csv', encoding='latin-1')
| true
| true
|
f715e2b4af325720c565d744e3e3558d6ec968b2
| 11,243
|
py
|
Python
|
bookworm/annotation/annotation_gui.py
|
mush42/bookworm
|
a4bdd89363137a89a1bed1e9e072de4fb55576fd
|
[
"MIT"
] | 18
|
2019-07-19T22:12:15.000Z
|
2020-08-26T17:45:19.000Z
|
bookworm/annotation/annotation_gui.py
|
mush42/bookworm
|
a4bdd89363137a89a1bed1e9e072de4fb55576fd
|
[
"MIT"
] | 44
|
2019-07-15T10:17:00.000Z
|
2020-07-26T11:22:53.000Z
|
bookworm/annotation/annotation_gui.py
|
mush42/bookworm
|
a4bdd89363137a89a1bed1e9e072de4fb55576fd
|
[
"MIT"
] | 9
|
2019-09-03T13:13:31.000Z
|
2020-08-25T13:55:27.000Z
|
# coding: utf-8
import wx
from enum import IntEnum
from bookworm import speech
from bookworm.gui.settings import SettingsPanel
from bookworm.structured_text import TextRange
from bookworm.logger import logger
from .annotator import Bookmarker, NoteTaker, Quoter
from .annotation_dialogs import (
BookmarksViewer,
CommentsDialog,
QuotesDialog,
GenericAnnotationWithContentDialog,
)
log = logger.getChild(__name__)
class AnnotationSettingsPanel(SettingsPanel):
config_section = "annotation"
def addControls(self):
# Translators: the title of a group of controls in the
UIBox = self.make_static_box(_("Annotation"))
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Speak the bookmark when jumping"),
name="annotation.speak_bookmarks_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Select the bookmarked line when jumping"),
name="annotation.select_bookmarked_line_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Use visual styles to indicate annotations"),
name="annotation.use_visuals",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Use sounds to indicate the presence of comments"),
name="annotation.play_sound_for_comments",
)
class AnnotationsMenuIds(IntEnum):
addBookmark = 241
addNamedBookmark = 242
addNote = 243
quoteSelection = 244
viewBookmarks = 245
class StatelessAnnotationsMenuIds(IntEnum):
viewNotes = 246
viewQuotes = 247
ANNOTATIONS_KEYBOARD_SHORTCUTS = {
AnnotationsMenuIds.addBookmark: "Ctrl-B",
AnnotationsMenuIds.addNamedBookmark: "Ctrl-Shift-B",
AnnotationsMenuIds.addNote: "Ctrl-M",
AnnotationsMenuIds.quoteSelection: "Ctrl-H",
}
class AnnotationMenu(wx.Menu):
"""Annotation menu."""
def __init__(self, service):
super().__init__()
self.service = service
self.view = service.view
self.reader = service.reader
# Add menu items
self.Append(
AnnotationsMenuIds.addBookmark,
# Translators: the label of an item in the application menubar
_("Add &Bookmark\tCtrl-B"),
# Translators: the help text of an item in the application menubar
_("Add a bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNamedBookmark,
# Translators: the label of an item in the application menubar
_("Add &Named Bookmark...\tCtrl-Shift-B"),
# Translators: the help text of an item in the application menubar
_("Add a named bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNote,
# Translators: the label of an item in the application menubar
_("Add Co&mment...\tCtrl-M"),
# Translators: the help text of an item in the application menubar
_("Add a comment at the current position"),
)
self.Append(
AnnotationsMenuIds.quoteSelection,
# Translators: the label of an item in the application menubar
_("&Highlight Selection\tCtrl-H"),
# Translators: the help text of an item in the application menubar
_("Highlight selected text and save it."),
)
self.Append(
AnnotationsMenuIds.viewBookmarks,
# Translators: the label of an item in the application menubar
_("Saved &Bookmarks..."),
# Translators: the help text of an item in the application menubar
_("View added bookmarks"),
)
self.Append(
StatelessAnnotationsMenuIds.viewNotes,
# Translators: the label of an item in the application menubar
_("Saved Co&mments..."),
# Translators: the help text of an item in the application menubar
_("View, edit, and remove comments."),
)
self.Append(
StatelessAnnotationsMenuIds.viewQuotes,
# Translators: the label of an item in the application menubar
_("Saved &Highlights..."),
# Translators: the help text of an item in the application menubar
_("View saved highlights."),
)
# Translators: the label of an item in the application menubar
# EventHandlers
self.view.Bind(
wx.EVT_MENU, self.onAddBookmark, id=AnnotationsMenuIds.addBookmark
)
self.view.Bind(
wx.EVT_MENU, self.onAddNamedBookmark, id=AnnotationsMenuIds.addNamedBookmark
)
self.view.Bind(wx.EVT_MENU, self.onAddNote, id=AnnotationsMenuIds.addNote)
self.view.Bind(
wx.EVT_MENU, self.onQuoteSelection, id=AnnotationsMenuIds.quoteSelection
)
self.view.Bind(
wx.EVT_MENU, self.onViewBookmarks, id=AnnotationsMenuIds.viewBookmarks
)
self.view.Bind(
wx.EVT_MENU, self.onViewNotes, id=StatelessAnnotationsMenuIds.viewNotes
)
self.view.Bind(
wx.EVT_MENU, self.onViewQuotes, id=StatelessAnnotationsMenuIds.viewQuotes
)
def _add_bookmark(self, name=""):
bookmarker = Bookmarker(self.reader)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
__, __, current_lino = self.view.contentTextCtrl.PositionToXY(insertionPoint)
count = 0
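        # Toggle behaviour: bookmarks already on the caret's line are removed first;
        # a new one is created only if nothing was removed or an explicit name was given.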
for bkm in bookmarker.get_for_page(self.reader.current_page):
__, __, lino = self.view.contentTextCtrl.PositionToXY(bkm.position)
if lino == current_lino:
count += 1
bookmarker.delete(bkm.id)
self.service.style_bookmark(self.view, bkm.position, enable=False)
if count and not name:
return speech.announce(_("Bookmark removed"))
Bookmarker(self.reader).create(title=name, position=insertionPoint)
# Translators: spoken message
speech.announce(_("Bookmark Added"))
self.service.style_bookmark(self.view, insertionPoint)
def onAddBookmark(self, event):
self._add_bookmark()
def onAddNamedBookmark(self, event):
bookmark_name = self.view.get_text_from_user(
# Translators: title of a dialog
_("Add Named Bookmark"),
# Translators: label of a text entry
_("Bookmark name:"),
)
if bookmark_name:
self._add_bookmark(bookmark_name)
def onAddNote(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
comment_text = self.view.get_text_from_user(
# Translators: the title of a dialog to add a comment
_("New Comment"),
# Translators: the label of an edit field to enter a comment
_("Comment:"),
style=wx.OK | wx.CANCEL | wx.TE_MULTILINE | wx.CENTER,
)
if not comment_text:
return
note = NoteTaker(self.reader).create(
title="", content=comment_text, position=insertionPoint
)
self.service.style_comment(self.view, insertionPoint)
if _with_tags:
# add tags
tags_text = self.view.get_text_from_user(
# Translators: title of a dialog
_("Tag Comment"),
# Translators: label of a text entry
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
note.tags.append(tag.strip())
NoteTaker.model.session.commit()
def onQuoteSelection(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
quoter = Quoter(self.reader)
selected_text = self.view.contentTextCtrl.GetStringSelection()
if not selected_text:
return speech.announce(_("No selection"))
x, y = self.view.get_selection_range()
for q in quoter.get_for_page():
q_range = TextRange(q.start_pos, q.end_pos)
if (q_range.start == x) and (q_range.stop == y):
quoter.delete(q.id)
self.service.style_highlight(self.view, x, y, enable=False)
# Translators: spoken message
return speech.announce(_("Highlight removed"))
elif (q.start_pos < x) and (q.end_pos > y):
# Translators: spoken message
speech.announce(_("Already highlighted"))
return wx.Bell()
if (x in q_range) or (y in q_range):
if x not in q_range:
q.start_pos = x
q.session.commit()
self.service.style_highlight(self.view, x, q_range.stop)
return speech.announce(_("Highlight extended"))
elif y not in q_range:
q.end_pos = y
q.session.commit()
self.service.style_highlight(self.view, q_range.start, y)
# Translators: spoken message
return speech.announce(_("Highlight extended"))
quote = quoter.create(title="", content=selected_text, start_pos=x, end_pos=y)
# Translators: spoken message
speech.announce(_("Selection highlighted"))
self.service.style_highlight(self.view, x, y)
if _with_tags:
# add tags
tags_text = self.view.get_text_from_user(
# Translators: title of a dialog
_("Tag Highlight"),
# Translators: label of a text entry
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
quote.tags.append(tag.strip())
Quoter.model.session.commit()
def onViewBookmarks(self, event):
with BookmarksViewer(
parent=self.view,
reader=self.reader,
annotator=Bookmarker,
# Translators: the title of a dialog to view bookmarks
title=_("Bookmarks | {book}").format(book=self.reader.current_book.title),
) as dlg:
dlg.ShowModal()
def onViewNotes(self, event):
Dialog = (
CommentsDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Comments"),
reader=self.reader,
annotator_cls=NoteTaker,
can_edit=True,
) as dlg:
dlg.ShowModal()
def onViewQuotes(self, event):
Dialog = (
QuotesDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Highlights"),
reader=self.reader,
annotator_cls=Quoter,
) as dlg:
dlg.ShowModal()
| 37.228477
| 88
| 0.592102
|
import wx
from enum import IntEnum
from bookworm import speech
from bookworm.gui.settings import SettingsPanel
from bookworm.structured_text import TextRange
from bookworm.logger import logger
from .annotator import Bookmarker, NoteTaker, Quoter
from .annotation_dialogs import (
BookmarksViewer,
CommentsDialog,
QuotesDialog,
GenericAnnotationWithContentDialog,
)
log = logger.getChild(__name__)
class AnnotationSettingsPanel(SettingsPanel):
config_section = "annotation"
def addControls(self):
UIBox = self.make_static_box(_("Annotation"))
wx.CheckBox(
UIBox,
-1,
_("Speak the bookmark when jumping"),
name="annotation.speak_bookmarks_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
_("Select the bookmarked line when jumping"),
name="annotation.select_bookmarked_line_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
_("Use visual styles to indicate annotations"),
name="annotation.use_visuals",
)
wx.CheckBox(
UIBox,
-1,
_("Use sounds to indicate the presence of comments"),
name="annotation.play_sound_for_comments",
)
class AnnotationsMenuIds(IntEnum):
addBookmark = 241
addNamedBookmark = 242
addNote = 243
quoteSelection = 244
viewBookmarks = 245
class StatelessAnnotationsMenuIds(IntEnum):
viewNotes = 246
viewQuotes = 247
ANNOTATIONS_KEYBOARD_SHORTCUTS = {
AnnotationsMenuIds.addBookmark: "Ctrl-B",
AnnotationsMenuIds.addNamedBookmark: "Ctrl-Shift-B",
AnnotationsMenuIds.addNote: "Ctrl-M",
AnnotationsMenuIds.quoteSelection: "Ctrl-H",
}
class AnnotationMenu(wx.Menu):
def __init__(self, service):
super().__init__()
self.service = service
self.view = service.view
self.reader = service.reader
self.Append(
AnnotationsMenuIds.addBookmark,
_("Add &Bookmark\tCtrl-B"),
_("Add a bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNamedBookmark,
_("Add &Named Bookmark...\tCtrl-Shift-B"),
_("Add a named bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNote,
_("Add Co&mment...\tCtrl-M"),
_("Add a comment at the current position"),
)
self.Append(
AnnotationsMenuIds.quoteSelection,
_("&Highlight Selection\tCtrl-H"),
_("Highlight selected text and save it."),
)
self.Append(
AnnotationsMenuIds.viewBookmarks,
_("Saved &Bookmarks..."),
_("View added bookmarks"),
)
self.Append(
StatelessAnnotationsMenuIds.viewNotes,
_("Saved Co&mments..."),
_("View, edit, and remove comments."),
)
self.Append(
StatelessAnnotationsMenuIds.viewQuotes,
_("Saved &Highlights..."),
_("View saved highlights."),
)
self.view.Bind(
wx.EVT_MENU, self.onAddBookmark, id=AnnotationsMenuIds.addBookmark
)
self.view.Bind(
wx.EVT_MENU, self.onAddNamedBookmark, id=AnnotationsMenuIds.addNamedBookmark
)
self.view.Bind(wx.EVT_MENU, self.onAddNote, id=AnnotationsMenuIds.addNote)
self.view.Bind(
wx.EVT_MENU, self.onQuoteSelection, id=AnnotationsMenuIds.quoteSelection
)
self.view.Bind(
wx.EVT_MENU, self.onViewBookmarks, id=AnnotationsMenuIds.viewBookmarks
)
self.view.Bind(
wx.EVT_MENU, self.onViewNotes, id=StatelessAnnotationsMenuIds.viewNotes
)
self.view.Bind(
wx.EVT_MENU, self.onViewQuotes, id=StatelessAnnotationsMenuIds.viewQuotes
)
def _add_bookmark(self, name=""):
bookmarker = Bookmarker(self.reader)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
__, __, current_lino = self.view.contentTextCtrl.PositionToXY(insertionPoint)
count = 0
for bkm in bookmarker.get_for_page(self.reader.current_page):
__, __, lino = self.view.contentTextCtrl.PositionToXY(bkm.position)
if lino == current_lino:
count += 1
bookmarker.delete(bkm.id)
self.service.style_bookmark(self.view, bkm.position, enable=False)
if count and not name:
return speech.announce(_("Bookmark removed"))
Bookmarker(self.reader).create(title=name, position=insertionPoint)
speech.announce(_("Bookmark Added"))
self.service.style_bookmark(self.view, insertionPoint)
def onAddBookmark(self, event):
self._add_bookmark()
def onAddNamedBookmark(self, event):
bookmark_name = self.view.get_text_from_user(
_("Add Named Bookmark"),
_("Bookmark name:"),
)
if bookmark_name:
self._add_bookmark(bookmark_name)
def onAddNote(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
comment_text = self.view.get_text_from_user(
_("New Comment"),
_("Comment:"),
style=wx.OK | wx.CANCEL | wx.TE_MULTILINE | wx.CENTER,
)
if not comment_text:
return
note = NoteTaker(self.reader).create(
title="", content=comment_text, position=insertionPoint
)
self.service.style_comment(self.view, insertionPoint)
if _with_tags:
tags_text = self.view.get_text_from_user(
_("Tag Comment"),
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
note.tags.append(tag.strip())
NoteTaker.model.session.commit()
def onQuoteSelection(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
quoter = Quoter(self.reader)
selected_text = self.view.contentTextCtrl.GetStringSelection()
if not selected_text:
return speech.announce(_("No selection"))
x, y = self.view.get_selection_range()
for q in quoter.get_for_page():
q_range = TextRange(q.start_pos, q.end_pos)
if (q_range.start == x) and (q_range.stop == y):
quoter.delete(q.id)
self.service.style_highlight(self.view, x, y, enable=False)
return speech.announce(_("Highlight removed"))
elif (q.start_pos < x) and (q.end_pos > y):
speech.announce(_("Already highlighted"))
return wx.Bell()
if (x in q_range) or (y in q_range):
if x not in q_range:
q.start_pos = x
q.session.commit()
self.service.style_highlight(self.view, x, q_range.stop)
return speech.announce(_("Highlight extended"))
elif y not in q_range:
q.end_pos = y
q.session.commit()
self.service.style_highlight(self.view, q_range.start, y)
return speech.announce(_("Highlight extended"))
quote = quoter.create(title="", content=selected_text, start_pos=x, end_pos=y)
speech.announce(_("Selection highlighted"))
self.service.style_highlight(self.view, x, y)
if _with_tags:
tags_text = self.view.get_text_from_user(
_("Tag Highlight"),
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
quote.tags.append(tag.strip())
Quoter.model.session.commit()
def onViewBookmarks(self, event):
with BookmarksViewer(
parent=self.view,
reader=self.reader,
annotator=Bookmarker,
title=_("Bookmarks | {book}").format(book=self.reader.current_book.title),
) as dlg:
dlg.ShowModal()
def onViewNotes(self, event):
Dialog = (
CommentsDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Comments"),
reader=self.reader,
annotator_cls=NoteTaker,
can_edit=True,
) as dlg:
dlg.ShowModal()
def onViewQuotes(self, event):
Dialog = (
QuotesDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Highlights"),
reader=self.reader,
annotator_cls=Quoter,
) as dlg:
dlg.ShowModal()
| true
| true
|
f715e2db12bbd9d23ff08edf3785830ee8d31ab7
| 3,291
|
py
|
Python
|
orders/models.py
|
pmaigutyak/mp-shop
|
14ea67f71fd91a282d2070414924708214fc6464
|
[
"0BSD"
] | 2
|
2018-03-14T11:32:36.000Z
|
2021-09-25T14:31:36.000Z
|
orders/models.py
|
pmaigutyak/mp-shop
|
14ea67f71fd91a282d2070414924708214fc6464
|
[
"0BSD"
] | null | null | null |
orders/models.py
|
pmaigutyak/mp-shop
|
14ea67f71fd91a282d2070414924708214fc6464
|
[
"0BSD"
] | null | null | null |
from django.apps import apps
from django.db import models
from django.conf import settings
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from exchange.models import format_printable_price, MultiCurrencyPrice
from delivery.models import DeliveryMethodField
from orders.constants import (
PAYMENT_METHODS,
ORDER_STATUSES,
ORDER_STATUS_NEW,
PAYMENT_METHOD_PRIVAT24
)
def _generate_hash():
return get_random_string(length=10)
class Order(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='orders',
verbose_name=_('Owner'), null=True, blank=True,
on_delete=models.SET_NULL)
status = models.CharField(
_('Status'),
max_length=100,
choices=ORDER_STATUSES,
default=ORDER_STATUS_NEW)
payment_method = models.CharField(
_('Payment method'),
max_length=100,
choices=PAYMENT_METHODS)
delivery = DeliveryMethodField()
first_name = models.CharField(_('First name'), max_length=255)
last_name = models.CharField(_('Last name'), max_length=255)
middle_name = models.CharField(
_('Middle name'), max_length=255, blank=True)
address = models.CharField(_('Address'), max_length=255, blank=True)
mobile = models.CharField(_('Mobile number'), max_length=255)
created = models.DateTimeField(
_('Date created'), auto_now_add=True, editable=False)
comment = models.TextField(_('Comment'), max_length=1000, blank=True)
hash = models.CharField(
max_length=10,
default=_generate_hash,
unique=True)
def __str__(self):
return self.printable_name
@property
def printable_name(self):
return '{} #{}'.format(_('Order'), self.id)
@property
def full_name(self):
return '{} {} {}'.format(
self.last_name, self.first_name, self.middle_name)
@property
def total(self):
return sum([i.subtotal for i in self.items.all()])
@property
def printable_total(self):
return format_printable_price(self.total)
@property
def delivery_method(self):
return self.delivery.name
def is_liqpay_payment(self):
return self.is_paynow_form_visible() and apps.is_installed('liqpay')
def is_paynow_form_visible(self):
return self.payment_method == PAYMENT_METHOD_PRIVAT24
class Meta:
verbose_name = _('Order')
verbose_name_plural = _('Orders')
class OrderedProduct(MultiCurrencyPrice):
order = models.ForeignKey(
Order,
verbose_name=_('Order'),
related_name='items',
on_delete=models.CASCADE)
product = models.ForeignKey(
'products.Product',
verbose_name=_('Product'),
related_name='order_items',
on_delete=models.CASCADE)
qty = models.PositiveIntegerField(_('Quantity'), default=1)
def __str__(self):
return str(self.product)
@property
def subtotal(self):
return self.price * self.qty
def printable_subtotal(self):
return format_printable_price(self.subtotal)
class Meta:
verbose_name = _('Ordered product')
verbose_name_plural = _('Ordered products')
| 25.710938
| 76
| 0.673959
|
from django.apps import apps
from django.db import models
from django.conf import settings
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from exchange.models import format_printable_price, MultiCurrencyPrice
from delivery.models import DeliveryMethodField
from orders.constants import (
PAYMENT_METHODS,
ORDER_STATUSES,
ORDER_STATUS_NEW,
PAYMENT_METHOD_PRIVAT24
)
def _generate_hash():
return get_random_string(length=10)
class Order(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='orders',
verbose_name=_('Owner'), null=True, blank=True,
on_delete=models.SET_NULL)
status = models.CharField(
_('Status'),
max_length=100,
choices=ORDER_STATUSES,
default=ORDER_STATUS_NEW)
payment_method = models.CharField(
_('Payment method'),
max_length=100,
choices=PAYMENT_METHODS)
delivery = DeliveryMethodField()
first_name = models.CharField(_('First name'), max_length=255)
last_name = models.CharField(_('Last name'), max_length=255)
middle_name = models.CharField(
_('Middle name'), max_length=255, blank=True)
address = models.CharField(_('Address'), max_length=255, blank=True)
mobile = models.CharField(_('Mobile number'), max_length=255)
created = models.DateTimeField(
_('Date created'), auto_now_add=True, editable=False)
comment = models.TextField(_('Comment'), max_length=1000, blank=True)
hash = models.CharField(
max_length=10,
default=_generate_hash,
unique=True)
def __str__(self):
return self.printable_name
@property
def printable_name(self):
return '{} #{}'.format(_('Order'), self.id)
@property
def full_name(self):
return '{} {} {}'.format(
self.last_name, self.first_name, self.middle_name)
@property
def total(self):
return sum([i.subtotal for i in self.items.all()])
@property
def printable_total(self):
return format_printable_price(self.total)
@property
def delivery_method(self):
return self.delivery.name
def is_liqpay_payment(self):
return self.is_paynow_form_visible() and apps.is_installed('liqpay')
def is_paynow_form_visible(self):
return self.payment_method == PAYMENT_METHOD_PRIVAT24
class Meta:
verbose_name = _('Order')
verbose_name_plural = _('Orders')
class OrderedProduct(MultiCurrencyPrice):
order = models.ForeignKey(
Order,
verbose_name=_('Order'),
related_name='items',
on_delete=models.CASCADE)
product = models.ForeignKey(
'products.Product',
verbose_name=_('Product'),
related_name='order_items',
on_delete=models.CASCADE)
qty = models.PositiveIntegerField(_('Quantity'), default=1)
def __str__(self):
return str(self.product)
@property
def subtotal(self):
return self.price * self.qty
def printable_subtotal(self):
return format_printable_price(self.subtotal)
class Meta:
verbose_name = _('Ordered product')
verbose_name_plural = _('Ordered products')
| true
| true
|
f715e307959616301e030cf3bce9da95242c350f
| 2,917
|
py
|
Python
|
sklearn/linear_model/__init__.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 3
|
2019-11-18T13:47:42.000Z
|
2021-08-22T23:37:47.000Z
|
sklearn/linear_model/__init__.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 12
|
2021-03-06T23:42:46.000Z
|
2021-04-04T00:10:42.000Z
|
sklearn/linear_model/__init__.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 2
|
2017-06-27T12:40:35.000Z
|
2021-08-22T23:37:35.000Z
|
"""
The :mod:`sklearn.linear_model` module implements a variety of linear models.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from ._base import LinearRegression
from ._bayes import BayesianRidge, ARDRegression
from ._least_angle import (Lars, LassoLars, lars_path, lars_path_gram, LarsCV,
LassoLarsCV, LassoLarsIC)
from ._coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from ._glm import (PoissonRegressor,
GammaRegressor, TweedieRegressor)
from ._huber import HuberRegressor
from ._sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from ._stochastic_gradient import SGDClassifier, SGDRegressor, SGDOneClassSVM
from ._ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from ._logistic import LogisticRegression, LogisticRegressionCV
from ._omp import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV)
from ._passive_aggressive import PassiveAggressiveClassifier
from ._passive_aggressive import PassiveAggressiveRegressor
from ._perceptron import Perceptron
from ._ransac import RANSACRegressor
from ._theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SGDOneClassSVM',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lars_path_gram',
'lasso_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor',
'PoissonRegressor',
'GammaRegressor',
'TweedieRegressor']
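# Quick, illustrative check that the re-exported estimators are usable; `Ridge` was
# imported above from ._ridge, and the data below is made up.
import numpy as np
_X = np.array([[0.0], [1.0], [2.0], [3.0]])
_y = np.array([1.0, 3.0, 5.0, 7.0])
print(Ridge(alpha=0.1).fit(_X, _y).coef_)  # roughly [1.96]; shrunk slightly below the true slope of 2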
| 35.573171
| 78
| 0.618101
|
from ._base import LinearRegression
from ._bayes import BayesianRidge, ARDRegression
from ._least_angle import (Lars, LassoLars, lars_path, lars_path_gram, LarsCV,
LassoLarsCV, LassoLarsIC)
from ._coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from ._glm import (PoissonRegressor,
GammaRegressor, TweedieRegressor)
from ._huber import HuberRegressor
from ._sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from ._stochastic_gradient import SGDClassifier, SGDRegressor, SGDOneClassSVM
from ._ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from ._logistic import LogisticRegression, LogisticRegressionCV
from ._omp import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV)
from ._passive_aggressive import PassiveAggressiveClassifier
from ._passive_aggressive import PassiveAggressiveRegressor
from ._perceptron import Perceptron
from ._ransac import RANSACRegressor
from ._theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SGDOneClassSVM',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lars_path_gram',
'lasso_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor',
'PoissonRegressor',
'GammaRegressor',
'TweedieRegressor']
| true
| true
|
f715e36e362ae80301e03af5d3ad4b2ac4a51e76
| 239
|
py
|
Python
|
examples/pull_inbox_delivery_reports_ex.py
|
ubidreams/infobip-api-python-client
|
3e585bf00565627bd7da46a2c8f10b860faaeb8b
|
[
"Apache-2.0"
] | null | null | null |
examples/pull_inbox_delivery_reports_ex.py
|
ubidreams/infobip-api-python-client
|
3e585bf00565627bd7da46a2c8f10b860faaeb8b
|
[
"Apache-2.0"
] | null | null | null |
examples/pull_inbox_delivery_reports_ex.py
|
ubidreams/infobip-api-python-client
|
3e585bf00565627bd7da46a2c8f10b860faaeb8b
|
[
"Apache-2.0"
] | null | null | null |
from infobip.clients import get_received_messages
from __init__ import configuration
get_delivery_reports_client = get_received_messages(configuration)
response = get_delivery_reports_client.execute({"limit": 1})
print(unicode(response))
| 34.142857
| 66
| 0.857741
|
from infobip.clients import get_received_messages
from __init__ import configuration
get_delivery_reports_client = get_received_messages(configuration)
response = get_delivery_reports_client.execute({"limit": 1})
print(unicode(response))
| true
| true
|
f715e3a8e11c572f5bb1831dd2bd65643e2aa549
| 1,558
|
py
|
Python
|
tests/test_image.py
|
juliamarc/mal-tier-list-bbcode-gen
|
3b14d1982883bea6c0b5cf3ba1de5360c2d71abc
|
[
"MIT"
] | null | null | null |
tests/test_image.py
|
juliamarc/mal-tier-list-bbcode-gen
|
3b14d1982883bea6c0b5cf3ba1de5360c2d71abc
|
[
"MIT"
] | null | null | null |
tests/test_image.py
|
juliamarc/mal-tier-list-bbcode-gen
|
3b14d1982883bea6c0b5cf3ba1de5360c2d71abc
|
[
"MIT"
] | null | null | null |
import pytest
import mal_tier_list_bbcode_gen.exceptions as exceptions
from mal_tier_list_bbcode_gen.image import Image
def test_source_direct_url():
image_url = 'example.com/test.png'
image = Image('direct URL', image_url)
assert image.image_url == image_url
def test_source_google_drive_file_id():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = '1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z'
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_drive_share_link():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = ('https://drive.google.com/file/d/'
'1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z/view?usp=sharing')
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_no_file_id():
image_url = ('https://drive.google.com/file/d/view?usp=sharing')
with pytest.raises(exceptions.GoogleDriveSourceError):
Image('Google Drive', image_url)
def test_source_not_valid():
with pytest.raises(exceptions.InvalidImageSourceError,
match=r".*is not a valid image source.*"):
Image('not valid', 'example.com/test.png')
def test_get_bbcode():
image_url = 'example.com/test.png'
expected_bbcode = f'[img]{image_url}[/img]'
image = Image('direct URL', image_url)
assert image.get_bbcode() == expected_bbcode
| 30.54902
| 70
| 0.70154
|
import pytest
import mal_tier_list_bbcode_gen.exceptions as exceptions
from mal_tier_list_bbcode_gen.image import Image
def test_source_direct_url():
image_url = 'example.com/test.png'
image = Image('direct URL', image_url)
assert image.image_url == image_url
def test_source_google_drive_file_id():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = '1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z'
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_drive_share_link():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = ('https://drive.google.com/file/d/'
'1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z/view?usp=sharing')
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_no_file_id():
image_url = ('https://drive.google.com/file/d/view?usp=sharing')
with pytest.raises(exceptions.GoogleDriveSourceError):
Image('Google Drive', image_url)
def test_source_not_valid():
with pytest.raises(exceptions.InvalidImageSourceError,
match=r".*is not a valid image source.*"):
Image('not valid', 'example.com/test.png')
def test_get_bbcode():
image_url = 'example.com/test.png'
expected_bbcode = f'[img]{image_url}[/img]'
image = Image('direct URL', image_url)
assert image.get_bbcode() == expected_bbcode
| true
| true
|
f715e48b813407c0bd9d7f1f42d77633e8197d1d
| 5,342
|
py
|
Python
|
isi_sdk/models/mapping_identity_target_create_params.py
|
robzim/isilon_sdk_python
|
3c2efcae7002f8ad25c0cfcb42a53b4d83e826d7
|
[
"MIT"
] | null | null | null |
isi_sdk/models/mapping_identity_target_create_params.py
|
robzim/isilon_sdk_python
|
3c2efcae7002f8ad25c0cfcb42a53b4d83e826d7
|
[
"MIT"
] | null | null | null |
isi_sdk/models/mapping_identity_target_create_params.py
|
robzim/isilon_sdk_python
|
3c2efcae7002f8ad25c0cfcb42a53b4d83e826d7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_0.models.group_member import GroupMember # noqa: F401,E501
class MappingIdentityTargetCreateParams(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'on_disk': 'bool',
'target': 'GroupMember',
'type': 'str'
}
attribute_map = {
'on_disk': 'on_disk',
'target': 'target',
'type': 'type'
}
def __init__(self, on_disk=None, target=None, type=None): # noqa: E501
"""MappingIdentityTargetCreateParams - a model defined in Swagger""" # noqa: E501
self._on_disk = None
self._target = None
self._type = None
self.discriminator = None
if on_disk is not None:
self.on_disk = on_disk
self.target = target
if type is not None:
self.type = type
@property
def on_disk(self):
"""Gets the on_disk of this MappingIdentityTargetCreateParams. # noqa: E501
Identity is preferred on-disk. # noqa: E501
:return: The on_disk of this MappingIdentityTargetCreateParams. # noqa: E501
:rtype: bool
"""
return self._on_disk
@on_disk.setter
def on_disk(self, on_disk):
"""Sets the on_disk of this MappingIdentityTargetCreateParams.
Identity is preferred on-disk. # noqa: E501
:param on_disk: The on_disk of this MappingIdentityTargetCreateParams. # noqa: E501
:type: bool
"""
self._on_disk = on_disk
@property
def target(self):
"""Gets the target of this MappingIdentityTargetCreateParams. # noqa: E501
Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'. # noqa: E501
:return: The target of this MappingIdentityTargetCreateParams. # noqa: E501
:rtype: GroupMember
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this MappingIdentityTargetCreateParams.
Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'. # noqa: E501
:param target: The target of this MappingIdentityTargetCreateParams. # noqa: E501
:type: GroupMember
"""
if target is None:
raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501
self._target = target
@property
def type(self):
"""Gets the type of this MappingIdentityTargetCreateParams. # noqa: E501
Origin of identity mapping. # noqa: E501
:return: The type of this MappingIdentityTargetCreateParams. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this MappingIdentityTargetCreateParams.
Origin of identity mapping. # noqa: E501
:param type: The type of this MappingIdentityTargetCreateParams. # noqa: E501
:type: str
"""
allowed_values = ["auto", "external", "manual"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MappingIdentityTargetCreateParams):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.677778
| 116
| 0.586484
|
import pprint
import re
import six
from isi_sdk_8_0.models.group_member import GroupMember
class MappingIdentityTargetCreateParams(object):
swagger_types = {
'on_disk': 'bool',
'target': 'GroupMember',
'type': 'str'
}
attribute_map = {
'on_disk': 'on_disk',
'target': 'target',
'type': 'type'
}
def __init__(self, on_disk=None, target=None, type=None):
self._on_disk = None
self._target = None
self._type = None
self.discriminator = None
if on_disk is not None:
self.on_disk = on_disk
self.target = target
if type is not None:
self.type = type
@property
def on_disk(self):
return self._on_disk
@on_disk.setter
def on_disk(self, on_disk):
self._on_disk = on_disk
@property
def target(self):
return self._target
@target.setter
def target(self, target):
if target is None:
raise ValueError("Invalid value for `target`, must not be `None`")
self._target = target
@property
def type(self):
return self._type
@type.setter
def type(self, type):
allowed_values = ["auto", "external", "manual"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, MappingIdentityTargetCreateParams):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f715e4b9d03d838a2a6581b960aa71928211ff89
| 855
|
py
|
Python
|
slrp/expressions.py
|
thomasmatecki/parsley
|
0c51e9c37759fbc1c723519619952248c83e4642
|
[
"MIT"
] | null | null | null |
slrp/expressions.py
|
thomasmatecki/parsley
|
0c51e9c37759fbc1c723519619952248c83e4642
|
[
"MIT"
] | 2
|
2020-03-24T18:30:15.000Z
|
2020-03-31T10:57:37.000Z
|
slrp/expressions.py
|
thomasmatecki/parsley
|
0c51e9c37759fbc1c723519619952248c83e4642
|
[
"MIT"
] | null | null | null |
"""
Expression for matching.
"""
import re
from abc import ABC
from typing import Callable, Text, Tuple
from slrp.combos import Combinable
class RegExpr(Combinable):
"""
Regular expression matcher.
"""
def __init__(self, pattern):
self.pattern = pattern
def match(self, expr):
_match = re.match(self.pattern, expr)
if _match:
fr, to = _match.span()
return _match.groups(), expr[to:]
class StringExpr(Combinable):
"""
String Expression Matcher.
"""
def __init__(self, string: str, capture=False):
self.string = string
self.capture = capture
def match(self, expr):
if expr.startswith(self.string):
remaining = expr[len(self.string) :]
return ((self.string,), remaining) if self.capture else (tuple(), remaining)
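# Illustrative usage of the matchers above (assuming slrp.combos.Combinable is importable
# as in the module imports); a successful match returns (captured values, remaining text).
digits = RegExpr(r'(\d+)')
print(digits.match('42 apples'))         # (('42',), ' apples')
greeting = StringExpr('hello', capture=True)
print(greeting.match('hello world'))     # (('hello',), ' world')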
| 21.923077
| 88
| 0.615205
|
import re
from abc import ABC
from typing import Callable, Text, Tuple
from slrp.combos import Combinable
class RegExpr(Combinable):
def __init__(self, pattern):
self.pattern = pattern
def match(self, expr):
_match = re.match(self.pattern, expr)
if _match:
fr, to = _match.span()
return _match.groups(), expr[to:]
class StringExpr(Combinable):
def __init__(self, string: str, capture=False):
self.string = string
self.capture = capture
def match(self, expr):
if expr.startswith(self.string):
remaining = expr[len(self.string) :]
return ((self.string,), remaining) if self.capture else (tuple(), remaining)
| true
| true
|
f715e4c13f0a448d661aadd39ef081eb09b73466
| 410
|
py
|
Python
|
examples/example_sparsifier_graph.py
|
tfgraph/tfgraph
|
19ae968b3060275c631dc601757646abaf1f58a1
|
[
"Apache-2.0"
] | 4
|
2017-07-23T13:48:35.000Z
|
2021-12-03T18:11:50.000Z
|
examples/example_sparsifier_graph.py
|
tfgraph/tfgraph
|
19ae968b3060275c631dc601757646abaf1f58a1
|
[
"Apache-2.0"
] | 21
|
2017-07-23T13:15:20.000Z
|
2020-09-28T02:13:11.000Z
|
examples/example_sparsifier_graph.py
|
tfgraph/tfgraph
|
19ae968b3060275c631dc601757646abaf1f58a1
|
[
"Apache-2.0"
] | 1
|
2017-07-28T10:28:04.000Z
|
2017-07-28T10:28:04.000Z
|
#!/usr/bin/python3
import tensorflow as tf
import tfgraph
def main():
with tf.Session() as sess:
g: tfgraph.Graph = tfgraph.GraphConstructor.unweighted_random(sess, "G", 10, 85)
g_sparse: tfgraph.Graph = tfgraph.GraphConstructor.as_sparsifier(sess, g, 0.75)
print(g)
print(g.m)
print(g_sparse)
print(g_sparse.m)
print(g_sparse.m / g.m)
if __name__ == '__main__':
main()
| 17.826087
| 84
| 0.673171
|
import tensorflow as tf
import tfgraph
def main():
with tf.Session() as sess:
g: tfgraph.Graph = tfgraph.GraphConstructor.unweighted_random(sess, "G", 10, 85)
g_sparse: tfgraph.Graph = tfgraph.GraphConstructor.as_sparsifier(sess, g, 0.75)
print(g)
print(g.m)
print(g_sparse)
print(g_sparse.m)
print(g_sparse.m / g.m)
if __name__ == '__main__':
main()
| true
| true
|
f715e4d2616e966f17915d11d03e9988858f4587
| 1,195
|
py
|
Python
|
shanapy/test/test_interpolater.py
|
ZhiyLiu/shanapy
|
cbcdd87f4aaa1102d5b93c9488fbcee6e28da2a6
|
[
"MIT"
] | 3
|
2021-11-21T23:14:50.000Z
|
2022-02-12T04:32:52.000Z
|
shanapy/test/test_interpolater.py
|
ZhiyLiu/shanapy
|
cbcdd87f4aaa1102d5b93c9488fbcee6e28da2a6
|
[
"MIT"
] | null | null | null |
shanapy/test/test_interpolater.py
|
ZhiyLiu/shanapy
|
cbcdd87f4aaa1102d5b93c9488fbcee6e28da2a6
|
[
"MIT"
] | null | null | null |
import vtk
from shanapy.models.sreps import Initializer, Interpolater
import pyvista as pv
## Read the input surface mesh (produced by SPHARM-PDM)
reader = vtk.vtkPolyDataReader()
reader.SetFileName('data/example_hippocampus.vtk')
reader.Update()
input_mesh = reader.GetOutput()
## Initialize an s-rep for the input mesh
initializer = Initializer()
srep = initializer.fit(input_mesh)
num_crest_pt = 24
num_samples_outward = 3
## Interpolate up spokes
interp = Interpolater(interpolate_level=3)
interp_spokes, up_spokes = interp.interpolate(srep, num_crest_pt, num_samples_outward)
## Interpolate down spokes
interp.interpolate_up = False
interp_down_spokes, bot_spokes = interp.interpolate(srep, num_crest_pt, num_samples_outward)
## interpolate fold spokes
crest_spokes = interp.interpolate_crest(srep, up_spokes, bot_spokes, num_crest_pt)
p = pv.Plotter()
p.add_mesh(input_mesh, color='white', opacity=0.3, label='Surface')
p.add_mesh(interp_spokes, color='orange', line_width=3, label='Interp Up')
# p.add_mesh(interp_down_spokes, color='cyan', line_width=3, label='Interp Down')
p.add_mesh(srep, color='red', line_width=4, label='Primary')
p.add_legend()
p.add_axes(box=True)
p.show()
| 33.194444
| 92
| 0.787448
|
import vtk
from shanapy.models.sreps import Initializer, Interpolater
import pyvista as pv
reader = vtk.vtkPolyDataReader()
reader.SetFileName('data/example_hippocampus.vtk')
reader.Update()
input_mesh = reader.GetOutput()
initializer = Initializer()
srep = initializer.fit(input_mesh)
num_crest_pt = 24
num_samples_outward = 3
interp = Interpolater(interpolate_level=3)
interp_spokes, up_spokes = interp.interpolate(srep, num_crest_pt, num_samples_outward)
interp.interpolate_up = False
interp_down_spokes, bot_spokes = interp.interpolate(srep, num_crest_pt, num_samples_outward)
crest_spokes = interp.interpolate_crest(srep, up_spokes, bot_spokes, num_crest_pt)
p = pv.Plotter()
p.add_mesh(input_mesh, color='white', opacity=0.3, label='Surface')
p.add_mesh(interp_spokes, color='orange', line_width=3, label='Interp Up')
p.add_mesh(srep, color='red', line_width=4, label='Primary')
p.add_legend()
p.add_axes(box=True)
p.show()
| true
| true
|
f715e52c53f3d913beec9dd47456ed969e6769b5
| 2,052
|
py
|
Python
|
flywheel_cli/importers/slurp_scan.py
|
amitvakula/python-cli
|
0bdbd39c40cdb3fe4dbd3b0cb38abbce94242dac
|
[
"MIT"
] | null | null | null |
flywheel_cli/importers/slurp_scan.py
|
amitvakula/python-cli
|
0bdbd39c40cdb3fe4dbd3b0cb38abbce94242dac
|
[
"MIT"
] | null | null | null |
flywheel_cli/importers/slurp_scan.py
|
amitvakula/python-cli
|
0bdbd39c40cdb3fe4dbd3b0cb38abbce94242dac
|
[
"MIT"
] | null | null | null |
"""Provides a scanner that will group files together under a common prefix"""
import copy
from .abstract_scanner import AbstractScanner
class SlurpScanner(AbstractScanner):
"""SlurpScanner groups files together by a common prefix.
This works by looking at the first slash (or if there is no slash, the first dot) in
each file path, and using that as the acquisition label.
"""
def __init__(self, config):
"""Class that handles generic acquisition slurping"""
super(SlurpScanner, self).__init__(config)
def discover(self, walker, context, container_factory, path_prefix=None, audit_log=None):
# Discover files first
files = list(sorted(walker.files(subdir=path_prefix)))
prefix_len = len(path_prefix or '')
current_prefix = None
current_files = []
for path in files:
path = path.lstrip('/')
prefix = SlurpScanner._get_prefix(path[prefix_len:])
if prefix == current_prefix:
current_files.append(path)
else:
self._add_acquisition(container_factory, context, current_prefix, current_files)
current_prefix = prefix
current_files = [path]
self._add_acquisition(container_factory, context, current_prefix, current_files)
@staticmethod
def _get_prefix(path):
"""Get the appropriate prefix for the given file"""
try:
idx = path.rindex('/')
except ValueError:
try:
idx = path.index('.')
except ValueError:
idx = len(path)
return path[:idx].strip('/').replace('/', '_')
def _add_acquisition(self, container_factory, context, label, files):
if not label or not files:
return
acquisition_context = copy.deepcopy(context)
acquisition_context.setdefault('acquisition', {})['label'] = label
container = container_factory.resolve(acquisition_context)
container.files.extend(files)
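# Illustration of the prefix rule described in the class docstring above; _get_prefix is a
# staticmethod, so it can be called without constructing a scanner. Paths are made up.
print(SlurpScanner._get_prefix('acq1/scan.dcm'))   # 'acq1'
print(SlurpScanner._get_prefix('a/b/scan.dcm'))    # 'a_b'
print(SlurpScanner._get_prefix('scan.dcm'))        # 'scan'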
| 33.096774
| 96
| 0.634016
|
import copy
from .abstract_scanner import AbstractScanner
class SlurpScanner(AbstractScanner):
def __init__(self, config):
super(SlurpScanner, self).__init__(config)
def discover(self, walker, context, container_factory, path_prefix=None, audit_log=None):
files = list(sorted(walker.files(subdir=path_prefix)))
prefix_len = len(path_prefix or '')
current_prefix = None
current_files = []
for path in files:
path = path.lstrip('/')
prefix = SlurpScanner._get_prefix(path[prefix_len:])
if prefix == current_prefix:
current_files.append(path)
else:
self._add_acquisition(container_factory, context, current_prefix, current_files)
current_prefix = prefix
current_files = [path]
self._add_acquisition(container_factory, context, current_prefix, current_files)
@staticmethod
def _get_prefix(path):
try:
idx = path.rindex('/')
except ValueError:
try:
idx = path.index('.')
except ValueError:
idx = len(path)
return path[:idx].strip('/').replace('/', '_')
def _add_acquisition(self, container_factory, context, label, files):
if not label or not files:
return
acquisition_context = copy.deepcopy(context)
acquisition_context.setdefault('acquisition', {})['label'] = label
container = container_factory.resolve(acquisition_context)
container.files.extend(files)
| true
| true
|
f715e6fb9ac52b17d3d805190df8d63c65156cf6
| 5,008
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>:<port>
[<ipv6>]:<port>
<onion>.onion:<port>
<i2p>.b32.i2p:<port>
The output will be two data structures with the peers in binary format:
static const uint8_t chainparams_seed_{main,test}[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from enum import Enum
import struct
import sys
import os
import re
class BIP155Network(Enum):
IPV4 = 1
IPV6 = 2
TORV2 = 3
TORV3 = 4
I2P = 5
CJDNS = 6
def name_to_bip155(addr):
'''Convert address string to BIP155 (networkID, addr) tuple.'''
if addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) == 10:
return (BIP155Network.TORV2, vchAddr)
elif len(vchAddr) == 35:
assert(vchAddr[34] == 3)
return (BIP155Network.TORV3, vchAddr[:32])
else:
raise ValueError('Invalid onion %s' % vchAddr)
elif addr.endswith('.b32.i2p'):
vchAddr = b32decode(addr[0:-8] + '====', True)
if len(vchAddr) == 32:
return (BIP155Network.I2P, vchAddr)
else:
raise ValueError(f'Invalid I2P {vchAddr}')
elif '.' in addr: # IPv4
return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return (BIP155Network.IPV6, bytes(sub[0] + ([0] * nullbytes) + sub[1]))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s):
'''Convert endpoint string to BIP155 (networkID, addr, port) tuple.'''
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = 0
else:
port = int(port)
host = name_to_bip155(host)
return host + (port, )
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def bip155_serialize(spec):
'''
Serialize (networkID, addr, port) tuple to BIP155 binary format.
'''
r = b""
r += struct.pack('B', spec[0].value)
r += ser_compact_size(len(spec[1]))
r += spec[1]
r += struct.pack('>H', spec[2])
return r
def process_nodes(g, f, structname):
g.write('static const uint8_t %s[] = {\n' % structname)
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
spec = parse_spec(line)
blob = bip155_serialize(spec)
hoststr = ','.join(('0x%02x' % b) for b in blob)
g.write(f' {hoststr},\n')
g.write('};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BEANS_CHAINPARAMSSEEDS_H\n')
g.write('#define BEANS_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the beans network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_main')
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_test')
g.write('#endif // BEANS_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
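# Quick, illustrative check of the helpers above: serialize a single made-up IPv4 endpoint.
# parse_spec -> (BIP155Network.IPV4, b'\x01\x02\x03\x04', 8333); bip155_serialize packs the
# network id, a compact size, the address bytes and a big-endian port.
_spec = parse_spec('1.2.3.4:8333')
_blob = bip155_serialize(_spec)
print(','.join('0x%02x' % b for b in _blob))  # 0x01,0x04,0x01,0x02,0x03,0x04,0x20,0x8d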
| 29.988024
| 91
| 0.568091
|
from base64 import b32decode
from enum import Enum
import struct
import sys
import os
import re
class BIP155Network(Enum):
IPV4 = 1
IPV6 = 2
TORV2 = 3
TORV3 = 4
I2P = 5
CJDNS = 6
def name_to_bip155(addr):
if addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) == 10:
return (BIP155Network.TORV2, vchAddr)
elif len(vchAddr) == 35:
assert(vchAddr[34] == 3)
return (BIP155Network.TORV3, vchAddr[:32])
else:
raise ValueError('Invalid onion %s' % vchAddr)
elif addr.endswith('.b32.i2p'):
vchAddr = b32decode(addr[0:-8] + '====', True)
if len(vchAddr) == 32:
return (BIP155Network.I2P, vchAddr)
else:
raise ValueError(f'Invalid I2P {vchAddr}')
elif '.' in addr:
return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return (BIP155Network.IPV6, bytes(sub[0] + ([0] * nullbytes) + sub[1]))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = 0
else:
port = int(port)
host = name_to_bip155(host)
return host + (port, )
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def bip155_serialize(spec):
r = b""
r += struct.pack('B', spec[0].value)
r += ser_compact_size(len(spec[1]))
r += spec[1]
r += struct.pack('>H', spec[2])
return r
def process_nodes(g, f, structname):
g.write('static const uint8_t %s[] = {\n' % structname)
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
spec = parse_spec(line)
blob = bip155_serialize(spec)
hoststr = ','.join(('0x%02x' % b) for b in blob)
g.write(f' {hoststr},\n')
g.write('};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BEANS_CHAINPARAMSSEEDS_H\n')
g.write('#define BEANS_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the beans network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_main')
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_test')
g.write('#endif // BEANS_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true
| true
|
f715e76b2533da62db85b11847c06082ebf9c1c8
| 565
|
py
|
Python
|
PDF_Copy_Paster/scripts/pdfcp.py
|
cooperbeaman/cogs18pdfcpfinalproject
|
ade7cf46534e8817b327f1c35ebf617cc977d872
|
[
"CNRI-Python",
"Adobe-2006",
"Adobe-Glyph"
] | null | null | null |
PDF_Copy_Paster/scripts/pdfcp.py
|
cooperbeaman/cogs18pdfcpfinalproject
|
ade7cf46534e8817b327f1c35ebf617cc977d872
|
[
"CNRI-Python",
"Adobe-2006",
"Adobe-Glyph"
] | null | null | null |
PDF_Copy_Paster/scripts/pdfcp.py
|
cooperbeaman/cogs18pdfcpfinalproject
|
ade7cf46534e8817b327f1c35ebf617cc977d872
|
[
"CNRI-Python",
"Adobe-2006",
"Adobe-Glyph"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
"""Script runs pdf copy paste tool through command prompt.
Automatically monitors and updates clipboard contents.
"""
# Allows importing functions from functions.py in my_module folder
import sys
sys.path.append('../')
# Imports functions from my_module folder
from my_module.functions import *
# Runs windows clipboard monitoring and updating function "pdfcp"
# to remove line breaks from copied pdf text and optionally enclose
# copied pdf text in quotes or append a carriage return to the end of it
run()
| 25.681818
| 72
| 0.762832
|
import sys
sys.path.append('../')
from my_module.functions import *
run()
| true
| true
|
f715e77d6fe49e1cc89dddd041c453a19b1ba4b5
| 2,779
|
py
|
Python
|
library/Kernel_based_Regressions.py
|
SushantKuchankar/Machine_Learning_library
|
3e0421141e607e5099fce6c84b63948165e75aa0
|
[
"MIT"
] | null | null | null |
library/Kernel_based_Regressions.py
|
SushantKuchankar/Machine_Learning_library
|
3e0421141e607e5099fce6c84b63948165e75aa0
|
[
"MIT"
] | null | null | null |
library/Kernel_based_Regressions.py
|
SushantKuchankar/Machine_Learning_library
|
3e0421141e607e5099fce6c84b63948165e75aa0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Sushant
"""
import numpy as np
import scipy
######################################## Least Square Linear Regression ####################################
def LinearRegression(data,labels):
numdata = int( np.size(data,0) )
b1 = np.hstack(( data,np.ones((numdata,1)) ) )
XXT = np.matmul(b1.T,b1)
invXXT = np.linalg.pinv(XXT)
b2 = np.matmul(b1.T,labels)
w = np.matmul(invXXT,b2)
return w
###################################### Ridge Regression ####################################################
def RidgeRegression(data,labels,lamda):
    # 'lambda' is a reserved keyword in Python, so the regularization strength is named 'lamda'
    numdata = int( np.size(data,0) )
    b1 = np.hstack(( data,np.ones((numdata,1)) ) )
    XXT = np.matmul(b1.T,b1) + lamda*np.identity(np.size(b1,1))
    b2 = np.matmul(b1.T,labels)
    #solved using Cholesky decomposition Ax = b
    b3 = scipy.linalg.cho_factor( XXT )
w = scipy.linalg.cho_solve(b3,b2)
return w
###################################### Predict values and Least Square Error###############################
def PredictLabels(testdata,w):
numdata = int( np.size(testdata,0) )
b3 = np.hstack( ( testdata,np.ones((numdata,1)) ) )
pred = np.matmul(b3,w)
return pred
def ltsqerror(prelabels,actlabels):
return np.sum((prelabels-actlabels)**2)/int(np.size(prelabels,0))
####################################### Kernel Ridges Regression ########################################
def linear(x1,x2,p = None):
return np.dot(x1,x2)
def polynomial(x1,x2,d):
return ( 1+np.dot(x1,x2) )**d
def rbf(x1,x2,l):
return np.exp( -np.divide(np.dot(x1-x2,x1-x2), 2*(l**2 ) ) )
def KernelRidgeRegression(data,labels,lamda,kernel,p):
numdata = int( np.size(data,0) )
traindata = np.asarray(data)
#=========Kernel matrix======================
K = np.zeros((numdata,numdata))
for i in range(0,numdata):
for j in range(0,numdata):
K[i,j] = kernel(traindata[i,:],traindata[j,:],p)
    #solved using Cholesky decomposition Ax = b
b1 = scipy.linalg.cho_factor( K + lamda*np.identity(numdata) )
alphas = scipy.linalg.cho_solve(b1,labels)
return alphas
def KernelRidgesRegression_predict(traindata1,alphas,testdata1,kernel,p):
numtraindata = int( np.size(traindata1,0) )
numtestdata = int( np.size(testdata1,0) )
traindata = np.asarray(traindata1)
testdata = np.asarray(testdata1)
predlabels = np.zeros((numtestdata,1))
K = np.zeros((numtestdata,numtraindata))
for j in range(0,numtestdata):
for i in range(0,numtraindata):
K[j,i] = kernel(traindata[i,:],testdata[j,:],p)
predlabels = np.dot(K,alphas)
return predlabels
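# Quick sanity check of LinearRegression above on made-up data that lies exactly on a line.
X_demo = np.array([[0.0], [1.0], [2.0], [3.0]])
y_demo = 2.0 * X_demo[:, 0] + 1.0
w_demo = LinearRegression(X_demo, y_demo)
print(w_demo)  # approximately [2., 1.], the slope and intercept of y = 2x + 1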
| 34.308642
| 109
| 0.543721
|
"""
@author: Sushant
"""
import numpy as np
import scipy
| false
| true
|
f715e9d439ec161b580dbb638b66c76fe3d21b3d
| 2,166
|
py
|
Python
|
tests/mantid_data_helper.py
|
scipp/scipp-ci-mantid
|
29164f633096c4eeb0a8579b72165c96315113f8
|
[
"Apache-2.0"
] | null | null | null |
tests/mantid_data_helper.py
|
scipp/scipp-ci-mantid
|
29164f633096c4eeb0a8579b72165c96315113f8
|
[
"Apache-2.0"
] | null | null | null |
tests/mantid_data_helper.py
|
scipp/scipp-ci-mantid
|
29164f633096c4eeb0a8579b72165c96315113f8
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: GPL-3.0-or-later
# Copyright (c) 2019 Scipp contributors (https://github.com/scipp)
# @author Dimitar Tasev
import os
import hashlib
import sys
import subprocess as sp
def download_file(source, destination):
command = "wget -O {} {}".format(destination, source)
status = sp.run(command, shell=True).returncode
if status != 0:
raise RuntimeError("Can't load {} to {}.".format(source, destination))
class MantidDataHelper:
# Valid only for Linux. Windows is as C:\MantidExternalData
DATA_DIR = os.path.abspath(os.path.expanduser(
"/opt/tests/MantidExternalData"))
DATA_LOCATION = "{data_dir}/{algorithm}/{hash}"
DATA_FILES = {
"CNCS_51936_event.nxs": {
"hash": "5ba401e489260a44374b5be12b780911",
"algorithm": "MD5"},
"iris26176_graphite002_sqw.nxs": {
"hash": "7ea63f9137602b7e9b604fe30f0c6ec2",
"algorithm": "MD5"},
"WISH00016748.raw": {
"hash": "37ecc6f99662b57e405ed967bdc068af",
"algorithm": "MD5"},
}
REMOTE_URL = "http://198.74.56.37/ftp/external-data/"\
"{algorithm}/{hash}"
@classmethod
def find_file(cls, name):
data_file = cls.DATA_FILES[name]
data_location = cls.DATA_LOCATION.format(
data_dir=cls.DATA_DIR,
algorithm=data_file["algorithm"],
hash=data_file["hash"])
dir_name = os.path.dirname(data_location)
if not os.path.exists(dir_name):
os.makedirs(dir_name, exist_ok=True)
if not os.path.isfile(data_location):
file_hash = data_file["hash"]
algorithm = data_file["algorithm"]
query = cls.REMOTE_URL.format(algorithm=algorithm,
hash=file_hash)
download_file(query, data_location)
if algorithm == "MD5":
with open(data_location, "rb") as file:
md5 = hashlib.md5(file.read()).hexdigest()
if md5 != file_hash:
raise RuntimeError("Check sum doesn't match.")
return data_location
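# Minimal illustration of the checksum comparison performed in find_file above; the bytes
# are made up and nothing is downloaded.
_payload = b"example payload"
_expected = hashlib.md5(_payload).hexdigest()
assert hashlib.md5(_payload).hexdigest() == _expected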
| 35.508197
| 78
| 0.5988
|
import os
import hashlib
import sys
import subprocess as sp
def download_file(source, destination):
command = "wget -O {} {}".format(destination, source)
status = sp.run(command, shell=True).returncode
if status != 0:
raise RuntimeError("Can't load {} to {}.".format(source, destination))
class MantidDataHelper:
# Valid only for Linux. Windows is as C:\MantidExternalData
DATA_DIR = os.path.abspath(os.path.expanduser(
"/opt/tests/MantidExternalData"))
DATA_LOCATION = "{data_dir}/{algorithm}/{hash}"
DATA_FILES = {
"CNCS_51936_event.nxs": {
"hash": "5ba401e489260a44374b5be12b780911",
"algorithm": "MD5"},
"iris26176_graphite002_sqw.nxs": {
"hash": "7ea63f9137602b7e9b604fe30f0c6ec2",
"algorithm": "MD5"},
"WISH00016748.raw": {
"hash": "37ecc6f99662b57e405ed967bdc068af",
"algorithm": "MD5"},
}
REMOTE_URL = "http://198.74.56.37/ftp/external-data/"\
"{algorithm}/{hash}"
@classmethod
def find_file(cls, name):
data_file = cls.DATA_FILES[name]
data_location = cls.DATA_LOCATION.format(
data_dir=cls.DATA_DIR,
algorithm=data_file["algorithm"],
hash=data_file["hash"])
dir_name = os.path.dirname(data_location)
if not os.path.exists(dir_name):
os.makedirs(dir_name, exist_ok=True)
if not os.path.isfile(data_location):
file_hash = data_file["hash"]
algorithm = data_file["algorithm"]
query = cls.REMOTE_URL.format(algorithm=algorithm,
hash=file_hash)
download_file(query, data_location)
if algorithm == "MD5":
with open(data_location, "rb") as file:
md5 = hashlib.md5(file.read()).hexdigest()
if md5 != file_hash:
raise RuntimeError("Check sum doesn't match.")
return data_location
| true
| true
|
f715eb646ed9af649d8f5a29c1d0d68ce7a3e4b3
| 571
|
py
|
Python
|
setup.py
|
TomKealy/causal-forest
|
04f3aeb1ac5547a78b96eca9bdb51b61f9e940f4
|
[
"MIT"
] | null | null | null |
setup.py
|
TomKealy/causal-forest
|
04f3aeb1ac5547a78b96eca9bdb51b61f9e940f4
|
[
"MIT"
] | null | null | null |
setup.py
|
TomKealy/causal-forest
|
04f3aeb1ac5547a78b96eca9bdb51b61f9e940f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Setup file for cforest.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| 23.791667
| 75
| 0.702277
|
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| true
| true
|
f715ec4a046a358ebcab33b297f3acf0d66c97dd
| 9,457
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/task_agent/v4_0/models/task_group.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/task_agent/v4_0/models/task_group.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/task_agent/v4_0/models/task_group.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .task_definition import TaskDefinition
class TaskGroup(TaskDefinition):
"""TaskGroup.
:param agent_execution:
:type agent_execution: :class:`TaskExecution <task-agent.v4_0.models.TaskExecution>`
:param author:
:type author: str
:param category:
:type category: str
:param contents_uploaded:
:type contents_uploaded: bool
:param contribution_identifier:
:type contribution_identifier: str
:param contribution_version:
:type contribution_version: str
:param data_source_bindings:
:type data_source_bindings: list of :class:`DataSourceBinding <task-agent.v4_0.models.DataSourceBinding>`
:param definition_type:
:type definition_type: str
:param demands:
:type demands: list of :class:`object <task-agent.v4_0.models.object>`
:param deprecated:
:type deprecated: bool
:param description:
:type description: str
:param disabled:
:type disabled: bool
:param execution:
:type execution: dict
:param friendly_name:
:type friendly_name: str
:param groups:
:type groups: list of :class:`TaskGroupDefinition <task-agent.v4_0.models.TaskGroupDefinition>`
:param help_mark_down:
:type help_mark_down: str
:param host_type:
:type host_type: str
:param icon_url:
:type icon_url: str
:param id:
:type id: str
:param inputs:
:type inputs: list of :class:`TaskInputDefinition <task-agent.v4_0.models.TaskInputDefinition>`
:param instance_name_format:
:type instance_name_format: str
:param minimum_agent_version:
:type minimum_agent_version: str
:param name:
:type name: str
:param output_variables:
:type output_variables: list of :class:`TaskOutputVariable <task-agent.v4_0.models.TaskOutputVariable>`
:param package_location:
:type package_location: str
:param package_type:
:type package_type: str
:param preview:
:type preview: bool
:param release_notes:
:type release_notes: str
:param runs_on:
:type runs_on: list of str
:param satisfies:
:type satisfies: list of str
:param server_owned:
:type server_owned: bool
:param source_definitions:
:type source_definitions: list of :class:`TaskSourceDefinition <task-agent.v4_0.models.TaskSourceDefinition>`
:param source_location:
:type source_location: str
:param version:
:type version: :class:`TaskVersion <task-agent.v4_0.models.TaskVersion>`
:param visibility:
:type visibility: list of str
:param comment: Gets or sets comment.
:type comment: str
:param created_by: Gets or sets the identity who created.
:type created_by: :class:`IdentityRef <task-agent.v4_0.models.IdentityRef>`
:param created_on: Gets or sets date on which it got created.
:type created_on: datetime
:param deleted: Gets or sets as 'true' to indicate as deleted, 'false' otherwise.
:type deleted: bool
:param modified_by: Gets or sets the identity who modified.
:type modified_by: :class:`IdentityRef <task-agent.v4_0.models.IdentityRef>`
:param modified_on: Gets or sets date on which it got modified.
:type modified_on: datetime
:param owner: Gets or sets the owner.
:type owner: str
:param parent_definition_id: Gets or sets parent task group Id. This is used while creating a draft task group.
:type parent_definition_id: str
:param revision: Gets or sets revision.
:type revision: int
:param tasks:
:type tasks: list of :class:`TaskGroupStep <task-agent.v4_0.models.TaskGroupStep>`
"""
_attribute_map = {
'agent_execution': {'key': 'agentExecution', 'type': 'TaskExecution'},
'author': {'key': 'author', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'contents_uploaded': {'key': 'contentsUploaded', 'type': 'bool'},
'contribution_identifier': {'key': 'contributionIdentifier', 'type': 'str'},
'contribution_version': {'key': 'contributionVersion', 'type': 'str'},
'data_source_bindings': {'key': 'dataSourceBindings', 'type': '[DataSourceBinding]'},
'definition_type': {'key': 'definitionType', 'type': 'str'},
'demands': {'key': 'demands', 'type': '[object]'},
'deprecated': {'key': 'deprecated', 'type': 'bool'},
'description': {'key': 'description', 'type': 'str'},
'disabled': {'key': 'disabled', 'type': 'bool'},
'execution': {'key': 'execution', 'type': '{object}'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'groups': {'key': 'groups', 'type': '[TaskGroupDefinition]'},
'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
'host_type': {'key': 'hostType', 'type': 'str'},
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[TaskInputDefinition]'},
'instance_name_format': {'key': 'instanceNameFormat', 'type': 'str'},
'minimum_agent_version': {'key': 'minimumAgentVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'output_variables': {'key': 'outputVariables', 'type': '[TaskOutputVariable]'},
'package_location': {'key': 'packageLocation', 'type': 'str'},
'package_type': {'key': 'packageType', 'type': 'str'},
'preview': {'key': 'preview', 'type': 'bool'},
'release_notes': {'key': 'releaseNotes', 'type': 'str'},
'runs_on': {'key': 'runsOn', 'type': '[str]'},
'satisfies': {'key': 'satisfies', 'type': '[str]'},
'server_owned': {'key': 'serverOwned', 'type': 'bool'},
'source_definitions': {'key': 'sourceDefinitions', 'type': '[TaskSourceDefinition]'},
'source_location': {'key': 'sourceLocation', 'type': 'str'},
'version': {'key': 'version', 'type': 'TaskVersion'},
'visibility': {'key': 'visibility', 'type': '[str]'},
'comment': {'key': 'comment', 'type': 'str'},
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'deleted': {'key': 'deleted', 'type': 'bool'},
'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'owner': {'key': 'owner', 'type': 'str'},
'parent_definition_id': {'key': 'parentDefinitionId', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
'tasks': {'key': 'tasks', 'type': '[TaskGroupStep]'}
}
def __init__(self, agent_execution=None, author=None, category=None, contents_uploaded=None, contribution_identifier=None, contribution_version=None, data_source_bindings=None, definition_type=None, demands=None, deprecated=None, description=None, disabled=None, execution=None, friendly_name=None, groups=None, help_mark_down=None, host_type=None, icon_url=None, id=None, inputs=None, instance_name_format=None, minimum_agent_version=None, name=None, output_variables=None, package_location=None, package_type=None, preview=None, release_notes=None, runs_on=None, satisfies=None, server_owned=None, source_definitions=None, source_location=None, version=None, visibility=None, comment=None, created_by=None, created_on=None, deleted=None, modified_by=None, modified_on=None, owner=None, parent_definition_id=None, revision=None, tasks=None):
super(TaskGroup, self).__init__(agent_execution=agent_execution, author=author, category=category, contents_uploaded=contents_uploaded, contribution_identifier=contribution_identifier, contribution_version=contribution_version, data_source_bindings=data_source_bindings, definition_type=definition_type, demands=demands, deprecated=deprecated, description=description, disabled=disabled, execution=execution, friendly_name=friendly_name, groups=groups, help_mark_down=help_mark_down, host_type=host_type, icon_url=icon_url, id=id, inputs=inputs, instance_name_format=instance_name_format, minimum_agent_version=minimum_agent_version, name=name, output_variables=output_variables, package_location=package_location, package_type=package_type, preview=preview, release_notes=release_notes, runs_on=runs_on, satisfies=satisfies, server_owned=server_owned, source_definitions=source_definitions, source_location=source_location, version=version, visibility=visibility)
self.comment = comment
self.created_by = created_by
self.created_on = created_on
self.deleted = deleted
self.modified_by = modified_by
self.modified_on = modified_on
self.owner = owner
self.parent_definition_id = parent_definition_id
self.revision = revision
self.tasks = tasks
| 56.628743
| 973
| 0.649466
|
from .task_definition import TaskDefinition
class TaskGroup(TaskDefinition):
_attribute_map = {
'agent_execution': {'key': 'agentExecution', 'type': 'TaskExecution'},
'author': {'key': 'author', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'contents_uploaded': {'key': 'contentsUploaded', 'type': 'bool'},
'contribution_identifier': {'key': 'contributionIdentifier', 'type': 'str'},
'contribution_version': {'key': 'contributionVersion', 'type': 'str'},
'data_source_bindings': {'key': 'dataSourceBindings', 'type': '[DataSourceBinding]'},
'definition_type': {'key': 'definitionType', 'type': 'str'},
'demands': {'key': 'demands', 'type': '[object]'},
'deprecated': {'key': 'deprecated', 'type': 'bool'},
'description': {'key': 'description', 'type': 'str'},
'disabled': {'key': 'disabled', 'type': 'bool'},
'execution': {'key': 'execution', 'type': '{object}'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'groups': {'key': 'groups', 'type': '[TaskGroupDefinition]'},
'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
'host_type': {'key': 'hostType', 'type': 'str'},
'icon_url': {'key': 'iconUrl', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[TaskInputDefinition]'},
'instance_name_format': {'key': 'instanceNameFormat', 'type': 'str'},
'minimum_agent_version': {'key': 'minimumAgentVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'output_variables': {'key': 'outputVariables', 'type': '[TaskOutputVariable]'},
'package_location': {'key': 'packageLocation', 'type': 'str'},
'package_type': {'key': 'packageType', 'type': 'str'},
'preview': {'key': 'preview', 'type': 'bool'},
'release_notes': {'key': 'releaseNotes', 'type': 'str'},
'runs_on': {'key': 'runsOn', 'type': '[str]'},
'satisfies': {'key': 'satisfies', 'type': '[str]'},
'server_owned': {'key': 'serverOwned', 'type': 'bool'},
'source_definitions': {'key': 'sourceDefinitions', 'type': '[TaskSourceDefinition]'},
'source_location': {'key': 'sourceLocation', 'type': 'str'},
'version': {'key': 'version', 'type': 'TaskVersion'},
'visibility': {'key': 'visibility', 'type': '[str]'},
'comment': {'key': 'comment', 'type': 'str'},
'created_by': {'key': 'createdBy', 'type': 'IdentityRef'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'deleted': {'key': 'deleted', 'type': 'bool'},
'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'owner': {'key': 'owner', 'type': 'str'},
'parent_definition_id': {'key': 'parentDefinitionId', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'},
'tasks': {'key': 'tasks', 'type': '[TaskGroupStep]'}
}
def __init__(self, agent_execution=None, author=None, category=None, contents_uploaded=None, contribution_identifier=None, contribution_version=None, data_source_bindings=None, definition_type=None, demands=None, deprecated=None, description=None, disabled=None, execution=None, friendly_name=None, groups=None, help_mark_down=None, host_type=None, icon_url=None, id=None, inputs=None, instance_name_format=None, minimum_agent_version=None, name=None, output_variables=None, package_location=None, package_type=None, preview=None, release_notes=None, runs_on=None, satisfies=None, server_owned=None, source_definitions=None, source_location=None, version=None, visibility=None, comment=None, created_by=None, created_on=None, deleted=None, modified_by=None, modified_on=None, owner=None, parent_definition_id=None, revision=None, tasks=None):
super(TaskGroup, self).__init__(agent_execution=agent_execution, author=author, category=category, contents_uploaded=contents_uploaded, contribution_identifier=contribution_identifier, contribution_version=contribution_version, data_source_bindings=data_source_bindings, definition_type=definition_type, demands=demands, deprecated=deprecated, description=description, disabled=disabled, execution=execution, friendly_name=friendly_name, groups=groups, help_mark_down=help_mark_down, host_type=host_type, icon_url=icon_url, id=id, inputs=inputs, instance_name_format=instance_name_format, minimum_agent_version=minimum_agent_version, name=name, output_variables=output_variables, package_location=package_location, package_type=package_type, preview=preview, release_notes=release_notes, runs_on=runs_on, satisfies=satisfies, server_owned=server_owned, source_definitions=source_definitions, source_location=source_location, version=version, visibility=visibility)
self.comment = comment
self.created_by = created_by
self.created_on = created_on
self.deleted = deleted
self.modified_by = modified_by
self.modified_on = modified_on
self.owner = owner
self.parent_definition_id = parent_definition_id
self.revision = revision
self.tasks = tasks
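A minimal, hypothetical usage sketch (not part of the source record): it constructs the TaskGroup model defined above using only keyword arguments that appear in its __init__ signature; every value is a placeholder chosen for illustration.

# Hedged example: building a TaskGroup with a few of the kwargs shown above.
# All values are invented placeholders, not real Azure DevOps data.
example_group = TaskGroup(
    name='Deploy web app',       # hypothetical task group name
    category='Deploy',           # hypothetical category
    revision=1,
    comment='Initial version',
    tasks=[],                    # would normally hold TaskGroupStep items
)
print(example_group.name, example_group.revision)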
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: f715ec73403c11a6f9f9c12405d55dc8c40d491d
size: 2782
ext: py
lang: Python
max_stars_repo_path: rpbp/analysis/profile_construction/visualize_metagene_profile_bayes_factor.py
max_stars_repo_name: HeyLifeHD/rp-bp
max_stars_repo_head_hexsha: 9c59b1bc0267400747477467c45f96364d5528e1
max_stars_repo_licenses: ["MIT"]
max_stars_count: 6
max_stars_repo_stars_event_min_datetime: 2016-05-16T18:52:41.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-31T06:27:29.000Z
max_issues_repo_path: rpbp/analysis/profile_construction/visualize_metagene_profile_bayes_factor.py
max_issues_repo_name: HeyLifeHD/rp-bp
max_issues_repo_head_hexsha: 9c59b1bc0267400747477467c45f96364d5528e1
max_issues_repo_licenses: ["MIT"]
max_issues_count: 110
max_issues_repo_issues_event_min_datetime: 2016-06-22T13:24:39.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-07T09:29:14.000Z
max_forks_repo_path: rpbp/analysis/profile_construction/visualize_metagene_profile_bayes_factor.py
max_forks_repo_name: HeyLifeHD/rp-bp
max_forks_repo_head_hexsha: 9c59b1bc0267400747477467c45f96364d5528e1
max_forks_repo_licenses: ["MIT"]
max_forks_count: 5
max_forks_repo_forks_event_min_datetime: 2017-05-22T12:21:51.000Z
max_forks_repo_forks_event_max_datetime: 2022-02-06T10:32:56.000Z
content:
#! /usr/bin/env python3
import matplotlib
matplotlib.use('agg')
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import logging
default_title = "Metagene profile Bayes' factors"
default_xlabel = "Offset, relative to translation \ninitiation site"
default_ylabel = "Bayes' factor"
default_font_size = 15
default_series_label = ""
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This script visualizes the Bayes' factors for a metagene profile.\n\n"
"This script contains some hard-coded field names.")
parser.add_argument('bayes_factors', help="The metagene profile (csv) file")
parser.add_argument('length', help="The profile lengths to visualize", type=int)
parser.add_argument('out', help="The (output) image file")
parser.add_argument('--title', help="The title for the figure", default=default_title)
parser.add_argument('--xlabel', help="The label for the x-axis", default=default_xlabel)
parser.add_argument('--ylabel', help="The label for the y-axis", default=default_ylabel)
parser.add_argument('--series-label', help="The label for the legend", default=default_series_label)
parser.add_argument('--font-size', help="The font size for the title, axis labels, and "
"xticks labels", type=int, default=default_font_size)
args = parser.parse_args()
bayes_factors = pd.read_csv(args.bayes_factors)
mask_length = bayes_factors['length'] == args.length
group = bayes_factors.loc[mask_length]
bfs = group['bayes_factor_mean']
offsets = group['offset']
bf_range = max(bfs) - min(bfs)
fig, ax = plt.subplots(figsize=(10,5))
ax.plot(offsets, bfs, label=args.series_label, color='b')
ax.scatter(offsets, bfs, color='b')
xlim = (min(offsets), max(offsets))
ymin = min(bfs) - 0.1*bf_range
ymax = max(bfs) + 0.1*bf_range
ylim = (ymin, ymax)
# and draw a line at "bf=5"
plt.plot(xlim, (5, 5), color='k', linewidth=2, linestyle=':')
# and a horizontal line at the maximum bf
plt.plot(xlim, (max(bfs), max(bfs)), color='r', linewidth=1, linestyle="-.")
# and a vertical line at "offset=-12"
ax.plot((-12, -12), ylim, color='g', linestyle="--")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# finally, add the labels, etc.
plt.suptitle(args.title, fontsize=args.font_size, y=1.03)
ax.set_xlabel(args.xlabel, fontsize=args.font_size)
ax.set_ylabel(args.ylabel, fontsize=args.font_size)
ax.tick_params(axis='both', which='major', labelsize=args.font_size)
#ax.legend(loc="upper right")
fig.tight_layout()
fig.savefig(args.out, bbox_inches='tight')
if __name__ == '__main__':
main()
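For reference, a small sketch (not from the source file) of the selection step the script performs, using a toy DataFrame with the hard-coded column names the script expects ('length', 'offset', 'bayes_factor_mean'); all numbers are invented for illustration.

import pandas as pd

# Toy table with the columns the script assumes; values are placeholders.
bayes_factors = pd.DataFrame({
    'length': [21, 21, 22],
    'offset': [-13, -12, -12],
    'bayes_factor_mean': [1.2, 8.7, 3.4],
})

# Same masking logic as in main(): keep only the rows for one read length.
mask_length = bayes_factors['length'] == 21
group = bayes_factors.loc[mask_length]
print(group[['offset', 'bayes_factor_mean']])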
avg_line_length: 34.345679
max_line_length: 104
alphanum_fraction: 0.691948
content_no_comment:
import matplotlib
matplotlib.use('agg')
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import logging
default_title = "Metagene profile Bayes' factors"
default_xlabel = "Offset, relative to translation \ninitiation site"
default_ylabel = "Bayes' factor"
default_font_size = 15
default_series_label = ""
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This script visualizes the Bayes' factors for a metagene profile.\n\n"
"This script contains some hard-coded field names.")
parser.add_argument('bayes_factors', help="The metagene profile (csv) file")
parser.add_argument('length', help="The profile lengths to visualize", type=int)
parser.add_argument('out', help="The (output) image file")
parser.add_argument('--title', help="The title for the figure", default=default_title)
parser.add_argument('--xlabel', help="The label for the x-axis", default=default_xlabel)
parser.add_argument('--ylabel', help="The label for the y-axis", default=default_ylabel)
parser.add_argument('--series-label', help="The label for the legend", default=default_series_label)
parser.add_argument('--font-size', help="The font size for the title, axis labels, and "
"xticks labels", type=int, default=default_font_size)
args = parser.parse_args()
bayes_factors = pd.read_csv(args.bayes_factors)
mask_length = bayes_factors['length'] == args.length
group = bayes_factors.loc[mask_length]
bfs = group['bayes_factor_mean']
offsets = group['offset']
bf_range = max(bfs) - min(bfs)
fig, ax = plt.subplots(figsize=(10,5))
ax.plot(offsets, bfs, label=args.series_label, color='b')
ax.scatter(offsets, bfs, color='b')
xlim = (min(offsets), max(offsets))
ymin = min(bfs) - 0.1*bf_range
ymax = max(bfs) + 0.1*bf_range
ylim = (ymin, ymax)
# and draw a line at "bf=5"
plt.plot(xlim, (5, 5), color='k', linewidth=2, linestyle=':')
# and a horizontal line at the maximum bf
plt.plot(xlim, (max(bfs), max(bfs)), color='r', linewidth=1, linestyle="-.")
# and a vertical line at "offset=-12"
ax.plot((-12, -12), ylim, color='g', linestyle="--")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# finally, add the labels, etc.
plt.suptitle(args.title, fontsize=args.font_size, y=1.03)
ax.set_xlabel(args.xlabel, fontsize=args.font_size)
ax.set_ylabel(args.ylabel, fontsize=args.font_size)
ax.tick_params(axis='both', which='major', labelsize=args.font_size)
#ax.legend(loc="upper right")
fig.tight_layout()
fig.savefig(args.out, bbox_inches='tight')
if __name__ == '__main__':
main()
is_comment_constant_removed: true
is_sharp_comment_removed: true